diff --git a/.gitignore b/.gitignore
index 3821f6b63ed..478794534b3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@ yt/geometry/oct_container.c
yt/geometry/oct_visitors.c
yt/geometry/particle_deposit.c
yt/geometry/particle_oct_container.c
+yt/geometry/particle_oct_container.cpp
yt/geometry/particle_smooth.c
yt/geometry/selection_routines.c
yt/utilities/amr_utils.c
@@ -34,11 +35,16 @@ yt/utilities/lib/alt_ray_tracers.c
yt/utilities/lib/amr_kdtools.c
yt/utilities/lib/basic_octree.c
yt/utilities/lib/bitarray.c
+yt/utilities/lib/bounded_priority_queue.c
yt/utilities/lib/bounding_volume_hierarchy.c
yt/utilities/lib/contour_finding.c
+yt/utilities/lib/cykdtree/kdtree.cpp
+yt/utilities/lib/cykdtree/utils.cpp
+yt/utilities/lib/cyoctree.cpp
yt/utilities/lib/depth_first_octree.c
yt/utilities/lib/distance_queue.c
yt/utilities/lib/element_mappings.c
+yt/utilities/lib/ewah_bool_wrap.cpp
yt/utilities/lib/fnv_hash.c
yt/utilities/lib/fortran_reader.c
yt/utilities/lib/freetype_writer.c
@@ -55,13 +61,16 @@ yt/utilities/lib/mesh_samplers.cpp
yt/utilities/lib/mesh_traversal.cpp
yt/utilities/lib/mesh_triangulation.c
yt/utilities/lib/mesh_utilities.c
+yt/utilities/lib/pixelization_routines.cpp
yt/utilities/lib/misc_utilities.c
+yt/utilities/lib/particle_kdtree_tools.cpp
yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/partitioned_grid.c
yt/utilities/lib/primitives.c
yt/utilities/lib/origami.c
yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/pixelization_routines.c
+yt/utilities/lib/pixelization_routines.cpp
yt/utilities/lib/png_writer.c
yt/utilities/lib/points_in_volume.c
yt/utilities/lib/quad_tree.c
@@ -71,7 +80,6 @@ yt/utilities/lib/grid_traversal.c
yt/utilities/lib/marching_cubes.c
yt/utilities/lib/png_writer.h
yt/utilities/lib/write_array.c
-yt/utilities/lib/perftools_wrap.c
yt/utilities/lib/partitioned_grid.c
yt/utilities/lib/volume_container.c
yt/utilities/lib/lenses.c
diff --git a/.gitmodules b/.gitmodules
index 5bd34913d2f..d2d34bb18cd 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,4 @@
[submodule "answer-store"]
path = answer-store
url = https://github.com/yt-project/answer-store
- branch = master
+ branch = yt-4.0
diff --git a/.hgignore b/.hgignore
index cd0a9b6672e..78e6f7e77ee 100644
--- a/.hgignore
+++ b/.hgignore
@@ -37,6 +37,7 @@ yt/utilities/lib/contour_finding.c
yt/utilities/lib/depth_first_octree.c
yt/utilities/lib/distance_queue.c
yt/utilities/lib/element_mappings.c
+yt/utilities/lib/ewah_bool_wrap.cpp
yt/utilities/lib/fnv_hash.c
yt/utilities/lib/fortran_reader.c
yt/utilities/lib/freetype_writer.c
@@ -54,6 +55,7 @@ yt/utilities/lib/mesh_traversal.cpp
yt/utilities/lib/mesh_triangulation.c
yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
+yt/utilities/lib/particle_kdtree_tools.cpp
yt/utilities/lib/particle_mesh_operations.c
yt/utilities/lib/partitioned_grid.c
yt/utilities/lib/primitives.c
diff --git a/.travis.yml b/.travis.yml
index 801f5103004..e7cfad98563 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,5 +1,5 @@
language: python
-dist: xenial
+dist: bionic
cache:
pip: true
directories:
@@ -9,6 +9,7 @@ addons:
apt:
packages:
- libhdf5-serial-dev
+ - libnetcdf-dev
- libproj-dev
- proj-data
- proj-bin
@@ -55,9 +56,11 @@ install:
fi
if [[ ${TRAVIS_BUILD_STAGE_NAME} != "Lint" ]]; then
if [[ $MINIMAL == 1 ]]; then
+        # Ensure numpy and cython are installed so that dependencies that need
+        # to be built don't error out
# The first numpy to support py3.6 is 1.12, but numpy 1.13 matches
# unyt so we'll match it here.
- $PIP install numpy==1.13.3 cython==0.24
+ $PIP install numpy==1.13.3 cython==0.26.1
$PIP install -r tests/test_minimal_requirements.txt
else
# Getting cartopy installed requires getting cython and numpy installed
@@ -95,9 +98,12 @@ jobs:
python: 3.8
script: coverage run $(which nosetests) -c nose_unit.cfg
+    # This is not necessarily permanent -- once yt-4.0 is merged into master
+    # we will likely rework this to reduce the number of Python versions we
+    # test on.
- stage: tests
- name: "Python: 3.6 Answer Tests"
- python: 3.6
+ name: "Python: 3.7 Answer Tests"
+ python: 3.7
script: coverage run $(which nosetests) -c nose_answer.cfg
after_failure: python tests/report_failed_answers.py -f -m --xunit-file "answer_nosetests.xml"
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index a5fa80ade7c..f77e890b01b 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -16,12 +16,8 @@ Coding is only one way to be involved!
Communication Channels
----------------------
-There are five main communication channels for yt:
+There are three main communication channels for yt:
- * We have an IRC channel, on ``irc.freenode.net`` in ``#yt``.
- You can connect through our web
- gateway without any special client, at https://yt-project.org/irc.html .
- *IRC is the first stop for conversation!*
* Many yt developers participate in the yt Slack community. Slack is a free
chat service that many teams use to organize their work. You can get an
invite to yt's Slack organization by clicking the "Join us @ Slack" button
@@ -405,12 +401,6 @@ the following subdirectories:
classes for data regions, covering grids, time series, and so on. This
also includes derived fields and derived quantities.
-``analysis_modules``
- This is where all mechanisms for processing data live. This includes
- things like clump finding, halo profiling, halo finding, and so on. This
- is something of a catchall, but it serves as a level of greater
- abstraction that simply data selection and modification.
-
``gui``
This is where all GUI components go. Typically this will be some small
tool used for one or two things, which contains a launching mechanism on
@@ -762,6 +752,7 @@ Source code style guide
* In general, follow PEP-8 guidelines.
https://www.python.org/dev/peps/pep-0008/
+ * We no longer have a copyright blurb in every source file.
* Classes are ``ConjoinedCapitals``, methods and functions are
``lowercase_with_underscores``.
* Use 4 spaces, not tabs, to represent indentation.
@@ -784,7 +775,7 @@ Source code style guide
that occur on an object. See :ref:`docstrings` below for a fiducial example
of a docstring.
* Use only one top-level import per line. Unless there is a good reason not to,
- imports should happen at the top of the file, after the copyright blurb.
+ imports should happen at the top of the file.
* Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
``is`` or ``is not``.
* If you are comparing with a numpy boolean array, just refer to the array.
diff --git a/MANIFEST.in b/MANIFEST.in
index 8858ce2186c..bd48ccb6303 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,14 +3,14 @@ include yt/visualization/mapserver/html/map.js
include yt/visualization/mapserver/html/map_index.html
include yt/utilities/mesh_types.yaml
exclude scripts/pr_backport.py
-recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
+recursive-include yt *.py *.pyx *.pxd *.h *.hpp README* *.txt LICENSE* *.cu
recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
include doc/extensions/README doc/Makefile
prune doc/source/reference/api/generated
prune doc/build
-recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader
+include yt/sample_data_registry.json
prune yt/frontends/_skeleton
recursive-include yt/frontends/amrvac *.par
diff --git a/answer-store b/answer-store
index 4691cccb917..fe17f9b706d 160000
--- a/answer-store
+++ b/answer-store
@@ -1 +1 @@
-Subproject commit 4691cccb917c971590fbde89d499c54fe0c7eaec
+Subproject commit fe17f9b706d4bee227afb0fa2cd41df0049ae924
diff --git a/appveyor.yml b/appveyor.yml
index 31e4c531579..ca1e282acb3 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -30,7 +30,10 @@ install:
- "python --version"
# Install specified version of numpy and dependencies
- - "conda install --yes -c conda-forge numpy scipy nose pytest setuptools ipython Cython sympy fastcache h5py matplotlib=3.1.3 mock pandas cartopy conda-build pyyaml"
+ - "conda install --yes -c conda-forge numpy scipy nose pytest setuptools ipython git
+ Cython sympy fastcache h5py matplotlib=3.1.3 mock pandas cartopy conda-build pooch pyyaml"
+ - "pip install git+https://github.com/yt-project/unyt@de443dff7671f1e68557306d77582cd117cc94f8#egg=unyt"
+  # Install yt
- "pip install -e ."
# Not a .NET project
diff --git a/doc/Makefile b/doc/Makefile
index 8cc412ee0a3..342b956aee5 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -49,8 +49,7 @@ html:
ifneq ($(READTHEDOCS),True)
SPHINX_APIDOC_OPTIONS=members,undoc-members,inherited-members,show-inheritance sphinx-apidoc \
-o source/reference/api/ \
- -e ../yt ../yt/extern* $(shell find ../yt -name "*tests*" -type d) ../yt/utilities/voropp* \
- ../yt/analysis_modules/halo_finding/{fof,hop}
+ -e ../yt ../yt/extern* $(shell find ../yt -name "*tests*" -type d) ../yt/utilities/voropp*
endif
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
diff --git a/doc/helper_scripts/parse_cb_list.py b/doc/helper_scripts/parse_cb_list.py
index b91976993d9..4287b355594 100644
--- a/doc/helper_scripts/parse_cb_list.py
+++ b/doc/helper_scripts/parse_cb_list.py
@@ -28,7 +28,7 @@ def write_docstring(f, name, cls):
sig = sig.replace("**kwargs", "**field_parameters")
clsproxy = "yt.visualization.plot_modifications.%s" % (cls.__name__)
#docstring = "\n".join([" %s" % line for line in docstring.split("\n")])
- #print docstring
+ #print(docstring)
f.write(template % dict(clsname = clsname, sig = sig, clsproxy=clsproxy,
docstring = "\n".join(tw.wrap(docstring))))
#docstring = docstring))
diff --git a/doc/helper_scripts/show_fields.py b/doc/helper_scripts/show_fields.py
index a7ab1cce666..2c7365a6967 100644
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -54,8 +54,10 @@ def _strip_ftype(field):
unit_registry=ds.unit_registry)
for my_unit in ["m", "pc", "AU", "au"]:
new_unit = "%scm" % my_unit
- ds.unit_registry.add(new_unit, base_ds.unit_registry.lut[my_unit][0],
- dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
+ my_u = Unit(my_unit, registry=ds.unit_registry)
+ ds.unit_registry.add(new_unit, my_u.base_value,
+ dimensions.length, "\\rm{%s}/(1+z)" % my_unit,
+ prefixable=True)
@@ -143,7 +145,7 @@ def print_all_fields(fl):
print(" * Units: :math:`%s`" % fix_units(df.units))
else:
print(" * Units: :math:`%s`" % fix_units(df.units, in_cgs=True))
- print(" * Particle Type: %s" % (df.particle_type))
+ print(" * Sampling Method: %s" % (df.sampling_type))
print()
print("**Field Source**")
print()
diff --git a/doc/helper_scripts/table.py b/doc/helper_scripts/table.py
index 40e27223578..4faf0046c85 100644
--- a/doc/helper_scripts/table.py
+++ b/doc/helper_scripts/table.py
@@ -23,13 +23,6 @@
("interacting/index.html", "Interacting with yt",
"Different ways -- scripting, GUIs, prompts, explorers -- to explore " +
"your data."),
- ("analysis_modules/index.html", "Analysis Modules",
- "Discussions of some provided procedures for astrophysical analysis " +
- "like halo finding and synthetic spectra. Halo finding, analyzing " +
- "cosmology simulations, halo mass functions, halo profiling, light " +
- "cone generator, making absorption spectrums, star particle " +
- "analysis, two-point functions, halo merger trees, clump finding, " +
- "radial column density, exporting to sunrise.")
]),
("Advanced Usage", [
("advanced/index.html", "Advanced yt usage",
diff --git a/doc/install_script.sh b/doc/install_script.sh
index 9560a9c5cb8..0185996394e 100644
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -32,13 +32,13 @@ INST_GIT=1 # Install git or not? If git is not already installed, yt
INST_EMBREE=0 # Install dependencies needed for Embree-accelerated ray tracing
INST_PYX=0 # Install PyX? Sometimes PyX can be problematic without a
# working TeX installation.
-INST_ROCKSTAR=0 # Install the Rockstar halo finder?
INST_SCIPY=0 # Install scipy?
INST_H5PY=1 # Install h5py?
INST_ASTROPY=0 # Install astropy?
INST_CARTOPY=0 # Install cartopy?
INST_NOSE=1 # Install nose?
INST_NETCDF4=1 # Install netcdf4 and its python bindings?
+INST_POOCH=1 # Install pooch?
INST_HG=0 # Install Mercurial or not?
# This is the branch we will install from for INST_YT_SOURCE=1
@@ -134,13 +134,13 @@ function write_config
echo INST_GIT=${INST_GIT} >> ${CONFIG_FILE}
echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE}
- echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
echo INST_EMBREE=${INST_EMBREE} >> ${CONFIG_FILE}
echo INST_H5PY=${INST_H5PY} >> ${CONFIG_FILE}
echo INST_ASTROPY=${INST_ASTROPY} >> ${CONFIG_FILE}
echo INST_CARTOPY=${INST_CARTOPY} >> ${CONFIG_FILE}
echo INST_NOSE=${INST_NOSE} >> ${CONFIG_FILE}
+ echo INST_POOCH=${INST_POOCH} >> ${CONFIG_FILE}
echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
}
@@ -286,17 +286,6 @@ then
PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
fi
-if [ $INST_ROCKSTAR -ne 0 ]
-then
- if [ $INST_YT_SOURCE -eq 0 ]
- then
- echo "yt must be compiled from source to install support for"
- echo "the rockstar halo finder. Please set INST_YT_SOURCE to 1"
- echo "and re-run the install script"
- exit 1
- fi
-fi
-
echo
echo
echo "========================================================================"
@@ -330,10 +319,6 @@ printf "%-18s = %s so I " "INST_PYX" "${INST_PYX}"
get_willwont ${INST_PYX}
echo "be installing PyX"
-printf "%-18s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
-get_willwont ${INST_ROCKSTAR}
-echo "be installing Rockstar"
-
printf "%-18s = %s so I " "INST_H5PY" "${INST_H5PY}"
get_willwont ${INST_H5PY}
echo "be installing h5py"
@@ -350,6 +335,10 @@ printf "%-18s = %s so I " "INST_NOSE" "${INST_NOSE}"
get_willwont ${INST_NOSE}
echo "be installing nose"
+printf "%-18s = %s so I " "INST_POOCH" "${INST_POOCH}"
+get_willwont ${INST_POOCH}
+echo "be installing pooch"
+
echo
echo
@@ -522,6 +511,10 @@ if [ $INST_CARTOPY -ne 0 ]
then
YT_DEPS+=('cartopy')
fi
+if [ $INST_POOCH -ne 0 ]
+then
+ YT_DEPS+=('pooch')
+fi
YT_DEPS+=('conda-build')
if [ $INST_PY3 -eq 0 ] && [ $INST_HG -eq 1 ]
then
@@ -598,16 +591,6 @@ then
popd &> /dev/null
fi
-if [ $INST_ROCKSTAR -eq 1 ]
-then
- echo "Building Rockstar"
- ( ${GIT_EXE} clone https://github.com/yt-project/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
- ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
- log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar
- log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE
- ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
-fi
-
# conda doesn't package pyx, so we install manually with pip
if [ $INST_PYX -eq 1 ]
then
@@ -650,13 +633,8 @@ else
then
echo $DEST_DIR > ${YT_DIR}/embree.cfg
fi
- if [ $INST_ROCKSTAR -eq 1 ]
- then
- echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
- ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
- fi
pushd ${YT_DIR} &> /dev/null
- ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
+ ( ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
popd &> /dev/null
fi
@@ -697,21 +675,6 @@ echo "You can also update the init file appropriate for your shell"
echo "(e.g. .bashrc, .bash_profile, .cshrc, or .zshrc) to include"
echo "the same command."
echo
-if [ $INST_ROCKSTAR -eq 1 ]
-then
- if [ $MYOS = "Darwin" ]
- then
- LD_NAME="DYLD_LIBRARY_PATH"
- else
- LD_NAME="LD_LIBRARY_PATH"
- fi
- echo
- echo "For rockstar to work, you must also set $LD_NAME:"
- echo
- echo " export $LD_NAME=$DEST_DIR/lib:\$$LD_NAME"
- echo
- echo "or whichever invocation is appropriate for your shell."
-fi
echo "========================================================================"
echo
echo "Oh, look at me, still talking when there's science to do!"
diff --git a/doc/source/analyzing/analysis_modules/PPVCube.ipynb b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
deleted file mode 100644
index 3f404884187..00000000000
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ /dev/null
@@ -1,455 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.config import ytcfg\n",
- "\n",
- "import yt\n",
- "import numpy as np\n",
- "from yt.analysis_modules.ppv_cube.api import PPVCube\n",
- "import yt.units as u"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Density: $\\rho(r) \\propto r^{\\alpha}$"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where for simplicity we won't worry about the normalizations of these profiles. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "First, we'll set up the grid and the parameters of the profiles:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# increasing the resolution will make the images in this notebook more visually appealing\n",
- "nx,ny,nz = (64, 64, 64) # domain dimensions\n",
- "R = 10. # outer radius of disk, kpc\n",
- "r_0 = 3. # scale radius, kpc\n",
- "beta = 1.4 # for the tangential velocity profile\n",
- "alpha = -1. # for the radial density profile\n",
- "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
- "r = np.sqrt(x*x+y*y) # polar coordinates\n",
- "theta = np.arctan2(y, x) # polar coordinates"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Second, we'll construct the data arrays for the density, temperature, and velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero. "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "dens = np.zeros((nx,ny,nz))\n",
- "dens[:,:,nz//2-3:nz//2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
- "temp = np.zeros((nx,ny,nz))\n",
- "temp[:,:,nz//2-3:nz//2+3] = 1.0e5 # Isothermal\n",
- "vel_theta = 100.*r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
- "velx = np.zeros((nx,ny,nz))\n",
- "vely = np.zeros((nx,ny,nz))\n",
- "velx[:,:,nz//2-3:nz//2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
- "vely[:,:,nz//2-3:nz//2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
- "dens[r > R] = 0.0\n",
- "temp[r > R] = 0.0\n",
- "velx[r > R] = 0.0\n",
- "vely[r > R] = 0.0"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1 `code_length`. "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "data = {}\n",
- "data[\"density\"] = (dens,\"g/cm**3\")\n",
- "data[\"temperature\"] = (temp, \"K\")\n",
- "data[\"velocity_x\"] = (velx, \"km/s\")\n",
- "data[\"velocity_y\"] = (vely, \"km/s\")\n",
- "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
- "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
- "ds = yt.load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "slc = yt.SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "slc.set_log(\"velocity_x\", False)\n",
- "slc.set_log(\"velocity_y\", False)\n",
- "slc.set_log(\"velocity_magnitude\", False)\n",
- "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the x-axis. We'll create a normal vector:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "i = 60.*np.pi/180.\n",
- "L = [np.sin(i),0.0,np.cos(i)]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to choose the bounds in line-of-sight velocity that the data will be binned into, which is a 4-tuple in the shape of `(vmin, vmax, nbins, units)`, which specifies a linear range of `nbins` velocity bins from `vmin` to `vmax` in units of `units`. We may also optionally specify the dimensions of the data cube with the `dims` argument."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false,
- "scrolled": true
- },
- "outputs": [],
- "source": [
- "cube = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, method=\"sum\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Following this, we can now write this cube to a FITS file. The x and y axes of the file can be in length units, which can be optionally specified by `length_unit`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "cube.write_fits(\"cube.fits\", clobber=True, length_unit=\"kpc\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Or one can use the `sky_scale` and `sky_center` keywords to set up the coordinates in RA and Dec:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "sky_scale = (1.0, \"arcsec/kpc\")\n",
- "sky_center = (30., 45.) # RA, Dec in degrees\n",
- "cube.write_fits(\"cube_sky.fits\", clobber=True, sky_scale=sky_scale, sky_center=sky_center)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds_cube = yt.load(\"cube.fits\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# Specifying no center gives us the center slice\n",
- "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"])\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# Picking different velocities for the slices\n",
- "new_center = ds_cube.domain_center\n",
- "new_center[2] = ds_cube.spec2pixel(-100.*u.km/u.s)\n",
- "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "new_center[2] = ds_cube.spec2pixel(70.0*u.km/u.s)\n",
- "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "new_center[2] = ds_cube.spec2pixel(-30.0*u.km/u.s)\n",
- "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "prj = yt.ProjectionPlot(ds_cube, \"z\", [\"density\"], method=\"sum\")\n",
- "prj.set_log(\"density\", True)\n",
- "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
- "prj.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The `thermal_broad` keyword allows one to simulate thermal line broadening based on the temperature, and the `atomic_weight` argument is used to specify the atomic weight of the particle that is doing the emitting."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "cube2 = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, thermal_broad=True, \n",
- " atomic_weight=12.0, method=\"sum\")\n",
- "cube2.write_fits(\"cube2.fits\", clobber=True, length_unit=\"kpc\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Taking a slice of this cube shows:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds_cube2 = yt.load(\"cube2.fits\")\n",
- "new_center = ds_cube2.domain_center\n",
- "new_center[2] = ds_cube2.spec2pixel(70.0*u.km/u.s)\n",
- "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "new_center[2] = ds_cube2.spec2pixel(-100.*u.km/u.s)\n",
- "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where we can see the emission has been smeared into this velocity slice from neighboring slices due to the thermal broadening. \n",
- "\n",
- "Finally, the \"velocity\" or \"spectral\" axis of the cube can be changed to a different unit, such as wavelength, frequency, or energy: "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (cube2.vbins[0], cube2.vbins[-1])\n",
- "cube2.transform_spectral_axis(400.0,\"nm\")\n",
- "print (cube2.vbins[0], cube2.vbins[-1])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If a FITS file is now written from the cube, the spectral axis will be in the new units. To reset the spectral axis back to the original velocity units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "cube2.reset_spectral_axis()\n",
- "print (cube2.vbins[0], cube2.vbins[-1])"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/doc/source/analyzing/analysis_modules/ParallelHaloFinder.pdf b/doc/source/analyzing/analysis_modules/ParallelHaloFinder.pdf
deleted file mode 100644
index 6529c17beca..00000000000
Binary files a/doc/source/analyzing/analysis_modules/ParallelHaloFinder.pdf and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
deleted file mode 100644
index a3a64de46d1..00000000000
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ /dev/null
@@ -1,245 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The change in the CMB intensity due to Compton scattering of CMB\n",
- "photons off of thermal electrons in galaxy clusters, otherwise known as the\n",
- "Sunyaev-Zeldovich (S-Z) effect, can to a reasonable approximation be represented by a\n",
- "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n",
- "contributions, and for large frequencies and high temperatures\n",
- "relativistic effects are important. For computing the full S-Z signal\n",
- "incorporating all of these effects, there is a library:\n",
- "SZpack ([Chluba et al 2012](https://ui.adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
- "\n",
- "The `sunyaev_zeldovich` analysis module in yt makes it possible\n",
- "to make projections of the full S-Z signal given the properties of the\n",
- "thermal gas in the simulation using SZpack. SZpack has several different options for computing the S-Z signal, from full\n",
- "integrations to very good approximations. Since a full or even a\n",
- "partial integration of the signal for each cell in the projection\n",
- "would be prohibitively expensive, we use the method outlined in\n",
- "[Chluba et al 2013](https://ui.adsabs.harvard.edu/abs/2013MNRAS.430.3054C) to expand the\n",
- "total S-Z signal in terms of moments of the projected optical depth $\\tau$, projected electron temperature $T_e$, and\n",
- "velocities $\\beta_{c,\\parallel}$ and $\\beta_{c,\\perp}$ (their equation 18):"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "$$S(\\tau, T_{e},\\beta_{c,\\parallel},\\beta_{\\rm c,\\perp}) \\approx S_{\\rm iso}^{(0)} + S_{\\rm iso}^{(2)}\\omega^{(1)} + C_{\\rm iso}^{(1)}\\sigma^{(1)} + D_{\\rm iso}^{(2)}\\kappa^{(1)} + E_{\\rm iso}^{(2)}\\beta_{\\rm c,\\perp,SZ}^2 +~...$$\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "yt makes projections of the various moments needed for the\n",
- "calculation, and then the resulting projected fields are used to\n",
- "compute the S-Z signal. In our implementation, the expansion is carried out to first-order\n",
- "terms in $T_e$ and zeroth-order terms in $\\beta_{c,\\parallel}$ by default, but terms up to second-order in can be optionally\n",
- "included. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Installing SZpack"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "SZpack can be downloaded [here](http://www.jb.man.ac.uk/~jchluba/Science/SZpack/SZpack.html). Make\n",
- "sure you install a version later than v1.1.1. For computing the S-Z\n",
- "integrals, SZpack requires the [GNU Scientific Library](http://www.gnu.org/software/gsl/). For compiling\n",
- "the Python module, you need to have a recent version of [swig](http://www.swig.org>) installed. After running `make` in the top-level SZpack directory, you'll need to run it in the `python` subdirectory, which is the\n",
- "location of the `SZpack` module. You may have to include this location in the `PYTHONPATH` environment variable.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**NOTE**: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Creating S-Z Projections"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Once you have SZpack installed, making S-Z projections from yt\n",
- "datasets is fairly straightforward:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline\n",
- "import yt\n",
- "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n",
- "\n",
- "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
- "\n",
- "freqs = [90.,180.,240.]\n",
- "szprj = SZProjection(ds, freqs)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`freqs` is a list or array of frequencies in GHz at which the signal\n",
- "is to be computed. The `SZProjection` constructor also accepts the\n",
- "optional keywords, `mue` (mean molecular weight for computing the\n",
- "electron number density, 1.143 is the default) and `high_order` (set\n",
- "to True to compute terms in the S-Z signal expansion up to\n",
- "second-order in $T_{e,SZ}$ and $\\beta$). "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Once you have created the `SZProjection` object, you can use it to\n",
- "make on-axis and off-axis projections:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n",
- "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To make an off-axis projection, `szprj.off_axis` is called in the same way, except that the first argument is a three-component normal vector. \n",
- "\n",
- "Currently, only one projection can be in memory at once. These methods\n",
- "create images of the projected S-Z signal at each requested frequency,\n",
- "which can be accessed dict-like from the projection object (e.g.,\n",
- "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
- "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard yt\n",
- "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`. \n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Writing out the S-Z Projections"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "You may want to output the S-Z images to figures suitable for\n",
- "inclusion in a paper, or save them to disk for later use. There are a\n",
- "few methods included for this purpose. For PNG figures with a colorbar\n",
- "and axes, use `write_png`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "szprj.write_png(\"SZ_example\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "For simple output of the image data to disk, call `write_hdf5`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "szprj.write_hdf5(\"SZ_example.h5\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Finally, for output to FITS files which can be opened or analyzed\n",
- "using other programs (such as ds9), call `export_fits`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "szprj.write_fits(\"SZ_example.fits\", clobber=True)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "which would write all of the projections to a single FITS file,\n",
- "including coordinate information in kpc. The optional keyword\n",
- "`clobber` allows a previous file to be overwritten. \n"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.png b/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.png
deleted file mode 100644
index 0fd6254e09b..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.svg b/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.svg
deleted file mode 100644
index 420c77fe61e..00000000000
--- a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.svg
+++ /dev/null
@@ -1,186 +0,0 @@
-
-
-
-
diff --git a/doc/source/analyzing/analysis_modules/_images/31micron.png b/doc/source/analyzing/analysis_modules/_images/31micron.png
deleted file mode 100644
index 834b020af16..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/31micron.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/LightCone_full_small.png b/doc/source/analyzing/analysis_modules/_images/LightCone_full_small.png
deleted file mode 100644
index 585882f3a08..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/LightCone_full_small.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/PDF.png b/doc/source/analyzing/analysis_modules/_images/PDF.png
deleted file mode 100644
index 11b4213dead..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/PDF.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/PDF.svgz b/doc/source/analyzing/analysis_modules/_images/PDF.svgz
deleted file mode 100644
index a42236f2663..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/PDF.svgz and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.png b/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.png
deleted file mode 100644
index 89f2ead1730..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.svg b/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.svg
deleted file mode 100644
index 5553f1de577..00000000000
--- a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.svg
+++ /dev/null
@@ -1,617 +0,0 @@
-
-
-
diff --git a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_30_4.png b/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_30_4.png
deleted file mode 100644
index a46c5ebcfac..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_30_4.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_34_1.png b/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_34_1.png
deleted file mode 100644
index b623e24c004..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_34_1.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/SED.png b/doc/source/analyzing/analysis_modules/_images/SED.png
deleted file mode 100644
index 545ba5bafef..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/SED.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/SFR.png b/doc/source/analyzing/analysis_modules/_images/SFR.png
deleted file mode 100644
index 73f8242e296..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/SFR.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsBig.png b/doc/source/analyzing/analysis_modules/_images/TreecodeCellsBig.png
deleted file mode 100644
index 9ca3a8160f8..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsBig.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsSmall.png b/doc/source/analyzing/analysis_modules/_images/TreecodeCellsSmall.png
deleted file mode 100644
index 70d31e9475f..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsSmall.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/TreecodeOpeningAngleBig.png b/doc/source/analyzing/analysis_modules/_images/TreecodeOpeningAngleBig.png
deleted file mode 100644
index aebfd704b81..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/TreecodeOpeningAngleBig.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/bubbles.png b/doc/source/analyzing/analysis_modules/_images/bubbles.png
deleted file mode 100644
index 04179fcb7aa..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/bubbles.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/ds9_bubbles.png b/doc/source/analyzing/analysis_modules/_images/ds9_bubbles.png
deleted file mode 100644
index db3bbe6544f..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/ds9_bubbles.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/ds9_sloshing.png b/doc/source/analyzing/analysis_modules/_images/ds9_sloshing.png
deleted file mode 100644
index 211f90c04f9..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/ds9_sloshing.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/dsquared.png b/doc/source/analyzing/analysis_modules/_images/dsquared.png
deleted file mode 100644
index 5f43aac436a..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/dsquared.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/dust_continuum.png b/doc/source/analyzing/analysis_modules/_images/dust_continuum.png
deleted file mode 100644
index d33fc1d9646..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/dust_continuum.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/halo_mass_function.png b/doc/source/analyzing/analysis_modules/_images/halo_mass_function.png
deleted file mode 100644
index 2aebfa2e361..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/halo_mass_function.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/lightray.png b/doc/source/analyzing/analysis_modules/_images/lightray.png
deleted file mode 100644
index cf83394bafb..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/lightray.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/merger_tree_ex.png b/doc/source/analyzing/analysis_modules/_images/merger_tree_ex.png
deleted file mode 100644
index 3e4ac3cdade..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/merger_tree_ex.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/mw3_0420.jpg b/doc/source/analyzing/analysis_modules/_images/mw3_0420.jpg
deleted file mode 100644
index b065071eae7..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/mw3_0420.jpg and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/profiles.png b/doc/source/analyzing/analysis_modules/_images/profiles.png
deleted file mode 100644
index ea641d31dce..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/profiles.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/projections.png b/doc/source/analyzing/analysis_modules/_images/projections.png
deleted file mode 100644
index a0c6e27109b..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/projections.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/spectrum_full.png b/doc/source/analyzing/analysis_modules/_images/spectrum_full.png
deleted file mode 100644
index 6c38eeafdd3..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/spectrum_full.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/spectrum_zoom.png b/doc/source/analyzing/analysis_modules/_images/spectrum_zoom.png
deleted file mode 100644
index e9d109c0cc3..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/spectrum_zoom.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.png b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.png
deleted file mode 100644
index db7312edce1..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.svgz b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.svgz
deleted file mode 100644
index 8c070fee9c3..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.svgz and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.png b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.png
deleted file mode 100644
index 4212a3c9dfc..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.svgz b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.svgz
deleted file mode 100644
index 9cd18fc932b..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.svgz and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.png b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.png
deleted file mode 100644
index b07b72ab04d..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.png and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.svgz b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.svgz
deleted file mode 100644
index 50167c99cdb..00000000000
Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.svgz and /dev/null differ
diff --git a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
deleted file mode 100644
index a88e2ff5bcc..00000000000
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ /dev/null
@@ -1,441 +0,0 @@
-.. _absorption_spectrum:
-
-Creating Absorption Spectra
-===========================
-
-.. note::
-
- Development of the AbsorptionSpectrum module has been moved to the
- Trident package. This version is deprecated and will be removed from yt
- in a future release. See https://github.com/trident-project/trident
- for further information.
-
-Absorption line spectra are spectra generated using bright background sources
-to illuminate tenuous foreground material and are primarily used in studies
-of the circumgalactic medium and intergalactic medium. These spectra can
-be created using the
-:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
-and
-:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
-analysis modules.
-
-The
-:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` class
-and its workhorse method
-:meth:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum.make_spectrum`
-return two arrays, one with wavelengths, the other with the normalized
-flux values at each of the wavelength values. It can also output a text file
-listing all important lines.
-
-For example, here is an absorption spectrum for the wavelength range from 900
-to 1800 Angstroms made with a light ray extending from z = 0 to z = 0.4:
-
-.. image:: _images/spectrum_full.png
- :width: 500
-
-And a zoom-in on the 1425-1450 Angstrom window:
-
-.. image:: _images/spectrum_zoom.png
- :width: 500
-
-Method for Creating Absorption Spectra
---------------------------------------
-
-Once a
-:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
-has been created traversing a dataset using the :ref:`light-ray-generator`,
-a series of arrays store the various fields of the gas parcels (represented
-as cells) intersected along the ray.
-:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
-steps through each element of the
-:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`'s
-arrays and calculates the column density for desired ion by multiplying its
-number density with the path length through the cell. Using these column
-densities along with temperatures to calculate thermal broadening, Voigt
-profiles are deposited on to a featureless background spectrum. By default,
-the peculiar velocity of the gas is included as a Doppler redshift in addition
-to any cosmological redshift of the data dump itself.
-
-Subgrid Deposition
-^^^^^^^^^^^^^^^^^^
-
-For features not resolved (i.e. possessing narrower width than the spectral
-resolution),
-:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
-performs subgrid deposition. The subgrid deposition algorithm creates a number
-of smaller virtual bins, by default the width of the virtual bins is 1/10th
-the width of the spectral feature. The Voigt profile is then deposited
-into these virtual bins where it is resolved, and then these virtual bins
-are numerically integrated back to the resolution of the original spectral bin
-size, yielding accurate equivalent widths values.
-:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
-informs the user how many spectral features are deposited in this fashion.
-
-Tutorial on Creating an Absorption Spectrum
--------------------------------------------
-
-Initializing `AbsorptionSpectrum` Class
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To instantiate an
-:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
-object, the arguments required are the
-minimum and maximum wavelengths (assumed to be in Angstroms), and the number
-of wavelength bins to span this range (including the endpoints)
-
-.. code-block:: python
-
- from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
-
- sp = AbsorptionSpectrum(900.0, 1800.0, 10001)
-
-Adding Features to the Spectrum
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Absorption lines and continuum features can then be added to the spectrum.
-To add a line, you must know some properties of the line: the rest wavelength,
-f-value, gamma value, and the atomic mass in amu of the atom. That line must
-be tied in some way to a field in the dataset you are loading, and this field
-must be added to the LightRay object when it is created. Below, we will
-add the H Lyman-alpha line, which is tied to the neutral hydrogen field
-('H_number_density').
-
-.. code-block:: python
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
-
-In the above example, the *field* argument tells the spectrum generator which
-field from the ray data to use to calculate the column density. The
-``label_threshold`` keyword tells the spectrum generator to add all lines
-above a column density of 10 :superscript:`10` cm :superscript:`-2` to the
-text line list output at the end. If None is provided, as is the default,
-no lines of this type will be added to the text list.
-
-Continuum features with optical depths that follow a power law can also be
-added. Like adding lines, you must specify details like the wavelength
-and the field in the dataset and LightRay that is tied to this feature.
-The wavelength refers to the location at which the continuum begins to be
-applied to the dataset, and as it moves to lower wavelength values, the
-optical depth value decreases according to the defined power law. The
-normalization value is the column density of the linked field which results
-in an optical depth of 1 at the defined wavelength. Below, we add the hydrogen
-Lyman continuum.
-
-.. code-block:: python
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 912.323660 # Angstroms
- normalization = 1.6e17
- index = 3.0
-
- sp.add_continuum(my_label, field, wavelength, normalization, index)
-
-Making the Spectrum
-^^^^^^^^^^^^^^^^^^^
-
-Once all the lines and continua are added, it is time to make a spectrum out
-of some light ray data.
-
-.. code-block:: python
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.fits',
- line_list_file='lines.txt')
-
-A spectrum will be made using the specified ray data and the wavelength and
-flux arrays will also be returned. If you set the optional
-``use_peculiar_velocity`` keyword to False, the lines will not incorporate
-doppler redshifts to shift the deposition of the line features.
-
-Three output file formats are supported for writing out the spectrum: fits,
-hdf5, and ascii. The file format used is based on the extension provided
-in the ``output_file`` keyword: ``.fits`` for a fits file,
-``.h5`` for an hdf5 file, and anything else for an ascii file.
-
-.. note:: To write out a fits file, you must install the `astropy `_ python library in order to access the astropy.io.fits module. You can usually do this by simply running `pip install astropy` at the command line.
-
-Generating Spectra in Parallel
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The `AbsorptionSpectrum` analysis module can be run in parallel simply by
-following the procedures laid out in :ref:`parallel-computation` for running
-yt scripts in parallel. Spectrum generation is parallelized using a multi-level
-strategy where each absorption line is deposited by a different processor.
-If the number of available processors is greater than the number of lines,
-then the deposition of individual lines will be divided over multiple
-processors.
-
-Fitting Absorption Spectra
-==========================
-
-.. sectionauthor:: Hilary Egan
-
-This tool can be used to fit absorption spectra, particularly those
-generated using the (``AbsorptionSpectrum``) tool. For more details
-on its uses and implementation please see (`Egan et al. (2013)
-`_). If you find this tool useful we
-encourage you to cite accordingly.
-
-Loading an Absorption Spectrum
-------------------------------
-
-To load an absorption spectrum created by
-(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum``),
-we specify the output file name. It is advisable to use either an .h5
-or .fits file, rather than an ascii file to save the spectrum as rounding
-errors produced in saving to a ascii file will negatively impact fit quality.
-
-.. code-block:: python
-
- f = h5py.File('spectrum.h5', mode='r')
- wavelength = f["wavelength"][:]
- flux = f['flux'][:]
- f.close()
-
-Specifying Species Properties
------------------------------
-
-Before fitting a spectrum, you must specify the properties of all the
-species included when generating the spectrum.
-
-The physical properties needed for each species are the rest wavelength,
-f-value, gamma value, and atomic mass. These will be the same values
-as used to generate the initial absorption spectrum. These values are
-given in list form as some species generate multiple lines (as in the
-OVI doublet). The number of lines is also specified on its own.
-
-To fine tune the fitting procedure and give results in a minimal
-number of optimizing steps, we specify expected maximum and minimum
-values for the column density, Doppler parameter, and redshift. These
-values can be well outside the range of expected values for a typical line
-and are mostly to prevent the algorithm from fitting to negative values
-or becoming numerically unstable.
-
-Common initial guesses for the Doppler parameter and column density should also
-be given. These values will not affect the specific values generated by
-the fitting algorithm, provided they are in a reasonably appropriate range
-(i.e. within the range given by the max and min values for the parameter).
-
-For a spectrum containing both the H Lyman-alpha line and the OVI doublet,
-we set up a fit as shown below.
-
-.. code-block:: python
-
-  HI_parameters = {'name': 'HI',
-                   'f': [.4164],
-                   'Gamma': [6.265E8],
-                   'wavelength': [1215.67],
-                   'numLines': 1,
-                   'maxN': 1E22, 'minN': 1E11,
-                   'maxb': 300, 'minb': 1,
-                   'maxz': 6, 'minz': 0,
-                   'init_b': 30,
-                   'init_N': 1E14}
-
-  OVI_parameters = {'name': 'OVI',
-                    'f': [.1325, .06580],
-                    'Gamma': [4.148E8, 4.076E8],
-                    'wavelength': [1031.9261, 1037.6167],
-                    'numLines': 2,
-                    'maxN': 1E17, 'minN': 1E11,
-                    'maxb': 300, 'minb': 1,
-                    'maxz': 6, 'minz': 0,
-                    'init_b': 20,
-                    'init_N': 1E12}
-
-  speciesDicts = {'HI': HI_parameters, 'OVI': OVI_parameters}
-
-
-Generating Fit of Spectrum
---------------------------
-
-After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an appropriate fit can be generated.
-
-.. code-block:: python
-
-  # generate_total_fit lives in the absorption_spectrum_fit module
-  # referenced throughout this section
-  from yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit import \
-      generate_total_fit
-
-  orderFits = ['OVI', 'HI']
-
-  fitted_lines, fitted_flux = generate_total_fit(wavelength, flux,
-                                                 orderFits, speciesDicts)
-
-The ``orderFits`` variable determines the order in which the species are
-fitted. This may affect the resulting fit, as lines may be attributed to an
-incorrect species. For best results, it is recommended to fit species that
-generate multiple lines first, as a fit will only be accepted if all of the
-lines are fit appropriately using a single set of parameters. At the moment,
-no cross correlation between lines of different species is performed.
-
-The parameters of the lines needed to fit the spectrum are contained
-in the ``fitted_lines`` variable. Each species given in ``orderFits`` will
-be a key in the ``fitted_lines`` dictionary. The entry for each species
-key will be another dictionary containing entries for 'N', 'b', 'z', and
-'group#', which are the column density, Doppler parameter, redshift,
-and associated line complex, respectively. The i :superscript:`th` line
-of a given species is then given by the parameters ``N[i]``, ``b[i]``,
-and ``z[i]`` and is part of the same complex (and was fitted at the same time)
-as all lines with the same group number, ``group#[i]``.
-
-The ``fitted_flux`` is an ndarray of the same size as ``flux`` and
-``wavelength`` that contains the cumulative absorption spectrum generated
-by the lines contained in ``fitted_lines``.
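-
-For example (a minimal sketch, assuming the fit above succeeded and that
-matplotlib is available), the fitted line parameters and the cumulative
-fitted spectrum can be inspected like this:
-
-.. code-block:: python
-
-  import matplotlib.pyplot as plt
-
-  # print the parameters of each fitted HI line
-  HI_fit = fitted_lines['HI']
-  for i in range(len(HI_fit['N'])):
-      print(HI_fit['N'][i], HI_fit['b'][i], HI_fit['z'][i], HI_fit['group#'][i])
-
-  # compare the cumulative fitted spectrum to the input flux
-  plt.plot(wavelength, flux, label='input')
-  plt.plot(wavelength, fitted_flux, label='fit')
-  plt.legend()
-  plt.savefig('fit_comparison.png')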
-
-Saving a Spectrum Fit
----------------------
-
-Saving the results of a fitted spectrum for further analysis is
-accomplished automatically using the h5 file format. A group
-is made for each species that is fit, and each species group has
-a group for the corresponding N, b, z, and group# values.
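-
-As a minimal sketch for inspecting such a file with h5py (the file name below
-is hypothetical; use whatever name the fit was saved under):
-
-.. code-block:: python
-
-  import h5py
-
-  with h5py.File('spectrum_fit.h5', mode='r') as f:
-      for species in f:                     # one group per fitted species
-          print(species, list(f[species]))  # the N, b, z, and group# entries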
-
-.. _fitting_procedure:
-
-Procedure for Generating Fits
------------------------------
-
-.. sectionauthor:: Hilary Egan
-
-To generate a fit for a spectrum
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit`
-is called.
-This function controls the identification of line complexes, the fit
-of a series of absorption lines for each appropriate species, checks of
-those fits, and returns the results of the fits.
-
-Finding Line Complexes
-----------------------
-
-Line complexes are found using the
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.find_complexes`
-function. The process by which line complexes are found involves walking
-through the flux array in order from minimum to maximum wavelength and
-finding sets of spatially contiguous cells whose flux is less than some
-limit. These regions are then checked against an additional flux limit
-and a size requirement. The bounds of all the passing regions are then listed and returned.
-Those bounds that cover an exceptionally large region of wavelength space will
-be broken up if a suitable cut point is found. This method is only appropriate
-for noiseless spectra.
-
-The optional parameter ``complexLim`` (default = 0.999) controls the limit
-that triggers the identification of a spatially contiguous region of flux
-that could be a line complex. This number should be very close to 1 but not
-exactly equal to it. It should also be at least an order of magnitude closer to 1
-than the ``fitLim`` parameter discussed below, because a line complex where
-the flux of the trough is very close to the flux of the edge can be incredibly
-unstable when optimizing.
-
-The ``fitLim`` parameter controls the maximum flux that the trough
-of the region can have and still be considered a line complex. This
-effectively controls the sensitivity to very low column density absorbers.
-The default value is ``fitLim`` = 0.99. If a region is identified where the
-flux of the trough is greater than this value, the region is simply ignored.
-
-The ``minLength`` parameter controls the minimum number of array elements
-that an identified region must have. This value must be greater than or
-equal to 3 as there are a minimum of 3 free parameters that must be fit.
-Default is ``minLength`` = 3.
-
-The ``maxLength`` parameter controls the maximum number of array elements
-that an identified region can have before it is split into separate regions.
-The default is ``maxLength`` = 1000. This should be adjusted based on the
-resolution of the spectrum to remain appropriate; the default value corresponds
-to a wavelength range of roughly 50 angstroms, as in the sketch below.
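-
-A rough way to rescale ``maxLength`` for a different wavelength binning
-(a sketch only, assuming a roughly uniform wavelength grid):
-
-.. code-block:: python
-
-  import numpy as np
-
-  # keep the split threshold at roughly 50 angstroms of wavelength coverage
-  bin_width = np.median(np.diff(wavelength))  # angstroms per array element
-  maxLength = int(50.0 / bin_width)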
-
-The ``splitLim`` parameter controls how exceptionally large regions are split.
-When such a region is identified by having more array elements than
-``maxLength``, the point of maximum flux (or minimum absorption) in the
-middle two quartiles is identified. If that point has a flux greater than
-or equal to ``splitLim``, then two separate complexes are created: one from
-the lower wavelength edge to the minimum absorption point and the other from
-the minimum absorption point to the higher wavelength edge. The default
-value is ``splitLim`` = 0.99, but it should not drastically affect the results,
-so long as the value is reasonably close to 1.
-
-Fitting a Line Complex
-----------------------
-
-After a complex is identified, it is fitted by iteratively adding and
-optimizing a set of Voigt Profiles for a particular species until the
-region is considered successfully fit. The optimizing is accomplished
-using scipy's least squares optimizer. This requires an initial estimate
-of the parameters to be fit (column density, b-value, redshift) for each
-line.
-
-Each time a line is added, the guess of the parameters is based on
-the difference between the line complex and the fit so far. For the first line,
-this just means the initial guess is based solely on the flux of the line
-complex. The column density is taken from the initial column density given
-in the species parameters dictionary. If the line is saturated (some portion
-of the flux has a value less than .1), then the larger initial column density
-guess is chosen. If the flux is relatively high (all values > .9), then the
-smaller initial guess is used. These values are chosen to make optimization
-faster and more stable by being closer to the actual value, but the final
-results of the fitting should not depend on them as they merely provide a
-starting point.
-
-After the parameters for a line are optimized for the first time, the
-optimized parameters are then used for the initial guess on subsequent
-iterations with more lines.
-
-The complex is considered successfully fit when the sum of the squares of
-the difference between the flux generated from the fit and the desired flux
-profile is less than ``errBound``. ``errBound`` is related to the optional
-``maxAvgError`` parameter of
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit`
-by the number of array elements in the region, such that
-``errBound`` = number of elements * ``maxAvgError``.
-
-There are several other conditions under which the cycle of adding and
-optimizing lines will halt. If the error of the optimized fit from adding
-a line is an order of magnitude worse than the error of the fit without
-that line, then it is assumed that the fitting has become unstable and
-the latest line is removed. Lines are also prevented from being added if
-the total number of lines is greater than the number of elements in the flux
-array being fit divided by 3. This is because there must not be more free
-parameters in a fit than the number of points to constrain them.
-
-Checking Fit Results
---------------------
-
-After an acceptable fit for a region is determined, there are several steps
-the algorithm must go through to validate the fits.
-
-First, the parameters must be in a reasonable range. This is a check to make
-sure that the optimization did not become unstable and generate a fit that
-diverges wildly outside the region where the fit was performed. This way, even
-if a particular complex cannot be fit, the rest of the spectrum fitting still
-behaves as expected. The range of acceptability for each parameter is given
-in the species parameter dictionary. These are merely broad limits that will
-prevent numerical instability rather than physical limits.
-
-In cases where a single species generates multiple lines (as in the OVI
-doublet), the fits are then checked for higher wavelength lines. Originally
-the fits are generated only considering the lowest wavelength fit to a region.
-This is because we perform the fitting of complexes in order from the lowest
-wavelength to the highest, so any contribution to a complex being fit must
-come from the lower wavelength as the higher wavelength contributions would
-already have been subtracted out after fitting the lower wavelength.
-
-Saturated Lyman Alpha Fitting Tools
------------------------------------
-
-In cases where a large or saturated line (there exists a point in the complex
-where the flux is less than .1) fails to be fit properly on the first pass, a
-more robust set of fitting tools is used to try to remedy the situation.
-The basic approach is to simply try a much wider range of initial parameter
-guesses in order to find the true optimization minimum, rather than getting
-stuck in a local minimum. A set of hard-coded initial parameter guesses
-for Lyman alpha lines is given by the function
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.get_test_lines`.
-Also included in these parameter guesses is an initial guess of a high
-column cool line overlapping a lower column warm line, indicative of a
-broad Lyman alpha (BLA) absorber.
diff --git a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
deleted file mode 100644
index 1eb73e93c2f..00000000000
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ /dev/null
@@ -1,156 +0,0 @@
-.. _ellipsoid_analysis:
-
-Halo Ellipsoid Analysis
-=======================
-.. sectionauthor:: Geoffrey So
-
-.. warning:: This functionality is currently broken and needs to
- be updated to make use of the :ref:`halo_catalog` framework.
- Anyone interested in doing so should contact the yt-dev list.
-
-Purpose
--------
-
-The purpose of creating this feature in yt is to analyze field
-properties that surround dark matter haloes. Originally, this was
-usually done with the sphere 3D container, but since many halo
-particles are linked together in a more elongated shape, I thought it
-would be better to use an ellipsoid 3D container to wrap around the
-particles. This way, less of the particle-free space around the
-halo is included when analyzing the field properties of the region
-that the particles are supposed to occupy.
-
-General Overview
-----------------
-
-In order to use the ellipsoid 3D container object, one must supply it
-with a center, the magnitudes of the semi-principal axes, the direction
-of the first semi-principal axis, and the tilt angle (the rotation angle about
-the y axis that will align the first semi-principal axis with the x
-axis once it is aligned in the x-z plane).
-
-Once those parameters are determined, the function "ellipsoid" will
-return the 3D object, and users will be able to get field attributes
-from the data object just as they would from spheres, cylinders etc.
-
-Example
--------
-
-To use the ellipsoid container to get field information, you
-will have to first determine the ellipsoid's parameters. This can be
-done with the haloes obtained from halo finding, but essentially it
-takes the information:
-
- #. Center position x,y,z
- #. List of particles position x,y,z
-
-And calculates the ellipsoid information needed for the 3D container.
-
-What I usually do is get this information from the halo finder output
-files in the .h5 HDF5 binary format. I load them into memory using the
-LoadHaloes() function instead of reading in the ASCII output.
-
-Halo Finding
-~~~~~~~~~~~~
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_finding.api import *
-
- ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- halo_list = HaloFinder(ds)
- halo_list.dump('MyHaloList')
-
-Ellipsoid Parameters
-~~~~~~~~~~~~~~~~~~~~
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_finding.api import *
-
- ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- haloes = LoadHaloes(ds, 'MyHaloList')
-
-Once the halo information is saved and loaded into the data
-object ``haloes``, you can loop over the list of haloes and do
-
-.. code-block:: python
-
- ell_param = haloes[0].get_ellipsoid_parameters()
-
-This will return 6 items:
-
-#. The center of mass as an array.
-#. A as a float. (Must have A>=B)
-#. B as a float. (Must have B>=C)
-#. C as a float. (Must have C > cell size)
-#. e0 vector as an array. (now normalized automatically in the code)
-#. tilt as a float.
-
-The center of mass is the same one as returned by the halo
-finder. A, B, and C are the largest to smallest magnitudes of the
-ellipsoid's semi-principal axes. "e0" is the unit vector along the largest
-semi-principal axis (the axis whose magnitude is A).
-The "tilt" is an angle measured in radians. It is best described as
-follows: after rotating about the z-axis to bring e0 into the x-y
-plane, and then rotating about the y-axis to align e0 completely with
-the x-axis, the tilt is the remaining rotation about the x-axis needed
-to align e1 with the y-axis and e2 with the z-axis.
-
-Ellipsoid 3D Container
-~~~~~~~~~~~~~~~~~~~~~~
-
-Once the parameters are obtained from the get_ellipsoid_parameters()
-function, or picked at random by the user, they can be passed to the
-ellipsoid container as:
-
-.. code-block:: python
-
- ell = ds.ellipsoid(ell_param[0],
- ell_param[1],
- ell_param[2],
- ell_param[3],
- ell_param[4],
- ell_param[5])
- dens = ell.quantities['TotalQuantity']('density')[0]
-
-This way, "ell" will be the ellipsoid container, and "dens" will be
-the total density within the ellipsoid in a unigrid simulation. One can,
-of course, use this container object with parameters of their own choosing;
-the ellipsoid parameters do not have to come from the halo
-finder. And of course, one can use the ellipsoid container with other
-derived fields or any fields of interest.
-
-Drawbacks
----------
-
-Since this is a first attempt, there are many drawbacks and corners
-cut. Many things listed here will be amended when I have time.
-
-* The ellipsoid 3D container, like the boolean object, does not contain
-  particle position and velocity information.
-* This currently assumes periodic boundary conditions, so if an
-  ellipsoid center is at the edge, it will return field information from
-  part of the opposite edge. I will try to put in the option to turn off
-  periodicity in the future.
-* This method gives a minimalistic ellipsoid centered around the
- center of mass that contains all the particles, but sometimes people
- prefer an inertial tensor triaxial ellipsoid described in
- `Dubinski, Carlberg 1991
- `_. I have that
- method composed but it is not fully tested yet.
-* The method to obtain information from the halo still uses the center
-  of mass as the center of the ellipsoid, so it is not making the
-  smallest possible ellipsoid that contains the particles. Starting
-  from a center based on the particle positions would require
-  an O(:math:`N^2`) operation; right now I'm trying to limit
-  everything to O(:math:`N`) operations. If the particle count does not
-  get too large, I may implement the O(:math:`N^2`) operation.
-* Currently the list of haloes can be analyzed using object
-  parallelism (one halo per core), but I'm not sure if haloes will get
-  big enough soon that other forms of parallelism will be needed to
-  analyze them due to memory constraints.
-* This has only been tested on unigrid simulation data, not AMR. In
-  unigrid simulations, I can take "dens" from the example and divide
-  it by the total number of cells to get the average density; in AMR
-  one would need to do a volume-weighted average instead (see the
-  sketch after this list).
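-
-As a rough sketch of the volume-weighted alternative mentioned in the last
-point (assuming the standard yt derived quantities are available on the
-ellipsoid container):
-
-.. code-block:: python
-
-  # volume-weighted average density, appropriate for AMR data
-  avg_dens = ell.quantities.weighted_average_quantity('density', 'cell_volume')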
diff --git a/doc/source/analyzing/analysis_modules/exporting.rst b/doc/source/analyzing/analysis_modules/exporting.rst
deleted file mode 100644
index 9636c707f0e..00000000000
--- a/doc/source/analyzing/analysis_modules/exporting.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-Exporting to External Radiation Transport Codes
-===============================================
-
-.. toctree::
- :maxdepth: 2
-
- sunrise_export
- radmc3d_export
\ No newline at end of file
diff --git a/doc/source/analyzing/analysis_modules/halo_analysis.rst b/doc/source/analyzing/analysis_modules/halo_analysis.rst
deleted file mode 100644
index aaca23bcb58..00000000000
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. _halo-analysis:
-
-Halo Analysis
-=============
-
-This section covers halo finding, performing extra analysis on halos,
-and the halo mass function calculator. If you already have halo
-catalogs and simply want to load them into yt, see
-:ref:`halo-catalog-data`.
-
-.. toctree::
- :maxdepth: 2
-
- halo_catalogs
- halo_mass_function
- halo_transition
- halo_merger_tree
- ellipsoid_analysis
diff --git a/doc/source/analyzing/analysis_modules/halo_catalogs.rst b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
deleted file mode 100644
index ea7cfe1099c..00000000000
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ /dev/null
@@ -1,503 +0,0 @@
-.. _halo_catalog:
-
-Halo Finding and Analysis
-=========================
-
-In yt-3.x, halo finding and analysis are combined into a single
-framework called the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
-This framework is substantially different from the halo analysis
-machinery available in yt-2.x and is entirely backward incompatible.
-For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.x, see :ref:`halo-transition`.
-
-.. _halo_catalog_finding:
-
-Halo Finding
-------------
-
-If you already have a halo catalog, either produced by one of the methods
-below or in a format described in :ref:`halo-catalog-data`, and want to
-perform further analysis, skip to :ref:`halo_catalog_analysis`.
-
-Three halo finding methods exist within yt. These are:
-
-* :ref:`fof_finding`: a basic friends-of-friends algorithm (e.g. `Efstathiou et al. (1985)
- `_)
-* :ref:`hop_finding`: `Eisenstein and Hut (1998)
- `_.
-* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
- scales well and does substructure finding (`Behroozi et al.
- 2011 `_)
-
-Halo finding is performed through the creation of a
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
-object. The dataset on which halo finding is to be performed should
-be loaded and given to the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
-along with the ``finder_method`` keyword to specify the method to be
-used.
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_analysis.api import HaloCatalog
-
- data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
- hc.create()
-
-The ``finder_method`` options should be given as "fof", "hop", or
-"rockstar". Each of these methods has its own set of keyword
-arguments to control its functionality. These can be specified in the form
-of a dictionary using the ``finder_kwargs`` keyword.
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_analysis.api import HaloCatalog
-
- data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
- finder_kwargs={"ptype": "stars",
- "padding": 0.02})
- hc.create()
-
-For a full list of keywords for each halo finder, see
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`,
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`,
-and
-:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
-
-.. _fof_finding:
-
-FOF
-^^^
-
-This is a basic friends-of-friends algorithm. See
-`Efstathiou et al. (1985)
-`_ for more
-details as well as
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
-
-.. _hop_finding:
-
-HOP
-^^^
-
-The version of HOP used in yt is an upgraded version of the
-`publicly available HOP code
-`_. Support
-for 64-bit floats and integers has been added, as well as
-parallel analysis through spatial decomposition. HOP builds
-groups in this fashion:
-
-#. Estimates the local density at each particle using a
- smoothing kernel.
-
-#. Builds chains of linked particles by 'hopping' from one
- particle to its densest neighbor. A particle which is
- its own densest neighbor is the end of the chain.
-
-#. All chains that share the same densest particle are
- grouped together.
-
-#. Groups are included, linked together, or discarded
-   depending on the user-supplied overdensity
-   threshold parameter. The default is 160.0.
-
-See the `HOP method paper
-`_ for
-full details as well as
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
-
-.. _rockstar_finding:
-
-Rockstar
-^^^^^^^^
-
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends
-groups in six phase-space dimensions and one time dimension, which
-allows for robust (grid-independent, shape-independent, and
-noise-resilient) tracking of substructure. The code is prepackaged with yt,
-but also `separately available `_. The lead
-developer is Peter Behroozi, and the methods are described in
-`Behroozi et al. 2011 `_.
-In order to run the Rockstar halo finder in yt, make sure you've
-:ref:`installed it so that it can integrate with yt `.
-
-At the moment, Rockstar does not support multiple particle masses,
-instead using a fixed particle mass. This will not affect most dark matter
-simulations, but it does make Rockstar less useful for finding halos based on
-stellar mass. In simulations where the highest-resolution particles all have the
-same mass (i.e. zoom-in grid-based simulations), one can set up a particle
-filter to select the lowest mass particles and perform the halo finding
-only on those. See this cookbook recipe for an example:
-:ref:`cookbook-rockstar-nested-grid`.
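-
-A minimal sketch of such a filter is shown below; the filter name is
-illustrative, and the exact keyword used to hand the filtered particle type to
-the halo finder may differ (see the cookbook recipe above for a complete,
-working setup):
-
-.. code-block:: python
-
-  import yt
-
-  def highres_dm(pfilter, data):
-      # keep only particles at the minimum (highest-resolution) particle mass
-      pmass = data[(pfilter.filtered_type, "particle_mass")]
-      return pmass <= pmass.min()
-
-  yt.add_particle_filter("highres_dm", function=highres_dm,
-                         filtered_type="all", requires=["particle_mass"])
-
-  ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  ds.add_particle_filter("highres_dm")
-  # "highres_dm" can then be handed to the halo finder through finder_kwargs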
-
-To run Rockstar halo finding, you must launch Python with MPI and
-parallelization enabled. While Rockstar itself does not require MPI to run,
-the MPI libraries allow yt to distribute particle information across multiple
-nodes.
-
-.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
- connected by an Infiniband network can be problematic. Therefore, for now
- we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
- using this flag: ``--mca btl ^openib``.
- For example, here is how Rockstar might be called using 24 cores:
- ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
-
-When launched this way, the script (``run_rockstar.py`` in the example above)
-configures the halo finder and launches a server process, which
-disseminates run information and coordinates the writer-reader processes.
-Afterwards, it launches reader and writer tasks, filling the available MPI
-slots, which alternately read particle information and analyze for halo
-content.
-
-The RockstarHaloFinder class has these options that can be supplied to the
-halo catalog through the ``finder_kwargs`` argument (an example follows this list):
-
-* ``dm_type``, the index of the dark matter particle. Default is 1.
-* ``outbase``, This is where the out*list files that Rockstar makes should be
- placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the
-  time). Default is 1.
-* ``num_writers``, the number of writer tasks (which are fed particles and
- do most of the analysis). Default is MPI_TASKS-num_readers-1.
- If left undefined, the above options are automatically
- configured from the number of available MPI tasks.
-* ``force_res``, the resolution that Rockstar uses for various calculations
- and smoothing lengths. This is in units of Mpc/h.
- If no value is provided, this parameter is automatically set to
- the width of the smallest grid element in the simulation from the
- last data snapshot (i.e. the one where time has evolved the
- longest) in the time series:
- ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
-* ``total_particles``, if supplied, this is a pre-calculated
- total number of dark matter
- particles present in the simulation. For example, this is useful
- when analyzing a series of snapshots where the number of dark
- matter particles should not change and this will save some disk
- access time. If left unspecified, it will
- be calculated automatically. Default: ``None``.
-* ``dm_only``, if set to ``True``, it will be assumed that there are
- only dark matter particles present in the simulation.
- This option does not modify the halos found by Rockstar, however
- this option can save disk access time if there are no star particles
- (or other non-dark matter particles) in the simulation. Default: ``False``.
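-
-For example (a sketch only, with illustrative values), several of these
-options can be passed together through ``finder_kwargs``; as noted above, the
-script must be launched with ``mpirun`` and the ``--parallel`` flag:
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.halo_analysis.api import HaloCatalog
-
-  data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  hc = HaloCatalog(data_ds=data_ds, finder_method='rockstar',
-                   finder_kwargs={"num_readers": 1,
-                                  "num_writers": 4,
-                                  "outbase": "rockstar_halos"})
-  hc.create()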
-
-Rockstar dumps halo information in a series of text (halo*list and
-out*list) and binary (halo*bin) files inside the ``outbase`` directory.
-We use the halo list classes to recover the information.
-
-Inside the ``outbase`` directory there is a text file named ``datasets.txt``
-that records the connection between ds names and the Rockstar file names.
-
-.. _rockstar-installation:
-
-Installing Rockstar
-"""""""""""""""""""
-
-Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar. This version of Rockstar has been
-slightly patched and modified to run as a library inside of yt. By default it
-is not installed with yt, but installation is very easy. The
-:ref:`install-script` used to install yt from source has a line:
-``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``. You can
-rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.
-You can do this as follows. Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of
-``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
-
-.. code-block:: bash
-
-  cd $YT_DEST
-  cd ..
-  vi install_script.sh   # or use your favorite editor to set INST_ROCKSTAR=1
-  bash < install_script.sh
-
-This will download Rockstar and install it as a library in yt.
-
-.. _halo_catalog_analysis:
-
-Extra Halo Analysis
--------------------
-
-As a reminder, all halo catalogs created by the methods outlined in
-:ref:`halo_catalog_finding` as well as those in the formats discussed in
-:ref:`halo-catalog-data` can be loaded in to yt as first-class datasets.
-Once a halo catalog has been created, further analysis can be performed
-by providing both the halo catalog and the original simulation dataset to
-the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
-
-.. code-block:: python
-
- halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
- data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
-
-A data object can also be supplied via the keyword ``data_source``,
-associated with either dataset, to control the spatial region in
-which halo analysis will be performed.
-
-The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
-allows the user to create a pipeline of analysis actions that will be
-performed on all halos in the existing catalog. The analysis can be
-performed in parallel with separate processors or groups of processors
-being allocated to perform the entire pipeline on individual halos.
-The pipeline is set up by adding actions to the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
-Each action is represented by a callback function that will be run on
-each halo. There are four types of actions:
-
-* :ref:`halo_catalog_filters`
-* :ref:`halo_catalog_quantities`
-* :ref:`halo_catalog_callbacks`
-* :ref:`halo_catalog_recipes`
-
-A list of all available filters, quantities, and callbacks can be found in
-:ref:`halo_analysis_ref`.
-All interaction with this analysis can be performed by importing from
-halo_analysis.
-
-.. _halo_catalog_filters:
-
-Filters
-^^^^^^^
-
-A filter is a function that returns True or False. If the return value
-is True, any further queued analysis will proceed and the halo in
-question will be added to the final catalog. If the return value is False,
-further analysis will not be performed and the halo will not be included
-in the final catalog.
-
-An example of adding a filter:
-
-.. code-block:: python
-
- hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
-
-Currently, ``quantity_value`` is the only available filter, but more can be
-added by the user by defining a function that accepts a halo object as
-the first argument and then adding it as an available filter. If you
-think that your filter may be of use to the general community, you can
-add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a
-pull request.
-
-An example of defining your own filter:
-
-.. code-block:: python
-
- def my_filter_function(halo):
-
- # Define condition for filter
- filter_value = True
-
- # Return a boolean value
- return filter_value
-
- # Add your filter to the filter registry
- add_filter("my_filter", my_filter_function)
-
- # ... Later on in your script
- hc.add_filter("my_filter")
-
-.. _halo_catalog_quantities:
-
-Quantities
-^^^^^^^^^^
-
-A quantity is a callback that returns a value or values. The return values
-are stored within the halo object in a dictionary called "quantities." At
-the end of the analysis, all of these quantities will be written to disk as
-the final form of the generated halo catalog.
-
-Quantities may be available in the initial fields found in the halo catalog,
-or calculated from a function after supplying a definition. An example
-definition of center of mass is shown below. Currently available quantities
-are center_of_mass and bulk_velocity. Their definitions are available in
-``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that
-your quantity may be of use to the general community, add it to
-``halo_quantities.py`` and issue a pull request. Default halo quantities are:
-
-* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
-* ``particle_mass`` -- Mass of halo
-* ``particle_position_x`` -- Location of halo
-* ``particle_position_y`` -- Location of halo
-* ``particle_position_z`` -- Location of halo
-* ``virial_radius`` -- Virial radius of halo
-
-An example of adding a quantity:
-
-.. code-block:: python
-
- hc.add_quantity('center_of_mass')
-
-An example of defining your own quantity:
-
-.. code-block:: python
-
- def my_quantity_function(halo):
- # Define quantity to return
- quantity = 5
-
- return quantity
-
- # Add your filter to the filter registry
- add_quantity('my_quantity', my_quantity_function)
-
-
- # ... Later on in your script
- hc.add_quantity("my_quantity")
-
-This quantity will then be accessible for functions called later via the
-*quantities* dictionary that is associated with the halo object.
-
-.. code-block:: python
-
- def my_new_function(halo):
- print(halo.quantities["my_quantity"])
- add_callback("print_quantity", my_new_function)
-
- # ... Anywhere after "my_quantity" has been called
- hc.add_callback("print_quantity")
-
-.. _halo_catalog_callbacks:
-
-Callbacks
-^^^^^^^^^
-
-A callback is actually the superclass of quantities and filters and
-is a general-purpose function that does something, anything, to a Halo
-object. This can include hanging new attributes off the Halo object,
-performing analysis and writing to disk, etc. A callback does not return
-anything.
-
-Here is an example of using a pre-defined callback to create a sphere for
-each halo with a radius that is twice the saved ``radius``:
-
-.. code-block:: python
-
- hc.add_callback("sphere", factor=2.0)
-
-Currently available callbacks are located in
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may
-be added by using the syntax shown below. If you think that your
-callback may be of use to the general community, add it to
-``halo_callbacks.py`` and issue a pull request.
-
-An example of defining your own callback:
-
-.. code-block:: python
-
- def my_callback_function(halo):
- # Perform some callback actions here
- x = 2
- halo.x_val = x
-
- # Add the callback to the callback registry
- add_callback('my_callback', my_callback_function)
-
-
- # ... Later on in your script
- hc.add_callback("my_callback")
-
-.. _halo_catalog_recipes:
-
-Recipes
-^^^^^^^
-
-Recipes allow you to create analysis tasks that consist of a series of
-callbacks, quantities, and filters that are run in succession. An example
-of this is
-:func:`~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities`,
-which calculates virial quantities by first creating a sphere container,
-performing 1D radial profiles, and then interpolating to get values at a
-specified threshold overdensity. All of these operations are separate
-callbacks, but the recipes allow you to add them to your analysis pipeline
-with one call. For example,
-
-.. code-block:: python
-
- hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
-
-The available recipes are located in
-``yt/analysis_modules/halo_analysis/halo_recipes.py``. New recipes can be
-created in the following manner:
-
-.. code-block:: python
-
- def my_recipe(halo_catalog, fields, weight_field=None):
- # create a sphere
- halo_catalog.add_callback("sphere")
- # make profiles
- halo_catalog.add_callback("profile", ["radius"], fields,
- weight_field=weight_field)
- # save the profile data
- halo_catalog.add_callback("save_profiles", output_dir="profiles")
-
- # add recipe to the registry of recipes
- add_recipe("profile_and_save", my_recipe)
-
-
- # ... Later on in your script
- hc.add_recipe("profile_and_save", ["density", "temperature"],
- weight_field="cell_mass")
-
-Note that, unlike callback, filter, and quantity functions, which take a ``Halo``
-object as the first argument, recipe functions should take a ``HaloCatalog``
-object as the first argument.
-
-Running the Pipeline
---------------------
-
-After all callbacks, quantities, and filters have been added, the
-analysis begins with a call to ``HaloCatalog.create``.
-
-.. code-block:: python
-
- hc.create()
-
-The ``save_halos`` keyword determines whether the actual Halo objects
-are saved after analysis on them has completed, or whether just the
-contents of their quantities dicts will be retained for creating the
-final catalog. The looping over halos uses a call to ``parallel_objects``,
-allowing the user to control how many processors work on each halo.
-The final catalog is written to disk in the output directory given
-when the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
-object was created.
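-
-As a sketch (assuming ``create`` exposes the ``save_halos`` keyword described
-here and the usual ``parallel_objects``-style ``njobs`` keyword):
-
-.. code-block:: python
-
-  # keep only the quantities dicts and give each halo its own processor group
-  hc.create(save_halos=False, njobs=-1)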
-
-All callbacks, quantities, and filters are stored in an actions list,
-meaning that they are executed in the same order in which they were added.
-This enables the use of simple, reusable, single action callbacks that
-depend on each other. This also prevents unnecessary computation by allowing
-the user to add filters at multiple stages to skip remaining analysis if it
-is not warranted.
-
-Saving and Reloading Halo Catalogs
-----------------------------------
-
-A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
-saved to disk can be reloaded as a yt dataset with the
-standard call to ``yt.load``. See :ref:`halocatalog` for a demonstration
-of loading and working only with the catalog.
-Any side data, such as profiles, can be reloaded
-with a ``load_profiles`` callback and a call to
-:func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
-
-.. code-block:: python
-
-  # "path" should point to the directory containing the saved halo catalogs
-  path = "./"
-  hds = yt.load(path + "halo_catalogs/catalog_0046/catalog_0046.0.h5")
-  hc = HaloCatalog(halos_ds=hds,
-                   output_dir="halo_catalogs/catalog_0046")
-  hc.add_callback("load_profiles", output_dir="profiles",
-                  filename="virial_profiles")
-  hc.load()
-
-Halo Catalog in Action
-----------------------
-
-For a full example of how to use these methods together see
-:ref:`halo-analysis-example`.
diff --git a/doc/source/analyzing/analysis_modules/halo_mass_function.rst b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
deleted file mode 100644
index 33cb7aa9057..00000000000
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ /dev/null
@@ -1,238 +0,0 @@
-.. _halo_mass_function:
-
-.. note::
-
- This module has been deprecated as it no longer functions correctly and is
- unmaintained. The code has been moved to the `yt attic
- `__. If you'd like to take it
- over, please do!
-
-Halo Mass Function
-==================
-
-The Halo Mass Function extension is capable of outputting the halo mass function
-for a collection of halos (input), and/or an analytical fit over a given mass range
-for a set of specified cosmological parameters.
-This extension is based on code generously provided by Brian O'Shea.
-
-General Overview
-----------------
-
-A halo mass function can be created for the halos identified in a cosmological
-simulation, as well as analytic fits using any arbitrary set of cosmological
-parameters. In order to create a mass function for simulated halos, they must
-first be identified (using HOP, FOF, or Rockstar, see
-:ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
-halo masses will then be found, and can be compared to the analytic prediction
-at the same redshift and using the same cosmological parameters as were used
-in the simulation. Care should be taken in this regard, as the analytic fit
-requires the specification of cosmological parameters that are not necessarily
-stored in the halo or simulation datasets, and must be specified by the user.
-Efforts have been made to set reasonable defaults for these parameters, but
-setting them to identically match those used in the simulation will produce a
-much better comparison.
-
-Analytic halo mass functions can also be created without a halo dataset by
-providing either a simulation dataset or specifying cosmological parameters by
-hand. yt includes 5 analytic fits for the halo mass function which can be
-selected.
-
-
-Analytical Fits
----------------
-
-There are five analytical fits to choose from.
-
- 1. `Press-Schechter (1974) `_
- 2. `Jenkins (2001) `_
- 3. `Sheth-Tormen (2002) `_
- 4. `Warren (2006) `_
- 5. `Tinker (2008) `_
-
-We encourage reading each of the primary sources.
-In general, we recommend the Warren fitting function because it matches
-simulations over a wide range of masses very well.
-The Warren fitting function is the default (equivalent to not specifying
-``fitting_function`` in ``HaloMassFcn()``, below).
-The Tinker fit is for the :math:`\Delta=300` fits given in the paper, which
-appears to fit HOP threshold=80.0 fairly well.
-
-
-Basic Halo Mass Function Creation
----------------------------------
-
-The simplest way to create a halo mass function object is to simply pass it no
-arguments and let it use the default cosmological parameters.
-
-.. code-block:: python
-
- from yt.analysis_modules.halo_mass_function.api import *
-
- hmf = HaloMassFcn()
-
-This will create a ``HaloMassFcn`` object with arrays holding the information
-about the analytic mass function attached to it. Creating the halo mass function
-for a set of simulated halos requires only the loaded halo dataset to be passed
-as an argument. This also creates the analytic mass function using all parameters
-that can be extracted from the halo dataset, at the same redshift, spanning a
-similar range of halo masses.
-
-.. code-block:: python
-
- from yt.mods import *
- from yt.analysis_modules.halo_mass_function.api import *
-
- my_halos = load("rockstar_halos/halos_0.0.bin")
- hmf = HaloMassFcn(halos_ds=my_halos)
-
-A simulation dataset can be passed along with additional cosmological parameters
-to create an analytic mass function.
-
-.. code-block:: python
-
- from yt.mods import *
- from yt.analysis_modules.halo_mass_function.api import *
-
- my_ds = load("RD0027/RedshiftOutput0027")
- hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96,
- sigma8 = 0.8, log_mass_min=5, log_mass_max=9)
-
-The analytic mass function can be created for a set of arbitrary cosmological
-parameters without any dataset being passed as an argument.
-
-.. code-block:: python
-
- from yt.mods import *
- from yt.analysis_modules.halo_mass_function.api import *
-
- hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27,
- omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
- log_mass_min=5, log_mass_max=9, fitting_function=5)
-
-Keyword Arguments
------------------
-
-* **simulation_ds** (*Simulation dataset object*)
- The loaded simulation dataset, used to set cosmological parameters.
- Default : None.
-
-* **halos_ds** (*Halo dataset object*)
- The halos from a simulation to be used for creation of the
- halo mass function in the simulation.
- Default : None.
-
-* **make_analytic** (*bool*)
- Whether or not to calculate the analytic mass function to go with
- the simulated halo mass function. Automatically set to true if a
- simulation dataset is provided.
- Default : True.
-
-* **omega_matter0** (*float*)
- The fraction of the universe made up of matter (dark and baryonic).
- Default : 0.2726.
-
-* **omega_lambda0** (*float*)
- The fraction of the universe made up of dark energy.
- Default : 0.7274.
-
-* **omega_baryon0** (*float*)
- The fraction of the universe made up of baryonic matter. This is not
- always stored in the dataset and should be checked by hand.
- Default : 0.0456.
-
-* **hubble0** (*float*)
- The expansion rate of the universe in units of 100 km/s/Mpc.
- Default : 0.704.
-
-* **sigma8** (*float*)
- The amplitude of the linear power spectrum at z=0 as specified by
- the rms amplitude of mass-fluctuations in a top-hat sphere of radius
- 8 Mpc/h. This is not always stored in the dataset and should be
- checked by hand.
- Default : 0.86.
-
-* **primordial_index** (*float*)
- This is the index of the mass power spectrum before modification by
- the transfer function. A value of 1 corresponds to the scale-free
- primordial spectrum. This is not always stored in the dataset and
- should be checked by hand.
- Default : 1.0.
-
-* **this_redshift** (*float*)
- The current redshift.
- Default : 0.
-
-* **log_mass_min** (*float*)
-  The log10 of the minimum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo
-  dataset is provided. If a halo dataset is not provided and no value
-  is specified, it will be set to 5. Units: M_solar
-  Default : None.
-
-* **log_mass_max** (*float*)
-  The log10 of the maximum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo
-  dataset is provided. If a halo dataset is not provided and no value
-  is specified, it will be set to 16. Units: M_solar
-  Default : None.
-
-* **num_sigma_bins** (*float*)
- The number of bins (points) to use for the calculation of the
- analytic mass function.
- Default : 360.
-
-* **fitting_function** (*int*)
- Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins,
- 3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
- Default : 4.
-
-Outputs
--------
-
-A ``HaloMassFcn`` object has several arrays hanging off of it, containing the following:
-
-* **masses_sim**: Halo masses from simulated halos. Units: M_solar
-
-* **n_cumulative_sim**: Number density of halos with mass greater than the
- corresponding mass in masses_sim. Units: comoving Mpc^-3
-
-* **masses_analytic**: Masses used for the generation of the analytic mass
- function. Units: M_solar
-
-* **n_cumulative_analytic**: Number density of halos with mass greater than
-  the corresponding mass in masses_analytic. Units: comoving Mpc^-3
-
-* **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM.
-
-After the mass function has been created for both the simulated halos and the
-corresponding analytic fits, they can be plotted through something along the
-lines of
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_mass_function.api import *
- import matplotlib.pyplot as plt
-
- my_halos = yt.load("rockstar_halos/halos_0.0.bin")
- hmf = HaloMassFcn(halos_ds=my_halos)
-
- plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
- plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
-
-Attached to ``hmf`` is the convenience function ``write_out``, which saves the
-halo mass function to a text file (continuing from the example above):
-
-.. code-block:: python
-
- hmf.write_out(prefix='hmf', analytic=True, simulated=True)
-
-This writes the files ``hmf-analytic.dat`` with columns:
-
-* mass [Msun]
-* cumulative number density of halos [comoving Mpc^-3]
-* (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3]
-
-and the file ``hmf-simulated.dat`` with columns:
-
-* mass [Msun]
-* cumulative number density of halos [comoving Mpc^-3]
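-
-A minimal sketch for reading these text files back in (the column layout is
-as listed above):
-
-.. code-block:: python
-
-  import numpy as np
-
-  mass, n_cum, dn_dM = np.loadtxt('hmf-analytic.dat', unpack=True)
-  mass_sim, n_cum_sim = np.loadtxt('hmf-simulated.dat', unpack=True)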
diff --git a/doc/source/analyzing/analysis_modules/halo_merger_tree.rst b/doc/source/analyzing/analysis_modules/halo_merger_tree.rst
deleted file mode 100644
index c66e32248b7..00000000000
--- a/doc/source/analyzing/analysis_modules/halo_merger_tree.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-.. _merger_tree:
-
-Halo Merger Tree
-================
-
-The ``yt`` merger tree was removed as of :code:`yt-3.0`. This
-functionality can still be found in :code:`yt-2.x`. However,
-the recommended option is to use the
-`ytree `_ package, which can be
-installed via pip:
-
-.. code-block:: bash
-
- pip install ytree
-
-For more information on ``ytree``, see the documentation
-`here `__.
diff --git a/doc/source/analyzing/analysis_modules/halo_transition.rst b/doc/source/analyzing/analysis_modules/halo_transition.rst
deleted file mode 100644
index 507595c9cdb..00000000000
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ /dev/null
@@ -1,111 +0,0 @@
-.. _halo-transition:
-
-Transitioning From yt-2 to yt-3
-===============================
-
-If you're used to halo analysis in yt-2.x, here's a guide to
-how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure. If you're starting
-from scratch, see :ref:`halo_catalog`.
-
-Finding Halos
--------------
-
-Previously, halos were found using calls to ``HaloFinder``,
-``FOFHaloFinder`` and ``RockstarHaloFinder``. Now it is
-encouraged that you find the halos upon creation of the halo catalog
-by supplying a value to the ``finder_method`` keyword when calling
-``HaloCatalog``. Currently, only halos found using Rockstar or a
-previous instance of a halo catalog can be loaded
-using the ``halos_ds`` keyword.
-
-To pass additional arguments to the halo finders
-themselves, supply a dictionary to ``finder_kwargs`` where
-each key in the dictionary is a keyword of the halo finder
-and the corresponding value is the value to be passed for
-that keyword.
-
-Getting Halo Information
-------------------------
-
-All quantities that used to be present in a ``halo_list`` can still
-be found, but they are not necessarily included by default.
-Every halo will by default have the following properties:
-
-* particle_position_i (where i can be x,y,z)
-* particle_mass
-* virial_radius
-* particle_identifier
-
-If other quantities are desired, they can be included by adding
-the corresponding quantity before the catalog is created. See
-the full halo catalog documentation for further information about
-how to add these quantities and what quantities are available.
-
-You no longer have to iterate over halos in the ``halo_list``.
-Now a halo dataset can be treated as a regular dataset and
-all quantities are available by accessing ``all_data``.
-Specifically, all quantities can be accessed as shown:
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_analysis.api import HaloCatalog
- data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
- hc.create()
- ad = hc.halos_ds.all_data()
- masses = ad['particle_mass'][:]
-
-
-Prefiltering Halos
-------------------
-
-Prefiltering halos before analysis takes place is now done
-by adding a filter before the call to ``create``. An example
-is shown below:
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_analysis.api import HaloCatalog
- data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
- hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
- hc.create()
-
-Profiling Halos
----------------
-
-The halo profiler available in yt-2.x has been removed, and
-profiling functionality is now completely contained within the
-halo catalog. A complete example of how to profile halos by
-radius using the new infrastructure is given in
-:ref:`halo-analysis-example`.
-
-Plotting Halos
---------------
-
-Annotating halo locations onto a slice or projection works in
-the same way as in yt-2.x, but now a halo catalog must be
-passed to the ``annotate_halos`` call rather than a halo list.
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.halo_analysis.api import HaloCatalog
-
- data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
- hc.create()
-
- prj = yt.ProjectionPlot(data_ds, 'z', 'density')
- prj.annotate_halos(hc)
- prj.save()
-
-Written Data
-------------
-
-Data is now written out in the form of h5 files rather than
-text files. The directory they are written out to is
-controlled by the keyword ``output_dir``. Each quantity
-is a field in the file.
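-
-As a minimal sketch for inspecting one of the written files with h5py (the
-path below is hypothetical and depends on ``output_dir`` and the dataset name):
-
-.. code-block:: python
-
-  import h5py
-
-  with h5py.File('halo_catalogs/catalog_0046/catalog_0046.0.h5', mode='r') as f:
-      print(list(f))                  # the quantity fields stored in the file
-      masses = f['particle_mass'][:]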
diff --git a/doc/source/analyzing/analysis_modules/index.rst b/doc/source/analyzing/analysis_modules/index.rst
deleted file mode 100644
index 23d7a2c48ef..00000000000
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-.. _analysis-modules:
-
-Topic-Specific Analysis Modules
-===============================
-
-These semi-autonomous analysis modules are unique to specific subject matter
-like tracking halos, generating synthetic observations, exporting output to
-external visualization routines, and more. Because they are somewhat
-specialized, they exist in their own corners of yt, and they do not get loaded
-by default when you :code:`import yt`. Read up on these advanced tools below.
-
-.. toctree::
- :maxdepth: 2
-
- cosmology_calculator
- halo_analysis
- synthetic_observation
- exporting
- two_point_functions
- clump_finding
diff --git a/doc/source/analyzing/analysis_modules/light_cone_generator.rst b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
deleted file mode 100644
index 0de16608277..00000000000
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ /dev/null
@@ -1,140 +0,0 @@
-.. _light-cone-generator:
-
-Light Cone Generator
-====================
-
-Light cones are created by stacking multiple datasets together to
-continuously span a given redshift interval. To make a projection of a
-field through a light cone, the width of individual slices is adjusted
-such that each slice has the same angular size.
-Each slice is randomly shifted and projected along a random axis to
-ensure that the same structures are not sampled multiple times. A
-recipe for creating a simple light cone projection can be found in
-the cookbook under :ref:`cookbook-light_cone`.
-
-.. image:: _images/LightCone_full_small.png
- :width: 500
-
-A light cone projection of the thermal Sunyaev-Zeldovich Y parameter from
-z = 0 to 0.4 with a 450x450 arcminute field of view using 9 individual
-slices. The panels show the contributions from the 9 individual slices, with
-the final light cone image shown in the bottom right.
-
-Configuring the Light Cone Generator
-------------------------------------
-
-The required arguments to instantiate a
-:class:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone`
-object are the path to the simulation parameter file, the simulation type, the
-nearest redshift, and the furthest redshift of the light cone.
-
-.. code-block:: python
-
- from yt.analysis_modules.cosmological_observation.api import \
- LightCone
-
- lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
- 'Enzo', 0., 0.1)
-
-The additional keyword arguments are:
-
-* ``use_minimum_datasets`` (*bool*): If True, the minimum number of
- datasets is used to connect the initial and final redshift. If False,
- the light cone solution will contain as many entries as possible within
- the redshift interval. Default: True.
-
-* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between
- consecutive datasets in the returned list. Default: 0.0.
-
-* ``minimum_coherent_box_fraction`` (*float*): Used with
- ``use_minimum_datasets`` set to False, this parameter specifies the
- fraction of the total box size to be traversed before rerandomizing the
- projection axis and center. This was invented to allow light cones with
- thin slices to sample coherent large scale structure, but in practice does
- not work so well. Try setting this parameter to 1 and see what happens.
- Default: 0.0.
-
-* ``time_data`` (*bool*): Whether or not to include time outputs when
- gathering datasets for time series. Default: True.
-
-* ``redshift_data`` (*bool*): Whether or not to include redshift outputs
- when gathering datasets for time series. Default: True.
-
-* ``set_parameters`` (*dict*): Dictionary of parameters to attach to
- ds.parameters. Default: None.
-
-* ``output_dir`` (*string*): The directory in which images and data files
- will be written. Default: 'LC'.
-
-* ``output_prefix`` (*string*): The prefix of all images and data files.
- Default: 'LightCone'.
-
-Creating Light Cone Solutions
------------------------------
-
-A light cone solution consists of a list of datasets spanning a redshift
-interval with a random orientation for each dataset. A new solution
-is calculated with the
-:func:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone.calculate_light_cone_solution`
-function:
-
-.. code-block:: python
-
- lc.calculate_light_cone_solution(seed=123456789, filename='lightcone.dat')
-
-The keyword arguments are:
-
-* ``seed`` (*int*): the seed for the random number generator. Any light
- cone solution can be reproduced by giving the same random seed.
- Default: None.
-
-* ``filename`` (*str*): if given, a text file detailing the solution will be
- written out. Default: None.
-
-Making a Light Cone Projection
-------------------------------
-
-With the light cone solution in place, projections with a given field of
-view and resolution can be made of any available field:
-
-.. code-block:: python
-
- field = 'density'
- field_of_view = (600.0, "arcmin")
- resolution = (60.0, "arcsec")
- lc.project_light_cone(field_of_view, resolution,
- field, weight_field=None,
- save_stack=True,
- save_slice_images=True)
-
-The field of view and resolution can be specified either as a tuple of
-value and unit string or as a unitful ``YTQuantity``.
-Additional keyword arguments:
-
-* ``weight_field`` (*str*): the weight field of the projection. This has
- the same meaning as in standard projections. Default: None.
-
-* ``photon_field`` (*bool*): if True, the projection data for each slice is
- divided by 4 pi R :superscript:`2`, where R is the luminosity
- distance between the observer and the slice redshift. Default: False.
-
-* ``save_stack`` (*bool*): if True, the unflattened light cone data, including
-  each individual slice, is written to an HDF5 file. Default: True.
-
-* ``save_final_image`` (*bool*): if True, save an image of the final light
- cone projection. Default: True.
-
-* ``save_slice_images`` (*bool*): save images for each individual projection
- slice. Default: False.
-
-* ``cmap_name`` (*string*): color map for images. Default: "algae".
-
-* ``njobs`` (*int*): The number of parallel jobs over which the light cone
- projection will be split. Choose -1 for one processor per individual
- projection and 1 to have all processors work together on each projection.
- Default: 1.
-
-* ``dynamic`` (*bool*): If True, use dynamic load balancing to create the
- projections. Default: False.
-
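-Equivalently, the field of view and resolution above can be passed as
-``YTQuantity`` objects (a sketch of the same call; the import path is the
-standard yt one):
-
-.. code-block:: python
-
-    from yt.units.yt_array import YTQuantity
-
-    fov = YTQuantity(600.0, "arcmin")
-    res = YTQuantity(60.0, "arcsec")
-    lc.project_light_cone(fov, res, 'density', weight_field=None)
-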
-.. note:: As of :code:`yt-3.0`, the halo mask and unique light cone functionality no longer exist. These are still available in :code:`yt-2.x`. If you would like to use these features in :code:`yt-3.x`, help is needed to port them over. Contact the yt-users mailing list if you are interested in doing this.
diff --git a/doc/source/analyzing/analysis_modules/light_ray_generator.rst b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
deleted file mode 100644
index 6a638e8b641..00000000000
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ /dev/null
@@ -1,235 +0,0 @@
-.. _light-ray-generator:
-
-Light Ray Generator
-===================
-
-.. note::
-
- Development of the LightRay module has been moved to the Trident
- package. This version is deprecated and will be removed from yt
- in a future release. See https://github.com/trident-project/trident
- for further information.
-
-Light rays are similar to light cones (:ref:`light-cone-generator`) in how
-they stack multiple datasets together to span a redshift interval. Unlike
-light cones, which stack randomly oriented projections from each
-dataset to create synthetic images, light rays use thin pencil beams to
-simulate QSO sight lines. A sample script can be found in the cookbook
-under :ref:`cookbook-light_ray`.
-
-.. image:: _images/lightray.png
-
-A ray segment records the information of all grid cells intersected by the
-ray as well as the path length, ``dl``, of the ray through the cell. Column
-densities can be calculated by multiplying physical densities by the path
-length.
-
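-For instance, once a ray has been written to disk (see the sections below), a
-column density could be computed along these lines (a sketch; it assumes the
-ray was saved with a ``data_filename`` of ``"ray.h5"`` and includes the
-``density`` field):
-
-.. code-block:: python
-
-    import yt
-
-    ray = yt.load("ray.h5")
-    ad = ray.all_data()
-    # column (mass) density: sum of density times path length per cell
-    column_density = (ad["density"] * ad["dl"]).sum()
-    print(column_density.in_units("g/cm**2"))
-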
-Configuring the Light Ray Generator
------------------------------------
-
-Below follows the creation of a light ray from multiple datasets stacked
-together. However, a light ray can also be made from a single dataset.
-For an example of this, see :ref:`cookbook-single-dataset-light-ray`.
-
-The arguments required to instantiate a
-:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
-object are the same as
-those required for a
-:class:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone`
-object: the simulation parameter file, the
-simulation type, the nearest redshift, and the furthest redshift.
-
-.. code-block:: python
-
- from yt.analysis_modules.cosmological_observation.api import LightRay
- lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
- simulation_type="Enzo",
- near_redshift=0.0, far_redshift=0.1)
-
-Additional keyword arguments are:
-
-* ``use_minimum_datasets`` (*bool*): If True, the minimum number of datasets
- is used to connect the initial and final redshift. If False, the light
- ray solution will contain as many entries as possible within the redshift
- interval. Default: True.
-
-* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between
- consecutive datasets in the returned list. Default: 0.0.
-
-* ``max_box_fraction`` (*float*): In terms of the size of the domain, the
- maximum length a light ray segment can be in order to span the redshift interval
- from one dataset to another. If using a zoom-in simulation, this parameter can
- be set to the length of the high resolution region so as to limit ray segments
- to that size. If the high resolution region is not cubical, the smallest side
- should be used. Default: 1.0 (the size of the box)
-
-* ``minimum_coherent_box_fraction`` (*float*): Use to specify the minimum
- length of a ray, in terms of the size of the domain, before the trajectory
- is re-randomized. Set to 0 to have ray trajectory randomized for every
- dataset. Set to np.inf (infinity) to use a single trajectory for the
- entire ray. Default: 0.0.
-
-* ``time_data`` (*bool*): Whether or not to include time outputs when
- gathering datasets for time series. Default: True.
-
-* ``redshift_data`` (*bool*): Whether or not to include redshift outputs
- when gathering datasets for time series. Default: True.
-
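-As an illustrative sketch (the values are placeholders, not recommendations),
-several of these keywords might be combined like so:
-
-.. code-block:: python
-
-    lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
-                  simulation_type="Enzo",
-                  near_redshift=0.0, far_redshift=0.1,
-                  use_minimum_datasets=False,
-                  max_box_fraction=0.5,
-                  minimum_coherent_box_fraction=0.0)
-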
-Making Light Ray Data
----------------------
-
-Once the LightRay object has been instantiated, the
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`
-function will trace out the rays in each dataset and collect information for all the
-fields requested. The output file will be an HDF5 file containing all the
-cell field values for all the cells that were intersected by the ray. A
-single LightRay object can be used over and over to make multiple
-randomizations, simply by changing the value of the random seed with the
-``seed`` keyword.
-
-.. code-block:: python
-
- lr.make_light_ray(seed=8675309,
- fields=['temperature', 'density'],
- use_peculiar_velocity=True)
-
-The keyword arguments are:
-
-* ``seed`` (*int*): Seed for the random number generator. Default: None.
-
-* ``periodic`` (*bool*): If True, ray trajectories will make use of periodic
- boundaries. If False, ray trajectories will not be periodic. Default: True.
-
-* ``left_edge`` (iterable of *floats* or *YTArray*): The left corner of the
- region in which rays are to be generated. If None, the left edge will be
- that of the domain. Default: None.
-
-* ``right_edge`` (iterable of *floats* or *YTArray*): The right corner of
- the region in which rays are to be generated. If None, the right edge
- will be that of the domain. Default: None.
-
-* ``min_level`` (*int*): The minimum refinement level of the spatial region in
- which the ray passes. This can be used with zoom-in simulations where the
- high resolution region does not keep a constant geometry. Default: None.
-
-* ``start_position`` (*list* of floats): Used only if creating a light ray
- from a single dataset. The coordinates of the starting position of the
- ray. Default: None.
-
-* ``end_position`` (*list* of floats): Used only if creating a light ray
- from a single dataset. The coordinates of the ending position of the ray.
- Default: None.
-
-* ``trajectory`` (*list* of floats): Used only if creating a light ray
- from a single dataset. The (r, theta, phi) direction of the light ray.
- Use either ``end_position`` or ``trajectory``, not both.
- Default: None.
-
-* ``fields`` (*list*): A list of fields for which to get data.
- Default: None.
-
-* ``solution_filename`` (*string*): Path to a text file where the
- trajectory of each sub-ray is written out. Default: None.
-
-* ``data_filename`` (*string*): Path to output file for ray data.
- Default: None.
-
-* ``use_peculiar_velocity`` (*bool*): If True, the doppler redshift from
- the peculiar velocity of gas along the ray is calculated and added to the
- cosmological redshift as the "effective" redshift.
- Default: True.
-
-* ``redshift`` (*float*): Used with light rays made from single datasets to
- specify a starting redshift for the ray. If not used, the starting
- redshift will be 0 for a non-cosmological dataset and the dataset redshift
- for a cosmological dataset. Default: None.
-
-* ``njobs`` (*int*): The number of parallel jobs over which the slices for
- the halo mask will be split. Choose -1 for one processor per individual
- slice and 1 to have all processors work together on each projection.
- Default: 1
-
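-As a sketch of the single-dataset case mentioned above (the sample dataset and
-the use of the domain edges are illustrative), ``start_position`` and
-``end_position`` can be used in place of the cosmological solution:
-
-.. code-block:: python
-
-    import yt
-
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    lr = LightRay(ds)
-    lr.make_light_ray(start_position=ds.domain_left_edge,
-                      end_position=ds.domain_right_edge,
-                      fields=['temperature', 'density'],
-                      data_filename="ray.h5")
-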
-Useful Tips for Making LightRays
---------------------------------
-
-Below are some tips that may come in handy for creating proper LightRays.
-
-How many snapshots do I need?
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The number of snapshots required to traverse some redshift interval depends
-on the simulation box size and cosmological parameters. Before running an
-expensive simulation only to find out that you don't have enough outputs
-to span the redshift interval you want, have a look at
-:ref:`planning-cosmology-simulations`. The functionality described there
-will allow you to calculate the precise number of snapshots and specific
-redshifts at which they should be written.
-
-My snapshots are too far apart!
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``max_box_fraction`` keyword, provided when creating the ``LightRay``,
-allows the user to control how long a ray segment can be for an
-individual dataset. By default, the ``LightRay`` generator will try to
-make segments no longer than the size of the box to avoid sampling the
-same structures more than once. However, this can be increased in the
-case that the redshift interval between datasets is longer than the
-box size. Increasing this value should be done with caution as longer
-ray segments run a greater risk of coming back to somewhere near their
-original position.
-
-What if I have a zoom-in simulation?
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A zoom-in simulation has a high resolution region embedded within a
-larger, low resolution volume. In this type of simulation, it is likely
-that you will want the ray segments to stay within the high resolution
-region. To do this, you must first specify the size of the high
-resolution region when creating the ``LightRay`` using the
-``max_box_fraction`` keyword. This will make sure that
-the calculation of the spacing of the segment datasets only takes into
-account the high resolution region and not the full box size. If your
-high resolution region is not a perfect cube, specify the smallest side.
-Then, in the call to
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
-use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
-precise location of the high resolution region.
-
-Technically speaking, the ray segments should no longer be periodic
-since the high resolution region is only a sub-volume within the
-larger domain. To make the ray segments non-periodic, set the
-``periodic`` keyword to False. The LightRay generator will continue
-to generate randomly oriented segments until it finds one that fits
-entirely within the high resolution region. If you have a high
-resolution region that can move and change shape slightly as structure
-forms, use the ``min_level`` keyword to mandate that the ray segment only
-pass through cells that are refined to at least some minimum level.
-
-If the size of the high resolution region is not large enough to
-span the required redshift interval, the ``LightRay`` generator can
-be configured to treat the high resolution region as if it were
-periodic simply by setting the ``periodic`` keyword to True. This
-option should be used with caution as it will lead to the creation
-of disconnected ray segments within a single dataset.
-
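-Putting these pieces together, a zoom-in setup might look like the following
-sketch (the parameter file, edges, and refinement level are placeholders for
-your own simulation):
-
-.. code-block:: python
-
-    lr = LightRay("my_zoom_simulation.par", simulation_type="Enzo",
-                  near_redshift=0.0, far_redshift=0.1,
-                  max_box_fraction=0.2)  # smallest side of the high-res region
-
-    lr.make_light_ray(seed=8675309,
-                      periodic=False,
-                      left_edge=[0.4, 0.4, 0.4],
-                      right_edge=[0.6, 0.6, 0.6],
-                      min_level=5,
-                      fields=['temperature', 'density'],
-                      data_filename="zoom_ray.h5")
-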
-I want a continuous trajectory over the entire ray.
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Set the ``minimum_coherent_box_fraction`` keyword argument to a very
-large number, like infinity (``numpy.inf``).
-
-.. note::
-
- As of :code:`yt-3.0`, the functionality for recording properties of
- the nearest halo to each element of the ray no longer exists. This
- is still available in :code:`yt-2.x`. If you would like to use this
- feature in :code:`yt-3.x`, help is needed to port it over. Contact
- the yt-users mailing list if you are interested in doing this.
-
-What Can I do with this?
-------------------------
-
-Once you have created a ``LightRay``, you can use it to generate an
-:ref:`absorption_spectrum`. In addition, you can use the
-:class:`~yt.visualization.plot_modifications.RayCallback` to
-:ref:`annotate-ray` on your plots.
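-
-For instance, a minimal sketch of the annotation (using a plain ``ds.ray`` on a
-sample dataset as a stand-in for the ray object; a ``LightRay`` can be passed
-in the same way):
-
-.. code-block:: python
-
-    import yt
-
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    ray = ds.ray(ds.domain_left_edge, ds.domain_right_edge)
-    p = yt.ProjectionPlot(ds, 'z', 'density')
-    p.annotate_ray(ray)
-    p.save("projection_with_ray.png")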
diff --git a/doc/source/analyzing/analysis_modules/photon_simulator.rst b/doc/source/analyzing/analysis_modules/photon_simulator.rst
deleted file mode 100644
index c1dcf1d0c6f..00000000000
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ /dev/null
@@ -1,666 +0,0 @@
-.. _photon_simulator:
-
-Constructing Mock X-ray Observations
-------------------------------------
-
-.. warning::
-
- The ``photon_simulator`` analysis module has been deprecated; it is
- no longer being updated, and it will be removed in a future version
- of yt. Users are encouraged to download and use the
- `pyXSIM `_ package
- instead.
-
-.. note::
-
- If you just want to create derived fields for X-ray emission,
- you should go `here `_ instead.
-
-The ``photon_simulator`` analysis module enables the creation of
-simulated X-ray photon lists of events from datasets that yt is able
-to read. The simulated events then can be exported to X-ray telescope
-simulators to produce realistic observations or can be analyzed in-line.
-
-For detailed information about the design of the algorithm in yt, check
-out `the SciPy 2014 Proceedings. `_.
-
-The algorithm is based off of that implemented in
-`PHOX `_ for SPH datasets
-by Veronica Biffi and Klaus Dolag. There are two relevant papers:
-
-`Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420,
-3545 `_
-
-`Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428,
-1395 `_
-
-The basic procedure is as follows:
-
-1. Using a spectral model for the photon flux given the gas properties,
- and an algorithm for generating photons from the dataset loaded in
- yt, produce a large number of photons in three-dimensional space
- associated with the cells of the dataset.
-2. Use this three-dimensional dataset as a sample from which to generate
- photon events that are projected along a line of sight, Doppler and
- cosmologically shifted, and absorbed by the Galactic foreground.
-3. Optionally convolve these photons with instrument responses and
- produce images and spectra.
-
-We'll demonstrate the functionality on a realistic dataset of a galaxy
-cluster to get you started.
-
-.. note::
-
- Currently, the ``photon_simulator`` analysis module only works with grid-based
- data.
-
-Creating an X-ray observation of a dataset on disk
-++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. code:: python
-
- import yt
- #yt.enable_parallelism() # If you want to run in parallel this should go here!
- from yt.analysis_modules.photon_simulator.api import *
- from yt.utilities.cosmology import Cosmology
-
-.. note::
-
- For parallel runs using ``mpi4py``, the call to ``yt.enable_parallelism`` should go *before*
- the import of the ``photon_simulator`` module, as shown above.
-
-We're going to load up an Athena dataset of a galaxy cluster core:
-
-.. code:: python
-
- ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
- units_override={"time_unit":(1.0,"Myr"),
- "length_unit":(1.0,"Mpc"),
- "mass_unit":(1.0e14,"Msun")})
-
-First, to get a sense of what the resulting image will look like, let's
-make a new yt field called ``"density_squared"``, since the X-ray
-emission is proportional to :math:`\rho^2`, and a weak function of
-temperature and metallicity.
-
-.. code:: python
-
- def _density_squared(field, data):
- return data["density"]**2
- ds.add_field("density_squared", function=_density_squared, units="g**2/cm**6")
-
-Then we'll project this field along the z-axis.
-
-.. code:: python
-
- prj = yt.ProjectionPlot(ds, "z", ["density_squared"], width=(500., "kpc"))
- prj.set_cmap("density_squared", "gray_r")
- prj.show()
-
-.. image:: _images/dsquared.png
-
-In this simulation the core gas is sloshing, producing spiral-shaped
-cold fronts.
-
-.. note::
-
- To work out the following examples, you should install
- `AtomDB `_ and get the files from the
- `xray_data `_ auxiliary
- data package (see the :ref:`xray_data_README` for details on the latter).
- Make sure that in what follows you specify the full path to the locations
- of these files.
-
-To generate photons from this dataset, we have several different things
-we need to set up. The first is a standard yt data object. It could
-be all of the cells in the domain, a rectangular solid region, a
-cylindrical region, etc. Let's keep it simple and make a sphere at the
-center of the domain, with a radius of 250 kpc:
-
-.. code:: python
-
- sp = ds.sphere("c", (250., "kpc"))
-
-This will serve as our ``data_source`` that we will use later. Next, we
-need to create the ``SpectralModel`` instance that will determine how
-the data in the grid cells will generate photons. By default, two
-options are available. The first, ``XSpecThermalModel``, allows one to
-use any thermal model that is known to
-`XSPEC `_, such as
-``"mekal"`` or ``"apec"``:
-
-.. code:: python
-
- mekal_model = XSpecThermalModel("mekal", 0.01, 10.0, 2000)
-
-This requires XSPEC and
-`PyXspec `_ to
-be installed. The second option, ``TableApecModel``, utilizes the data
-from the `AtomDB `_ tables. We'll use this one
-here:
-
-.. code:: python
-
- apec_model = TableApecModel("$SPECTRAL_DATA/spectral",
- 0.01, 20.0, 20000,
- thermal_broad=False,
- apec_vers="2.0.2")
-
-The first argument sets the location of the AtomDB files, and the next
-three arguments determine the minimum energy in keV, maximum energy in
-keV, and the number of linearly-spaced bins to bin the spectrum in. If
-the optional keyword ``thermal_broad`` is set to ``True``, the spectral
-lines will be thermally broadened.
-
-.. note::
-
- ``SpectralModel`` objects based on XSPEC models (both the thermal
- emission and Galactic absorption models mentioned below) only work
- in Python 2.7, since currently PyXspec only works with Python 2.x.
-
-Now that we have our ``SpectralModel`` that gives us a spectrum, we need
-to connect this model to a ``PhotonModel`` that will connect the field
-data in the ``data_source`` to the spectral model to actually generate
-photons. For thermal spectra, we have a special ``PhotonModel`` called
-``ThermalPhotonModel``:
-
-.. code:: python
-
- thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
- photons_per_chunk=100000000,
- method="invert_cdf")
-
-Here we pass in the ``SpectralModel``, and can optionally set values for
-the hydrogen mass fraction ``X_H`` and the metallicity ``Zmet``. If
-``Zmet`` is a float, it will assume that value for the metallicity
-everywhere in terms of the solar metallicity. If it is a string, it will
-assume that is the name of the metallicity field (which may be spatially
-varying).
-
-The ``ThermalPhotonModel`` iterates over "chunks" of the supplied data source
-to generate the photons, to reduce memory usage and make parallelization more
-efficient. For each chunk, memory is set aside for the photon energies that will
-be generated. ``photons_per_chunk`` is an optional keyword argument which controls
-the size of this array. For large numbers of photons, you may find that
-this parameter needs to be set higher, or if you are looking to decrease memory
-usage, you might set this parameter lower.
-
-The ``method`` keyword argument is also optional, and determines how the individual
-photon energies are generated from the spectrum. It may be set to one of two values:
-
-* ``method="invert_cdf"``: Construct the cumulative distribution function of the spectrum and invert
- it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
- on construction of the CDF and interpolation between the points, so for some spectra it
- may not be accurate enough).
-* ``method="accept_reject"``: Generate the photon energies from the spectrum using an acceptance-rejection
- technique (accurate, but likely to be slow).
-
-``method="invert_cdf"`` (the default) should be sufficient for most cases.
-
-Next, we need to specify "fiducial" values for the telescope collecting
-area, exposure time, and cosmological redshift. Remember, the initial
-photon generation will act as a source for Monte-Carlo sampling for more
-realistic values of these parameters later, so choose generous values so
-that you have a large number of photons to sample from. We will also
-construct a ``Cosmology`` object:
-
-.. code:: python
-
- A = 3000.
- exp_time = 4.0e5
- redshift = 0.05
- cosmo = Cosmology()
-
-Now, we finally combine everything together and create a ``PhotonList``
-instance:
-
-.. code:: python
-
- photons = PhotonList.from_scratch(sp, redshift, A, exp_time,
- thermal_model, center="c",
- cosmology=cosmo)
-
-By default, the angular diameter distance to the object is determined
-from the ``cosmology`` and the cosmological ``redshift``. If a
-``Cosmology`` instance is not provided, one will be made from the
-default cosmological parameters. The ``center`` keyword argument specifies
-the center of the photon distribution, and the photon positions will be
-rescaled with this value as the origin. This argument accepts the following
-values:
-
-* A NumPy array or list corresponding to the coordinates of the center in
- units of code length.
-* A ``YTArray`` corresponding to the coordinates of the center in some
- length units.
-* ``"center"`` or ``"c"`` corresponds to the domain center.
-* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density.
-* A two-element tuple specifying the max or min of a specific field, e.g.,
- ``("min","gravitational_potential")``, ``("max","dark_matter_density")``
-
-If ``center`` is not specified, ``from_scratch`` will attempt to use the
-``"center"`` field parameter of the ``data_source``.
-
-``from_scratch`` takes a few other optional keyword arguments. If your
-source is local to the galaxy, you can set its distance directly, using
-a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and
-``cosmology`` will be ignored. Finally, if the photon generating
-function accepts any parameters, they can be passed to ``from_scratch``
-via a ``parameters`` dictionary.
-
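-As an illustrative sketch (not part of the original example; the values are
-placeholders), a nearby source with an explicit ``center`` might therefore be
-set up like this:
-
-.. code:: python
-
-    photons = PhotonList.from_scratch(sp, 0.0, A, exp_time, thermal_model,
-                                      center=("max", "density"),
-                                      dist=(30.0, "kpc"))
-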
-At this point, the ``photons`` are distributed in the three-dimensional
-space of the ``data_source``, with energies in the rest frame of the
-plasma. Doppler and/or cosmological shifting of the photons will be
-applied in the next step.
-
-The ``photons`` can be saved to disk in an HDF5 file:
-
-.. code:: python
-
- photons.write_h5_file("my_photons.h5")
-
-This is most useful if it takes a long time to generate the photons,
-because a ``PhotonList`` can be re-created in memory from the data
-stored on disk:
-
-.. code:: python
-
- photons = PhotonList.from_file("my_photons.h5")
-
-This enables one to make many simulated event sets, along different
-projections, at different redshifts, with different exposure times, and
-different instruments, with the same ``data_source``, without having to
-do the expensive step of generating the photons all over again!
-
-To get a set of photon events such as that observed by X-ray telescopes,
-we need to take the three-dimensional photon distribution and project it
-along a line of sight. Also, this is the step at which we put in the
-realistic values for the telescope collecting area, cosmological
-redshift and/or source distance, and exposure time. The order of
-operations goes like this:
-
-1. From the adjusted exposure time, redshift and/or source distance, and
- telescope collecting area, determine the number of photons we will
- *actually* observe.
-2. Determine the plane of projection from the supplied normal vector,
- and reproject the photon positions onto this plane.
-3. Doppler-shift the photon energies according to the velocity along the
- line of sight, and apply cosmological redshift if the source is not
- local.
-4. Optionally, alter the received distribution of photons via an
- energy-dependent galactic absorption model.
-5. Optionally, alter the received distribution of photons using an
- effective area curve provided from an ancillary response file (ARF).
-6. Optionally, scatter the photon energies into channels according to
- the information from a redistribution matrix file (RMF).
-
-First, if we want to apply galactic absorption, we need to set up a
-spectral model for the absorption coefficient, similar to the spectral
-model for the emitted photons we set up before. Here again, we have two
-options. The first, ``XSpecAbsorbModel``, allows one to use any
-absorption model that XSpec is aware of that takes only the Galactic
-column density :math:`N_H` as input:
-
-.. code:: python
-
- N_H = 0.1
- abs_model = XSpecAbsorbModel("wabs", N_H)
-
-The second option, ``TableAbsorbModel``, takes as input an HDF5 file
-containing two datasets, ``"energy"`` (in keV), and ``"cross_section"``
-(in :math:`cm^2`), and the Galactic column density :math:`N_H`:
-
-.. code:: python
-
- abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
-
-Now we're ready to project the photons. First, we choose a line-of-sight
-vector ``normal``. Second, we'll adjust the exposure time and the redshift.
-Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
-specify a ``sky_center`` in RA and DEC on the sky in degrees.
-
-Also, we're going to convolve the photons with instrument ``responses``.
-For this, you need an ARF/RMF pair with matching energy bins. This is of
-course far short of a full simulation of a telescope ray-trace, but it's
-a quick-and-dirty way to get something close to the real thing. We'll
-discuss how to get your simulated events into a format suitable for
-reading by telescope simulation codes later. If you just want to convolve
-the photons with an ARF, you may specify that as the only response, but some
-ARFs are unnormalized and still require the RMF for normalization. Check with
-the documentation associated with these files for details. If we are using the
-RMF to convolve energies, we must set ``convolve_energies=True``.
-
-.. code:: python
-
- ARF = "acisi_aimpt_cy17.arf"
- RMF = "acisi_aimpt_cy17.rmf"
- normal = [0.0,0.0,1.0]
- events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None,
- absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF],
- convolve_energies=True, no_shifting=False, north_vector=None,
- psf_sigma=None)
-
-In this case, we chose a three-vector ``normal`` to specify an arbitrary
-line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to
-project along one of those axes.
-
-``project_photons`` takes several other optional keyword arguments.
-
-* ``no_shifting`` (default ``False``) controls whether or not Doppler
- shifting of photon energies is turned on.
-* ``dist_new`` is a (value, unit) tuple that is used to set a new
- angular diameter distance by hand instead of having it determined
- by the cosmology and the value of the redshift. Should only be used
- for simulations of nearby objects.
-* For off-axis ``normal`` vectors, the ``north_vector`` argument can
- be used to control what vector corresponds to the "up" direction in
- the resulting event list.
-* ``psf_sigma`` may be specified to provide a crude representation of
- a PSF, and corresponds to the standard deviation (in degrees) of a
- Gaussian PSF model.
-
-Let's just take a quick look at the raw events object:
-
-.. code:: python
-
- print(events)
-
-.. code:: python
-
- {'eobs': YTArray([ 0.32086522, 0.32271389, 0.32562708, ..., 8.90600621,
- 9.73534237, 10.21614256]) keV,
- 'xsky': YTArray([ 187.5177707 , 187.4887825 , 187.50733609, ..., 187.5059345 ,
- 187.49897546, 187.47307048]) degree,
- 'ysky': YTArray([ 12.33519996, 12.3544496 , 12.32750903, ..., 12.34907707,
- 12.33327653, 12.32955225]) degree,
- 'ypix': array([ 133.85374195, 180.68583074, 115.14110561, ..., 167.61447493,
- 129.17278711, 120.11508562]),
- 'PI': array([ 27, 15, 25, ..., 609, 611, 672]),
- 'xpix': array([ 86.26331108, 155.15934197, 111.06337043, ..., 114.39586907,
- 130.93509652, 192.50639633])}
-
-
-We can bin up the events into an image and save it to a FITS file. The
-pixel size of the image is equivalent to the smallest cell size from the
-original dataset. We can specify limits for the photon energies to be
-placed in the image:
-
-.. code:: python
-
- events.write_fits_image("sloshing_image.fits", clobber=True, emin=0.5, emax=7.0)
-
-The resulting FITS image will have WCS coordinates in RA and Dec. It
-should be suitable for plotting in
-`ds9 `_, for example.
-There is also a great project for opening astronomical images in Python,
-called `APLpy `_:
-
-.. code:: python
-
- import aplpy
- fig = aplpy.FITSFigure("sloshing_image.fits", figsize=(10,10))
- fig.show_colorscale(stretch="log", vmin=0.1, cmap="gray_r")
- fig.set_axis_labels_font(family="serif", size=16)
- fig.set_tick_labels_font(family="serif", size=16)
-
-.. image:: _images/Photon_Simulator_30_4.png
-
-This is starting to look like a real observation!
-
-.. warning::
-
- The binned images that result, even if you convolve with responses,
- are still of the same resolution as the finest cell size of the
- simulation dataset. If you want a more accurate simulation of a
- particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
-
-We can also bin up the spectrum into energy bins, and write it to a FITS
-table file. This is an example where we've binned up the spectrum
-according to the unconvolved photon energy:
-
-.. code:: python
-
- events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True)
-
-We can also set ``bin_type="channel"``. If we have convolved our events
-with response files, then any other keywords will be ignored and it will
-try to make a spectrum from the channel information that is contained
-within the RMF. Otherwise, the channels will be determined from the ``emin``,
-``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``.
-For now, we'll stick with the energy spectrum, and plot it up:
-
-.. code:: python
-
- import astropy.io.fits as pyfits
- f = pyfits.open("virgo_spec.fits")
- pylab.loglog(f["SPECTRUM"].data.field("ENERGY"), f["SPECTRUM"].data.field("COUNTS"))
- pylab.xlim(0.3, 10)
- pylab.xlabel("E (keV)")
- pylab.ylabel("counts/bin")
-
-.. image:: _images/Photon_Simulator_34_1.png
-
-
-We can also write the events to a FITS file that is of a format that can
-be manipulated by software packages like
-`CIAO `_ and read in by ds9 to do more
-standard X-ray analysis:
-
-.. code:: python
-
- events.write_fits_file("my_events.fits", clobber=True)
-
-.. warning:: We've done some very low-level testing of this feature, and
- it seems to work, but it may not be consistent with standard FITS events
- files in subtle ways that we haven't been able to identify. Please email
- jzuhone@gmail.com if you find any bugs!
-
-Two ``EventList`` instances can be added together, which is useful if they were
-created using different data sources:
-
-.. code:: python
-
- events3 = events1+events2
-
-.. warning:: This only works if the two event lists were generated using
- the same parameters!
-
-Finally, a new ``EventList`` can be created from a subset of an existing ``EventList``,
-defined by a ds9 region (this functionality requires the
-`pyregion `_ package to be installed):
-
-.. code:: python
-
- circle_events = events.filter_events("circle.reg")
-
-Creating an X-ray observation from an in-memory dataset
-++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-It may be useful, especially for observational applications, to create
-datasets in-memory and then create simulated observations from
-them. Here is a relevant example of creating a toy cluster and evacuating two AGN-blown bubbles in it.
-
-First, we create the in-memory dataset (see :ref:`loading-numpy-array`
-for details on how to do this):
-
-.. code:: python
-
- import yt
- import numpy as np
- from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV
- from yt.units import mp
- from yt.utilities.cosmology import Cosmology
- from yt.analysis_modules.photon_simulator.api import *
- import aplpy
-
- R = 1000. # in kpc
- r_c = 100. # in kpc
- rho_c = 1.673e-26 # in g/cm^3
- beta = 1.
- T = 4. # in keV
- nx = 256
-
- bub_rad = 30.0
- bub_dist = 50.0
-
- ddims = (nx,nx,nx)
-
- x, y, z = np.mgrid[-R:R:nx*1j,
- -R:R:nx*1j,
- -R:R:nx*1j]
-
- r = np.sqrt(x**2+y**2+z**2)
-
- dens = np.zeros(ddims)
- dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
- dens[r > R] = 0.0
- temp = T*K_per_keV*np.ones(ddims)
- rbub1 = np.sqrt(x**2+(y-bub_rad)**2+z**2)
- rbub2 = np.sqrt(x**2+(y+bub_rad)**2+z**2)
- dens[rbub1 <= bub_rad] /= 100.
- dens[rbub2 <= bub_rad] /= 100.
- temp[rbub1 <= bub_rad] *= 100.
- temp[rbub2 <= bub_rad] *= 100.
-
-This created a cluster with a radius of 1 Mpc, a uniform temperature
-of 4 keV, and a density distribution from a :math:`\beta`-model. We then
-evacuated two "bubbles" of radius 30 kpc at a distance of 50 kpc from
-the center.
-
-Now, we create a yt Dataset object out of this dataset:
-
-.. code:: python
-
- data = {}
- data["density"] = (dens, "g/cm**3")
- data["temperature"] = (temp, "K")
- data["velocity_x"] = (np.zeros(ddims), "cm/s")
- data["velocity_y"] = (np.zeros(ddims), "cm/s")
- data["velocity_z"] = (np.zeros(ddims), "cm/s")
-
- bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
-
- ds = yt.load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
-
-where for simplicity we have set the velocities to zero, though we
-could have created a realistic velocity field as well. Now, we
-generate the photon and event lists in the same way as the previous
-example:
-
-.. code:: python
-
- sphere = ds.sphere("c", (1.0,"Mpc"))
-
- A = 3000.
- exp_time = 2.0e5
- redshift = 0.05
- cosmo = Cosmology()
-
- apec_model = TableApecModel("/Users/jzuhone/Data/atomdb_v2.0.2",
- 0.01, 20.0, 20000)
- abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
-
- thermal_model = ThermalPhotonModel(apec_model, photons_per_chunk=40000000)
- photons = PhotonList.from_scratch(sphere, redshift, A,
- exp_time, thermal_model, center="c")
-
-
- events = photons.project_photons([0.0,0.0,1.0],
- responses=["acisi_aimpt_cy17.arf",
- "acisi_aimpt_cy17.rmf"],
- absorb_model=abs_model,
- north_vector=[0.0,1.0,0.0])
-
- events.write_fits_image("img.fits", clobber=True)
-
-which yields the following image:
-
-.. code:: python
-
- fig = aplpy.FITSFigure("img.fits", figsize=(10,10))
- fig.show_colorscale(stretch="log", vmin=0.1, vmax=600., cmap="jet")
- fig.set_axis_labels_font(family="serif", size=16)
- fig.set_tick_labels_font(family="serif", size=16)
-
-.. image:: _images/bubbles.png
- :width: 80 %
-
-Storing events for future use and for reading-in by telescope simulators
-++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-If you want a more accurate representation of an observation taken by a
-particular instrument, there are tools available for such purposes. For
-the *Chandra* telescope, there is the venerable
-`MARX `_. For a wide range of
-instruments, both existing and future, there is
-`SIMX `_. We'll discuss two ways
-to store your event files so that they can be input by these and other
-codes.
-
-The first option is the most general, and the simplest: simply dump the
-event data to an HDF5 file:
-
-.. code:: python
-
- events.write_h5_file("my_events.h5")
-
-This will dump the raw event data, as well as the associated parameters,
-into the file. If you want to read these events back in, it's just as
-simple:
-
-.. code:: python
-
- events = EventList.from_h5_file("my_events.h5")
-
-You can use event data written to HDF5 files to input events into MARX
-using `this code `_.
-
-The second option, for use with SIMX, is to dump the events into a
-SIMPUT file:
-
-.. code:: python
-
- events.write_simput_file("my_events", clobber=True, emin=0.1, emax=10.0)
-
-which will write two files, ``"my_events_phlist.fits"`` and
-``"my_events_simput.fits"``, the former being a auxiliary file for the
-latter.
-
-.. note:: You can only write SIMPUT files if you didn't convolve
- the photons with responses, since the idea is to pass unconvolved
- photons to the telescope simulator.
-
-The following images were made from the same yt-generated events in both MARX and
-SIMX. They are 200 ks observations of the two example clusters from above
-(the Chandra images have been reblocked by a factor of 4):
-
-.. image:: _images/ds9_sloshing.png
-
-.. image:: _images/ds9_bubbles.png
-
-In November 2015, the structure of the photon and event HDF5 files changed. To
-convert an old-format file to the new format, use the ``convert_old_file`` utility:
-
-.. code:: python
-
- from yt.analysis_modules.photon_simulator.api import convert_old_file
- convert_old_file("old_photons.h5", "new_photons.h5", clobber=True)
- convert_old_file("old_events.h5", "new_events.h5", clobber=True)
-
-This utility will auto-detect the kind of file (photons or events) and will write
-the correct replacement for the new version.
-
-At times it may be convenient to write several ``EventLists`` to disk to be merged
-together later. This can be achieved with the ``merge_files`` utility. It takes a
-list of input event files and the name of the merged output file:
-
-.. code:: python
-
- from yt.analysis_modules.photon_simulator.api import merge_files
- merge_files(["events_0.h5", "events_1.h5", "events_2.h5"], "merged_events.h5",
- add_exposure_times=True, clobber=False)
-
-At the current time this utility is very limited, as it only allows merging of
-``EventLists`` which have the same parameters, with the exception of the exposure
-time. If the ``add_exposure_times`` argument to ``merge_files`` is set to ``True``,
-the lists will be merged together with the exposure times added. Otherwise, the
-exposure times of the different files must be equal.
diff --git a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
deleted file mode 100644
index a032bf6a8ba..00000000000
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-.. _planning-cosmology-simulations:
-
-Planning Simulations to use LightCones or LightRays
-===================================================
-
-If you want to run a cosmological simulation that will have just enough data
-outputs to create a light cone or light ray, the
-:meth:`~yt.analysis_modules.cosmological_observation.cosmology_splice.CosmologySplice.plan_cosmology_splice`
-function will calculate a list of redshift outputs that will minimally
-connect a redshift interval.
-
-.. code-block:: python
-
- from yt.analysis_modules.cosmological_observation.api import CosmologySplice
- my_splice = CosmologySplice('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
- my_splice.plan_cosmology_splice(0.0, 0.1, filename='redshifts.out')
-
-This will write out a file, formatted for the simulation type, with a list of
-redshift dumps. The keyword arguments are:
-
-* ``decimals`` (*int*): The decimal place to which the output redshift will
- be rounded. If the decimal place in question is nonzero, the redshift will
- be rounded up to ensure continuity of the splice. Default: 3.
-
-* ``filename`` (*str*): If provided, a file will be written with the redshift
- outputs in the form in which they should be given in the enzo parameter
- file. Default: None.
-
-* ``start_index`` (*int*): The index of the first redshift output. Default: 0.
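-
-For example, a sketch that spells out the defaults explicitly:
-
-.. code-block:: python
-
-    my_splice.plan_cosmology_splice(0.0, 0.1, decimals=3,
-                                    filename='redshifts.out',
-                                    start_index=0)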
diff --git a/doc/source/analyzing/analysis_modules/ppv_cubes.rst b/doc/source/analyzing/analysis_modules/ppv_cubes.rst
deleted file mode 100644
index 491b91d7767..00000000000
--- a/doc/source/analyzing/analysis_modules/ppv_cubes.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Creating Position-Position-Velocity FITS Cubes
--------------------------------------------------
-
-.. notebook:: PPVCube.ipynb
diff --git a/doc/source/analyzing/analysis_modules/radmc3d_export.rst b/doc/source/analyzing/analysis_modules/radmc3d_export.rst
deleted file mode 100644
index bacfccef9d8..00000000000
--- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst
+++ /dev/null
@@ -1,203 +0,0 @@
-.. _radmc3d_export:
-
-Exporting to RADMC-3D
-=====================
-
-.. sectionauthor:: Andrew Myers
-.. versionadded:: 2.6
-
-.. figure:: _images/31micron.png
-
- Above: a sample image showing the continuum dust emission image around a massive protostar
- made using RADMC-3D and plotted with pyplot.
-
-`RADMC-3D
-`_ is a
-three-dimensional Monte-Carlo radiative transfer code that is capable of
-handling both line and continuum emission. yt comes equipped with a
-:class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`
-class that exports AMR data to a format that RADMC-3D can read. Currently, only
-the ASCII-style data format is supported.
-In principle, this allows one to use RADMC-3D to make synthetic observations
-from any simulation data format that yt recognizes.
-
-Continuum Emission
-------------------
-
-To compute thermal emission intensities, RADMC-3D needs several inputs files that
-describe the spatial distribution of the dust and photon sources. To create these
-files, first import the RADMC-3D exporter, which is not loaded into your environment
-by default:
-
-.. code-block:: python
-
- import yt
- import numpy as np
- from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter, RadMC3DSource
-
-Next, load up a dataset and instantiate the :class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`.
-For this example, we'll use the "StarParticle" dataset,
-available `here
-`_.
-
-.. code-block:: python
-
- ds = yt.load("StarParticles/plrd01000/")
- writer = RadMC3DWriter(ds)
-
-The first data file to create is the "amr_grid.inp" file, which describes the structure
-of the AMR index. To create this file, simply call:
-
-.. code-block:: python
-
- writer.write_amr_grid()
-
-Next, we must give RADMC-3D information about the dust density. To do this, we
-define a field that calculates the dust density in each cell. We
-assume a constant dust-to-gas mass ratio of 0.01:
-
-.. code-block:: python
-
- dust_to_gas = 0.01
- def _DustDensity(field, data):
- return dust_to_gas * data["density"]
- ds.add_field(("gas", "dust_density"), function=_DustDensity, units="g/cm**3")
-
-We save this information into a file called "dust_density.inp".
-
-.. code-block:: python
-
- writer.write_dust_file(("gas", "dust_density"), "dust_density.inp")
-
-Finally, we must give RADMC-3D information about any stellar sources that are
-present. To do this, we have provided the
-:class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DSource`
-class. For this example, we place a single source with temperature 5780 K
-at the center of the domain:
-
-.. code-block:: python
-
- radius_cm = 6.96e10
- mass_g = 1.989e33
- position_cm = [0.0, 0.0, 0.0]
- temperature_K = 5780.0
- star = RadMC3DSource(radius_cm, mass_g, position_cm, temperature_K)
-
- sources_list = [star]
- wavelengths_micron = np.logspace(-1.0, 4.0, 1000)
-
- writer.write_source_files(sources_list, wavelengths_micron)
-
-The last line creates the files "stars.inp" and "wavelength_micron.inp",
-which describe the locations and spectra of the stellar sources as well
-as the wavelengths RADMC-3D will use in its calculations.
-
-If everything goes correctly, after executing the above code, you should have
-the files "amr_grid.inp", "dust_density.inp", "stars.inp", and "wavelength_micron.inp"
-sitting in your working directory. RADMC-3D needs a few more configuration files to
-compute the thermal dust emission. In particular, you need an opacity file, like the
-"dustkappa_silicate.inp" file included in RADMC-3D, a main "radmc3d.inp" file that sets
-some runtime parameters, and a "dustopac.inp" that describes the assumed composition of the dust.
-yt cannot make these files for you; in the example that follows, we used a
-"radmc3d.inp" file that looked like:
-
-::
-
- nphot = 1000000
- nphot_scat = 1000000
-
-which basically tells RADMC-3D to use 1,000,000 photon packets instead of the default 100,000. The
-"dustopac.inp" file looked like:
-
-::
-
- 2
- 1
- -----------------------------
- 1
- 0
- silicate
- -----------------------------
-
-To get RADMC-3D to compute the dust temperature, run the command:
-
-::
-
- ./radmc3d mctherm
-
-in the directory that contains your "amr_grid.inp", "dust_density.inp", "stars.inp", "wavelength_micron.inp",
-"radmc3d.inp", "dustkappa_silicate.inp", and "dustopac.inp" files. If everything goes correctly, you should
-get a "dust_temperature.dat" file in your working directory. Once that file is generated, you can use
-RADMC-3D to generate SEDs, images, and so forth. For example, to create an image at 31 microns, run the command:
-
-::
-
- ./radmc3d image lambda 31 sizeau 30000 npix 800
-
-which should create a file called "image.out". You can view this image using pyplot or whatever other
-plotting package you want. To facilitate this, we provide helper functions
-that parse the image.out file, returning a header dictionary with some useful metadata
-and an np.array containing the image values. To plot this image in pyplot, you could do something like:
-
-.. code-block:: python
-
- import matplotlib.pyplot as plt
- import numpy as np
- from yt.analysis_modules.radmc3d_export.api import read_radmc3d_image
- header, image = read_radmc3d_image("image.out")
-
- Nx = header['Nx']
- Ny = header['Ny']
-
- x_hi = 0.5*header["pixel_size_cm_x"]*Nx
- x_lo = -x_hi
- y_hi = 0.5*header["pixel_size_cm_y"]*Ny
- y_lo = -y_hi
-
- X = np.linspace(x_lo, x_hi, Nx)
- Y = np.linspace(y_lo, y_hi, Ny)
-
- plt.pcolormesh(X, Y, np.log10(image), cmap='hot')
- cbar = plt.colorbar()
- plt.axis((x_lo, x_hi, y_lo, y_hi))
- ax = plt.gca()
- ax.set_xlabel(r"$x$ (cm)")
- ax.set_ylabel(r"$y$ (cm)")
- cbar.set_label(r"Log Intensity (erg cm$^{-2}$ s$^{-1}$ Hz$^{-1}$ ster$^{-1}$)")
- plt.savefig('dust_continuum.png')
-
-The resulting image should look like:
-
-.. image:: _images/dust_continuum.png
-
-This barely scratches the surface of what you can do with RADMC-3D. Our goal here is
-just to describe how to use yt to export the data it knows about (densities, stellar
-sources, etc.) into a format that RADMC-3D can recognize.
-
-Line Emission
--------------
-
-The file format required for line emission is slightly different. The
-following script will generate two files, one called "numberdens_co.inp",
-which contains the number density of CO molecules for every cell in the index,
-and another called "gas_velocity.inp", which is useful if you want to include
-Doppler broadening.
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter
-
- x_co = 1.0e-4
- mu_h = yt.YTQuantity(2.34e-24, 'g')
- def _NumberDensityCO(field, data):
- return (x_co/mu_h)*data["density"]
- yt.add_field(("gas", "number_density_CO"), function=_NumberDensityCO, units="cm**-3")
-
- ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
- writer = RadMC3DWriter(ds)
-
- writer.write_amr_grid()
- writer.write_line_file(("gas", "number_density_CO"), "numberdens_co.inp")
- velocity_fields = ["velocity_x", "velocity_y", "velocity_z"]
- writer.write_line_file(velocity_fields, "gas_velocity.inp")
diff --git a/doc/source/analyzing/analysis_modules/star_analysis.rst b/doc/source/analyzing/analysis_modules/star_analysis.rst
deleted file mode 100644
index a026ac5d74c..00000000000
--- a/doc/source/analyzing/analysis_modules/star_analysis.rst
+++ /dev/null
@@ -1,297 +0,0 @@
-.. note::
-
- This module has been deprecated as it is unmaintained. The code has been
- moved to the `yt attic `__.
- If you'd like to take it over, please do!
-
-.. _star_analysis:
-
-Star Particle Analysis
-======================
-.. sectionauthor:: Stephen Skory
-.. versionadded:: 1.6
-
-This document describes tools in yt for analyzing star particles.
-The Star Formation Rate tool bins stars by time to produce star formation
-statistics over several metrics.
-A synthetic flux spectrum and a spectral energy density plot can be calculated
-with the Spectrum tool.
-
-.. _star_formation_rate:
-
-Star Formation Rate
--------------------
-
-This tool can calculate various star formation statistics binned over time.
-As input it can accept either a yt ``data_source``, such as a region or
-sphere (see :ref:`available-objects`), or arrays containing the data for
-the stars you wish to analyze.
-
-This example will analyze all the stars in the volume:
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.star_analysis.api import StarFormationRate
- ds = yt.load("Enzo_64/DD0030/data0030")
- ad = ds.all_data()
- sfr = StarFormationRate(ds, data_source=ad)
-
-or just a small part of the volume i.e. a small sphere at the center of the
-simulation volume with radius 10% the box size:
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.star_analysis.api import StarFormationRate
- ds = yt.load("Enzo_64/DD0030/data0030")
- sp = ds.sphere([0.5, 0.5, 0.5], 0.1)
- sfr = StarFormationRate(ds, data_source=sp)
-
-If the stars to be analyzed cannot be defined by a ``data_source``, YTArrays can
-be passed. For backward compatibility it is also possible to pass generic numpy
-arrays. In this case, the units for the ``star_mass`` must be in
-:math:`(\mathrm{\rm{M}_\odot})`, the ``star_creation_time`` in code units, and
-the volume must be specified in :math:`(\mathrm{\rm{Mpc}^3})` as a float (its
-exact value only matters for the volume-normalized statistics).
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.star_analysis.api import StarFormationRate
- from yt.data_objects.particle_filters import add_particle_filter
-
- def Stars(pfilter, data):
- return data[("all", "particle_type")] == 2
- add_particle_filter("stars", function=Stars, filtered_type='all',
- requires=["particle_type"])
-
- ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
- ds.add_particle_filter('stars')
- v, center = ds.find_max("density")
- sp = ds.sphere(center, (50, "kpc"))
-
- # This puts the particle data for *all* the particles in the sphere sp
- # into the arrays sm and ct.
- mass = sp[("stars", "particle_mass")].in_units('Msun')
- age = sp[("stars", "age")].in_units('Myr')
- ct = sp[("stars", "creation_time")].in_units('Myr')
-
- # Pick out only old stars using Numpy array fancy indexing.
- threshold = ds.quan(100.0, "Myr")
- mass_old = mass[age > threshold]
- ct_old = ct[age > threshold]
-
- sfr = StarFormationRate(ds, star_mass=mass_old, star_creation_time=ct_old,
- volume=sp.volume())
-
-To output the data to a text file, use the command ``.write_out``:
-
-.. code-block:: python
-
- sfr.write_out(name="StarFormationRate.out")
-
-In the file ``StarFormationRate.out``, there are seven columns of data:
-
- 1. Time (yr)
- 2. Look-back time (yr)
- 3. Redshift
- 4. Star formation rate in this bin per year :math:`(\mathrm{\rm{M}_\odot / \rm{yr}})`
- 5. Star formation rate in this bin per year per Mpc**3 :math:`(\mathrm{\rm{M}_\odot / \rm{h} / \rm{Mpc}^3})`
- 6. Stars formed in this time bin :math:`(\mathrm{\rm{M}_\odot})`
- 7. Cumulative stars formed up to this time bin :math:`(\mathrm{\rm{M}_\odot})`
-
-The output is easily plotted. This is a plot for some test data (that may or may not
-correspond to anything physical) using columns #2 and #4 for the x and y
-axes, respectively:
-
-.. image:: _images/SFR.png
- :width: 640
- :height: 480
-
-It is possible to access the output of the analysis without writing to disk.
-Attached to the ``sfr`` object are the following arrays which are identical
-to the ones that are saved to the text file as above:
-
- 1. ``sfr.time``
- 2. ``sfr.lookback_time``
- 3. ``sfr.redshift``
- 4. ``sfr.Msol_yr``
- 5. ``sfr.Msol_yr_vol``
- 6. ``sfr.Msol``
- 7. ``sfr.Msol_cumulative``
-
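-A quick plot like the one above can be made directly from these arrays
-(a sketch, assuming the ``sfr`` object created earlier; the arrays are in
-years and :math:`\mathrm{\rm{M}_\odot / \rm{yr}}`):
-
-.. code-block:: python
-
-    import matplotlib.pyplot as plt
-
-    plt.plot(sfr.lookback_time / 1.0e9, sfr.Msol_yr)
-    plt.xlabel("Look-back time (Gyr)")
-    plt.ylabel(r"SFR (M$_\odot$ / yr)")
-    plt.savefig("SFR.png")
-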
-.. _synthetic_spectrum:
-
-Synthetic Spectrum Generator
-----------------------------
-
-Based on code generously provided by Kentaro Nagamine,
-this will generate a synthetic spectrum for the stars using the publicly-available
-tables of Bruzual & Charlot (hereafter B&C). Please see their `2003 paper
-`_ for more information
-and the `main data
-distribution page `_ for the original data.
-Based on the mass, age and metallicity of each star, a cumulative spectrum is
-generated and can be output in two ways, either raw, or as a spectral
-energy distribution.
-
-This analysis toolkit reads in the B&C data from HDF5 files that have been
-converted from the original ASCII files (available at the link above). The
-HDF5 files are one-quarter the size of the ASCII files, and greatly reduce
-the time required to read the data off disk. The HDF5 files are available from
-the main yt website `here `_.
-Both the Salpeter and Chabrier models have been converted,
-and it is simplest to download all the files to the same location.
-Please read the original B&C sources for information on the differences between
-the models.
-
-In order to analyze stars, first the Bruzual & Charlot data tables need to be
-read in from disk. This is accomplished by initializing ``SpectrumBuilder`` and
-specifying the location of the HDF5 files with the ``bcdir`` parameter.
-The models are chosen with the ``model`` parameter, which is either
-*"chabrier"* or *"salpeter"*.
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.star_analysis.api import SpectrumBuilder
- ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
- spec = SpectrumBuilder(ds, bcdir="bc", model="chabrier")
-
-In order to analyze a set of stars, use the ``calculate_spectrum`` command.
-It accepts either a ``data_source`` or a set of YTArrays with the star
-information. Continuing from the above example:
-
-.. code-block:: python
-
- v, center = ds.find_max("density")
- sp = ds.sphere(center, (50, "kpc"))
- spec.calculate_spectrum(data_source=sp)
-
-If a subset of stars are desired, call it like this:
-
-.. code-block:: python
-
- from yt.data_objects.particle_filters import add_particle_filter
-
- def Stars(pfilter, data):
- return data[("all", "particle_type")] == 2
- add_particle_filter("stars", function=Stars, filtered_type='all',
- requires=["particle_type"])
-
- ds.add_particle_filter('stars')
-
- # Pick out only old stars using Numpy array fancy indexing.
- age = sp[("stars", "age")].in_units('Myr')
- threshold = ds.quan(100.0, "Myr")
- mass_old = sp[("stars", "particle_mass")][age > threshold]
- metal_old = sp[("stars", "metallicity_fraction")][age > threshold]
- ct_old = sp[("stars", "creation_time")][age > threshold]
-
- spec.calculate_spectrum(star_mass=mass_old, star_creation_time=ct_old,
- star_metallicity_fraction=metal_old)
-
-For backward compatibility numpy arrays can be used instead for ``star_mass``
-(in units :math:`\mathrm{\rm{M}_\odot}`), ``star_creation_time`` and
-``star_metallicity_fraction`` (in code units).
-Alternatively, when using either a ``data_source`` or individual arrays,
-the option ``star_metallicity_constant`` can be specified to force all the
-stars to have the same metallicity. If arrays are being used, the
-``star_metallicity_fraction`` array need not be specified.
-
-.. code-block:: python
-
- # Make all the stars have solar metallicity.
- spec.calculate_spectrum(data_source=sp, star_metallicity_constant=0.02)
-
-Newly formed stars are often shrouded by thick gas. With the ``min_age`` option
-of ``calculate_spectrum``, young stars can be excluded from the spectrum.
-The units are in years.
-The default is zero, which is equivalent to including all stars.
-
-.. code-block:: python
-
- spec.calculate_spectrum(data_source=sp, star_metallicity_constant=0.02,
- min_age=ds.quan(1.0, "Myr"))
-
-There are two ways to write out the data once the spectrum has been calculated.
-The command ``write_out`` outputs two columns of data:
-
- 1. Wavelength (:math:`\text{Angstroms}`)
- 2. Flux (Luminosity per unit wavelength :math:`(\mathrm{\rm{L}_\odot} / \text{Angstrom})` , where
- :math:`\mathrm{\rm{L}_\odot} = 3.826 \cdot 10^{33}\, \mathrm{ergs / s}` ).
-
-and can be called simply, specifying the output file:
-
-.. code-block:: python
-
- spec.write_out(name="spec.out")
-
-The other way, ``write_out_SED``, outputs the spectral energy distribution
-(SED). Along with the ``name`` parameter, this command can also take the
-``flux_norm`` option, which is the wavelength in Angstroms at which to
-normalize the flux. The default is 5200 Angstroms. This command outputs the
-data in two columns:
-
- 1. Wavelength :math:`(\text{Angstroms})`
- 2. Relative flux normalized to the flux at *flux_norm*.
-
-.. code-block:: python
-
- spec.write_out_SED(name="SED.out", flux_norm=5200)
-
-Below is a deliberately absurd example: an SED for stars as old as the
-universe, all with solar metallicity, at a redshift of zero. Note that even
-in this example a ``ds`` is required.
-
-.. code-block:: python
-
- import yt
- import numpy as np
- from yt.analysis_modules.star_analysis.api import SpectrumBuilder
-
- ds = yt.load("Enzo_64/DD0030/data0030")
- spec = SpectrumBuilder(ds, bcdir="bc", model="chabrier")
- sm = np.ones(100)
- ct = np.zeros(100)
- spec.calculate_spectrum(star_mass=sm, star_creation_time=ct,
- star_metallicity_constant=0.02)
-    spec.write_out_SED(name="SED.out")
-
-And the plot:
-
-.. image:: _images/SED.png
- :width: 640
- :height: 480
-
-Iterate Over a Number of Halos
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In the example below, the halos in a dataset are found, and the SED is
-calculated and written out for each one.
-
-.. code-block:: python
-
- import yt
- from yt.analysis_modules.star_analysis.api import SpectrumBuilder
- from yt.data_objects.particle_filters import add_particle_filter
- from yt.analysis_modules.halo_finding.api import HaloFinder
-
- def Stars(pfilter, data):
- return data[("all", "particle_type")] == 2
- add_particle_filter("stars", function=Stars, filtered_type='all',
- requires=["particle_type"])
-
- ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
- ds.add_particle_filter('stars')
- halos = HaloFinder(ds, dm_only=False)
- # Set up the spectrum builder.
- spec = SpectrumBuilder(ds, bcdir="bc", model="salpeter")
-
- # Iterate over the halos.
- for halo in halos:
- sp = halo.get_sphere()
- spec.calculate_spectrum(
- star_mass=sp[("stars", "particle_mass")],
- star_creation_time=sp[("stars", "creation_time")],
- star_metallicity_fraction=sp[("stars", "metallicity_fraction")])
- # Write out the SED using the default flux normalization.
- spec.write_out_SED(name="halo%05d.out" % halo.id)
diff --git a/doc/source/analyzing/analysis_modules/sunrise_export.rst b/doc/source/analyzing/analysis_modules/sunrise_export.rst
deleted file mode 100644
index de23bcf026c..00000000000
--- a/doc/source/analyzing/analysis_modules/sunrise_export.rst
+++ /dev/null
@@ -1,157 +0,0 @@
-.. _sunrise_export:
-
-.. note::
-
- This module has been deprecated as it is unmaintained. The code has been
- moved to the `yt attic `__.
- If you'd like to take it over, please do!
-
-Exporting to Sunrise
-====================
-
-.. sectionauthor:: Christopher Moody
-.. versionadded:: 1.8
-
-.. note::
-
- As of :code:`yt-3.0`, the sunrise exporter is not currently functional.
- This functionality is still available in :code:`yt-2.x`. If you would like
- to use these features in :code:`yt-3.x`, help is needed to port them over.
- Contact the yt-users mailing list if you are interested in doing this.
-
-The yt-Sunrise exporter essentially takes grid cell data and translates it into a binary octree format, attaches star particles, and saves the output to a FITS file Sunrise can read. For every cell, the gas mass, metals mass (a fraction of which is later assumed to be in the form of dust), and the temperature are saved. Star particles are defined entirely by their mass, position, metallicity, and a 'radius.' This guide outlines the steps to exporting the data, troubleshoots common problems, and reviews recommended sanity checks.
-
-Simple Export
--------------
-
-The code outlined here is a barebones Sunrise export:
-
-.. code-block:: python
-
-    from yt.mods import *
-    import numpy as np
-
-    # file_amr and out_fits_file are paths that you supply.
-    ds = ARTDataset(file_amr)
-    potential_value, center = ds.find_min('Potential_New')
-    root_cells = ds.domain_dimensions[0]
-    le = np.floor(root_cells * center)  # left edge
-    re = np.ceil(root_cells * center)   # right edge
-    # bounds are the left edge plus a span
-    bounds = [(le[0], re[0] - le[0]), (le[1], re[1] - le[1]), (le[2], re[2] - le[2])]
-    bounds = np.array(bounds, dtype='int')
-    amods.sunrise_export.export_to_sunrise(ds, out_fits_file, subregion_bounds=bounds)
-
-To ensure that the camera is centered on the galaxy, we find the center by finding the minimum of the gravitational potential. The above code takes that center, and casts it in terms of which root cells should be extracted. At the moment, Sunrise accepts a strict octree, and you can only extract a 2x2x2 domain on the root grid, and not an arbitrary volume. See the optimization section later for workarounds. On my reasonably recent machine, the export process takes about 30 minutes.
-
-Some codes do not yet enjoy full yt support. For those cases, ``export_to_sunrise()`` accepts manually constructed particle columns and writes them into the output FITS file:
-
-.. code-block:: python
-
-    import pyfits
-    import numpy as np
-
-    # pos, vel, mass_initial, mass_current, formation_time, star_radius, age,
-    # and z are arrays you assemble from your simulation's star particles.
-    col_list = []
-    col_list.append(pyfits.Column("ID", format="I", array=np.arange(mass_current.size)))
-    col_list.append(pyfits.Column("parent_ID", format="I", array=np.arange(mass_current.size)))
-    col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
-    col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
-    col_list.append(pyfits.Column("creation_mass", format="D", array=mass_initial, unit="Msun"))
-    col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
-    col_list.append(pyfits.Column("radius", format="D", array=star_radius, unit="kpc"))
-    col_list.append(pyfits.Column("mass", format="D", array=mass_current, unit="Msun"))
-    col_list.append(pyfits.Column("age_m", format="D", array=age, unit="yr"))
-    col_list.append(pyfits.Column("age_l", format="D", array=age, unit="yr"))
-    col_list.append(pyfits.Column("metallicity", format="D", array=z))
-    col_list.append(pyfits.Column("L_bol", format="D", array=np.zeros(mass_current.size)))
-    cols = pyfits.ColDefs(col_list)
-
-    amods.sunrise_export.export_to_sunrise(ds, out_fits_file, write_particles=cols,
-                                           subregion_bounds=bounds)
-
-This code snippet takes the stars in the region outlined by the ``bounds`` variable and organizes them into pyfits columns, which are then passed to ``export_to_sunrise``. Note that yt units are CGS, whereas Sunrise expects (physical) kpc, kelvin, solar masses, and years.
-
-Remember that in Sunrise photons are not spawned at the exact position of a star particle, but stochastically within a radius around it. A sensible default is to set this radius to the resolution (or smoothing length) of your simulation, and then to test that Sunrise is not sensitive to doubling or halving this number.
-
-Sanity Check: Young Stars
--------------------------
-
-Young stars are treated in a special way in Sunrise. Stars under 10 Myr do not emit in the normal fashion; instead they are replaced with MAPPINGS III particles that emulate the emission characteristics of star forming clusters. Among other things this involves a calculation of the local pressure, P/k, which Sunrise reports for debugging purposes and is something you should also check.
-
-The code snippet below finds the location of every star under 10 Myr and looks up the cell containing it:
-
-.. code-block:: python
-
-    pk = np.zeros(len(pos))
-    for i, (p, a) in enumerate(zip(pos, age)):  # loop over stars
-        if a > 1.0e7:  # only stars younger than 10 Myr (ages in yr)
-            continue
-        center = p * ds['kpc']
-        grid, idx = find_cell(ds.index.grids[0], center)
-        pk[i] = grid['Pk'][idx]
-
-This is how Sunrise calculates the pressure, so we can add a matching derived field:
-
-.. code-block:: python
-
-    def _Pk(field, data):
-        # Pressure over Boltzmann's constant: P/k = (n/V)*T
-        # Local stellar ISM values are ~16500 K cm^-3
-        vol = data['cell_volume'].astype('float64') * data.ds['cm']**3.0  # volume in cm^3
-        m_g = data["cell_mass"] * 1.988435e33  # cell mass (assumed in Msun) converted to g
-        n_g = m_g * 5.97e23  # number of H atoms (1/m_H is about 5.97e23 per g)
-        teff = data["temperature"]
-        val = (n_g / vol) * teff  # should be of order 1e2-1e5
-        return val
-    add_field("Pk", function=_Pk, units=r"Kcm^{-3}")
-
-
-This snippet locates the cell containing a given position and returns the grid along with the cell index within it.
-
-.. code-block:: python
-
-    def find_cell(grid, position):
-        # Recurse into any child grid that contains the position.
-        for child in grid.Children:
-            if np.all(child.LeftEdge < position) and \
-               np.all(child.RightEdge > position):
-                return find_cell(child, position)
-
-        # If the point is not contained within any of the child grids,
-        # find it within the extent of the current grid.
-        le, re = grid.LeftEdge, grid.RightEdge
-        ad = grid.ActiveDimensions
-        span = (re - le) / ad
-        idx = np.int64(np.floor((position - le) / span))
-        assert np.all(idx < ad)
-        return grid, idx
-
-Sanity Check: Gas & Stars Line Up
----------------------------------
-
-If you add your star particles separately from the gas cell index, then it is worth checking that they still line up once they've been loaded into Sunrise. This is fairly easy to do with an 'auxiliary' run. In Sunrise, set all of your ray counts to zero (nrays_nonscatter, nrays_scatter, nrays_intensity, nrays_ir) except for nrays_aux; this will produce an mcrx FITS file with a gas map, a metals map, a temperature*gas_mass map, and a stellar map for each camera. As long as you keep some cameras at theta,phi = 0,0 or 90,0, etc., then a standard yt projection down the code's xyz axes should look identical:
-
-.. code-block:: python
-
-    # ``pc`` is a yt-2 PlotCollection; in current yt a ProjectionPlot serves
-    # the same purpose.
-    pc.add_projection("density", 0, "density")
-
-
-Convergence: High Resolution
-----------------------------
-
-At the moment, yt exports are the only grid data format Sunrise accepts; otherwise, Sunrise typically ingests SPH particles or AREPO Voronoi grids. Among the many convergence checks you should perform is a high-resolution check, which subdivides all leaves in the octree and copies the parent data into them, effectively increasing the resolution without adding new information. Sunrise should yield similar results, and it is worth checking that it does. Do so by passing ``dummy_subdivide=True`` to ``export_to_sunrise``, as shown below. The resulting file should be slightly less than 8 times larger because of the newly added cells.
-
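-As a quick illustration (not from the original document), the call for such a check might look like the following, reusing the ``out_fits_file`` and ``bounds`` placeholders from the export example above:
-
-.. code-block:: python
-
-    # Subdivide every leaf cell, copying the parent data, to test resolution convergence.
-    amods.sunrise_export.export_to_sunrise(ds, out_fits_file,
-                                           subregion_bounds=bounds,
-                                           dummy_subdivide=True)
-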
-Other checks:
--------------
-
-Check that the width of your extracted region is at least the size of your camera's field of view. It should probably be significantly larger than your FOV; cutting it short could throw out otherwise interesting objects.
-
-A good idea is to use yt to find the inertia tensor of the stars, find the rotation matrix that diagonalizes it, and use that to define cameras for Sunrise. Unless your code's grid happens to be aligned with your galaxy, this is required for getting edge-on or face-on shots. A rough sketch of this calculation is given below.
-
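-The sketch below is only illustrative (it is not part of the original exporter) and assumes the star positions, measured relative to the galaxy center, and the star masses have already been extracted from yt into NumPy arrays:
-
-.. code-block:: python
-
-    import numpy as np
-
-    def camera_axes(pos, mass):
-        # Second-moment ("inertia-like") tensor of the stellar distribution;
-        # pos is an (N, 3) array, mass is an (N,) array.
-        S = np.einsum('n,ni,nj->ij', mass, pos, pos)
-        # The eigenvectors of a symmetric tensor form the orthonormal basis
-        # that diagonalizes it.  For a disk, the eigenvector with the smallest
-        # eigenvalue is close to the disk normal.
-        evals, evecs = np.linalg.eigh(S)
-        face_on = evecs[:, np.argmin(evals)]   # camera direction for a face-on view
-        edge_on = evecs[:, np.argmax(evals)]   # an in-plane direction for an edge-on view
-        return face_on, edge_on
-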
-The final product:
-------------------
-
-.. image:: _images/mw3_0420.jpg
- :width: 479
- :height: 479
-
-Above is a false color image where RGB are assigned to IR, optical and UV broadband filters, respectively.
-
diff --git a/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst b/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst
deleted file mode 100644
index 0a063a42e22..00000000000
--- a/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _sunyaev-zeldovich:
-
-Mock Observations of the Sunyaev-Zeldovich Effect
--------------------------------------------------
-
-.. notebook:: SZ_projections.ipynb
diff --git a/doc/source/analyzing/analysis_modules/synthetic_observation.rst b/doc/source/analyzing/analysis_modules/synthetic_observation.rst
deleted file mode 100644
index 3e4ca7ed018..00000000000
--- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-.. _synthetic-observations:
-
-Synthetic Observation
-=====================
-
-Methods for generating various types of synthetic observations
-from simulation data.
-
-.. toctree::
- :maxdepth: 2
-
- light_cone_generator
- light_ray_generator
- planning_cosmology_simulations
- absorption_spectrum
- star_analysis
- xray_emission_fields
- sunyaev_zeldovich
- photon_simulator
- ppv_cubes
diff --git a/doc/source/analyzing/analysis_modules/two_point_functions.rst b/doc/source/analyzing/analysis_modules/two_point_functions.rst
deleted file mode 100644
index b4ae754fb5d..00000000000
--- a/doc/source/analyzing/analysis_modules/two_point_functions.rst
+++ /dev/null
@@ -1,943 +0,0 @@
-.. note::
-
- This module has been deprecated as it is unmaintained. The code has been
- moved to the `yt attic `__.
- If you'd like to take it over, please do!
-
-.. _two_point_functions:
-
-Two Point Functions
-===================
-.. sectionauthor:: Stephen Skory
-.. versionadded:: 1.7
-
-.. note::
-
- As of :code:`yt-3.0`, the two point function analysis module is not
- currently functional. This functionality is still available in
- :code:`yt-2.x`. If you would like to use these features in :code:`yt-3.x`,
- help is needed to port them over. Contact the yt-users mailing list if you
- are interested in doing this.
-
-The Two Point Functions framework (TPF) is capable of running several
-multi-dimensional two point functions simultaneously on a dataset using
-memory and workload parallelism.
-Examples of two point functions are structure functions and two-point
-correlation functions.
-It can analyze the entire simulation, or a small rectangular subvolume.
-The results can be output in convenient text format and in efficient
-HDF5 files.
-
-Requirements
-------------
-
-The TPF relies on the Fortran kD-tree that is used
-by the parallel HOP halo finder. The kD-tree is not built by default with yt
-so it must be built by hand.
-
-Quick Example
--------------
-
-It is very simple to set up and run a structure function on a dataset.
-The script below will output the RMS velocity difference over the entire volume
-for a range of distances. There are some brief comments given below for each
-step.
-
-.. code-block:: python
-
- from yt.mods import *
- from yt.analysis_modules.two_point_functions.api import *
-
- ds = load("data0005")
-
- # Calculate the S in RMS velocity difference between the two points.
- # All functions have five inputs. The first two are containers
- # for field values, and the second two are the raw point coordinates
- # for the point pair. The fifth is the normal vector between the two points
- # in r1 and r2. Not all the inputs must be used.
- # The name of the function is used to name output files.
- def rms_vel(a, b, r1, r2, vec):
- vdiff = a - b
- np.power(vdiff, 2.0, vdiff)
- vdiff = np.sum(vdiff, axis=1)
- return vdiff
-
-
- # Initialize a function generator object.
- # Set the input fields for the function(s),
- # the number of pairs of points to calculate, how big a data queue to
- # use, the range of pair separations and how many lengths to use,
- # and how to divide that range (linear or log).
- tpf = TwoPointFunctions(ds, ["velocity_x", "velocity_y", "velocity_z"],
- total_values=1e5, comm_size=10000,
- length_number=10, length_range=[1./128, .5],
- length_type="log")
-
- # Adds the function to the generator. An output label is given,
- # and whether or not to square-root the results in the text output is given.
- # Note that the items below are being added as lists.
- f1 = tpf.add_function(function=rms_vel, out_labels=['RMSvdiff'], sqrt=[True])
-
- # Define the bins used to store the results of the function.
- f1.set_pdf_params(bin_type='log', bin_range=[5e4, 5.5e13], bin_number=1000)
-
- # Runs the functions.
- tpf.run_generator()
-
- # This calculates the M in RMS and writes out a text file with
- # the RMS values and the lengths. The R happens because sqrt=True in
- # add_function, above.
- # If one is doing turbulence, the contents of this text file are what
- # is wanted for plotting.
- # The file is named 'rms_vel.txt'.
- tpf.write_out_means()
- # Writes out the raw PDF bins and bin edges to a HDF5 file.
- # The file is named 'rms_vel.h5'.
- tpf.write_out_arrays()
-
-As an aside, note that any analysis function in yt can be accessed directly
-and imported automatically using the ``amods`` construct.
-Here is an abbreviated example:
-
-.. code-block:: python
-
- from yt.mods import *
- ...
- tpf = amods.two_point_functions.TwoPointFunctions(ds, ...)
-
-
-Probability Distribution Function
----------------------------------
-
-For a given length of separation between points, the TPF stores the
-Probability Distribution Function (PDF) of the output values.
-The PDF allows more varied analysis of the TPF output than storing
-the function itself.
-The image below illustrates how to think about this.
-If the function is measuring the absolute difference in temperature
-between two points, for each point separation length L, the measured
-differences are binned by temperature difference (delta T).
-Therefore in the figure below, for a length L, the x-axis is temperature difference
-(delta T), and the y-axis is the probability of finding that temperature
-difference.
-To find the mean temperature difference for the length L, one just needs
-to multiply the value of the temperature difference bin by its probability,
-and add up over all the bins.
-
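-As a tiny illustration (reusing the worked numbers from the text-output
-example later in this document), recovering a mean from a stored PDF is just
-a weighted sum:
-
-.. code-block:: python
-
-    import numpy as np
-
-    # Probabilities of each temperature-difference bin at one separation L...
-    pdf = np.array([0.7, 0.3])
-    # ...and the centers of those bins.
-    bin_centers = np.array([55.0, 550.0])
-    mean_dT = np.sum(pdf * bin_centers)  # 203.5
-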
-.. image:: _images/PDF.png
- :width: 538
- :height: 494
-
-How It Works
-------------
-
-In order to use the TPF, one must understand how it works.
-When run in parallel the defined analysis volume, whether it is the full
-volume or a small region, is subdivided evenly and each task is assigned
-a different subvolume.
-The total number of point pairs to be created per pair separation length
-is ``total_values``, and each
-task is given an equal share of that total.
-Each task will create its share of ``total_values`` by first making
-a randomly placed point in its local volume.
-The second point is placed some distance away, with its direction set by random
-values of (phi, theta) in spherical coordinates and its separation drawn from
-the length ranges.
-If that second point is inside the task's subvolume, the functions
-are evaluated and their results binned.
-However, if the second point lies outside the subvolume (that is, in a
-different task's subvolume), the point pair is stored in a point data queue,
-and the field values for the first point are stored in a companion data queue.
-When a task makes its share of ``total_values``, or it fills up its data
-queue with points it can't fully process, it passes its queues to its neighbor on
-the right.
-It then receives the data queues from its neighbor on the left, and processes
-the queues.
-If it can evaluate a point in the received data queues, meaning it can find the
-field values for the second point, it computes the functions for
-that point pair, and removes that entry from the queue.
-If it still needs to fulfill ``total_values``, it can put its own point pair
-into that entry in the queues.
-Once the queues are full of points that a task cannot process, it passes them
-on.
-The data communication cycle ends when all tasks have made their share of
-``total_values``, and all the data queues are cleared.
-When all the cycles have run, the bins are added up globally to find the
-global PDF.
-
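-Schematically, the hand-off is a simple ring: each task always sends to its
-neighbor on the right and receives from its neighbor on the left, wrapping
-around at the ends. The toy loop below (plain Python, not the actual MPI
-implementation) mimics that cycle:
-
-.. code-block:: python
-
-    ntasks = 4
-    # Each "task" starts with a queue of unresolved point pairs.
-    queues = {rank: ["pair-%d-%d" % (rank, i) for i in range(3)]
-              for rank in range(ntasks)}
-
-    cycles = 0
-    while any(queues.values()):
-        cycles += 1
-        # Hand every queue to the neighbor on the right, i.e. each task
-        # receives the queue of its neighbor on the left.
-        queues = {(rank + 1) % ntasks: queue for rank, queue in queues.items()}
-        for queue in queues.values():
-            if queue:
-                queue.pop()  # pretend one pair per cycle gets resolved locally
-    print("all queues cleared after %d cycles" % cycles)
-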
-Below is a two-dimensional representation of how the full simulation is
-subdivided into 16 smaller subvolumes.
-Each subvolume is assigned to one of 16 tasks
-labelled with an integer [0-15].
-Each task is responsible for only the field
-values inside its subvolume - it is completely ignorant about all the other
-subvolumes.
-When point separation rulers are laid down, some like the ruler
-labelled A, have both points completely inside a single subvolume.
-In this case,
-task 5 can evaluate the function(s) on its own.
-In situations like
-B or C, the points lie in different subvolumes, and no one task can evaluate
-the functions independently.
-
-.. image:: _images/struct_fcn_subvolumes0.png
- :width: 403
- :height: 403
-
-This next figure shows how the data queues are passed from task to task.
-Once task 0 is done with its points, or its queue is full, it passes the queue
-to task 1.
-Likewise, 1 passes to 2, and 15 passes back around to 0, completing the circle.
-If a point pair lies in the subvolumes of 0 and 15, it can take up to 15
-communication cycles for that pair to be evaluated.
-
-.. image:: _images/struct_fcn_subvolumes1.png
- :width: 526
- :height: 403
-
-Sometimes the sizes of the data fields being computed on are not very large,
-and the memory-parallelism of the TPF isn't crucial.
-However, if one still wants to run with many processors to generate large
-numbers of random pairs, subdividing the volume as above is not as efficient
-as it could be due to communication overhead.
-By using the ``vol_ratio`` setting of TPF (see :ref:`Create the
-Function Generator Object `), the full
-volume can be subdivided into larger subvolumes than above,
-and tasks will own non-unique copies of the fields data.
-In the figure below, the two-dimensional volume has been subdivided into
-four subvolumes, and four tasks each own a copy of the data in each subvolume.
-As shown, the queues are handed off in the same order as before.
-But in this simple example, the maximum number of communication cycles for any
-point to be evaluated is three.
-This means that the communication overhead will be lower and runtimes
-somewhat faster.
-
-.. image:: _images/struct_fcn_subvolumes2.png
- :width: 526
- :height: 403
-
-A Step By Step Overview
------------------------
-
-In order to run the TPF, these steps must be taken:
-
- #. Load yt (of course), and any other Python modules that are needed.
- #. Define any non-default fields in the standard yt manner.
- #. :ref:`tpf_fcns`.
- #. :ref:`tpf_tpf`.
- #. :ref:`tpf_add_fcns`.
- #. :ref:`tpf_pdf`.
- #. :ref:`tpf_run`.
- #. :ref:`tpf_output`.
-
-.. _tpf_fcns:
-
-Define Functions
-^^^^^^^^^^^^^^^^
-
-All functions must adhere to these specifications:
-
- * There must be five input variables. The first two are arrays for the
- fields needed by the function, and the next two are the raw coordinate
- values for the points. The fifth input is an array with the normal
- vector between each of the points in r1 and r2.
- * The output must be in array format.
- * The names of the functions need to be unique.
-
-The first two variables of a function are arrays that contain the field values.
-The order of the field values in the lists is set by the call to ``TwoPointFunctions``
-(that comes later).
-In the example above, ``a`` and ``b``
-contain the field velocities for the two points, respectively, in an N by M
-array, where N is equal to ``comm_size`` (set in ``TwoPointFunctions``), and M
-is the total number of input fields used by functions.
-``a[:,0]`` and ``b[:,0]`` are the ``velocity_x`` field values because that field
-is listed first in the call to ``TwoPointFunctions``.
-
-The second two variables ``r1`` and ``r2`` are the raw point coordinates for the two points.
-The fifth input is an array containing the normal vector between each pair of points.
-These arrays are all N by 3 arrays.
-Note that they are not used in the example above because they are not needed.
-
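-For example (an illustrative sketch using the same field list as the quick
-example above, with NumPy available as ``np``), a function that only needs
-the x-velocity difference would index column 0 of ``a`` and ``b``:
-
-.. code-block:: python
-
-    def xvel_diff(a, b, r1, r2, vec):
-        # Column 0 corresponds to "velocity_x", the first field passed to
-        # TwoPointFunctions; r1, r2, and vec are accepted but unused here.
-        return np.abs(a[:, 0] - b[:, 0])
-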
-Functions need to output in array format, with dimensionality
-N by R, where R is the dimensionality of the function.
-Multi-dimensional functions can be written that output
-several values simultaneously.
-
-The names of the functions must be unique because they are used to name
-output files, and name collisions will result in over-written output.
-
-.. _tpf_tpf:
-
-Create the Two Point Function Generator Object
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Before any functions can be added, the ``TwoPointFunctions`` object needs
-to be created. It has these inputs:
-
- * ``ds``, required: the dataset to analyze; always the first argument.
- * Field list, required: an ordered list of field names used by the
-   functions. The order in this list will need to be referenced when writing
-   functions. Derived fields may be used here if they are defined first.
- * ``left_edge``, ``right_edge``, three-element lists of floats:
-   Used to define a sub-region of the full volume in which to run the TPF.
- Default=None, which is equivalent to running on the full volume. Both must
- be set to have any effect.
- * ``total_values``, integer: The number of random points to generate globally
- per point separation length. If run in parallel, each task generates its
- fair share of this number.
- Default=1000000.
- * ``comm_size``, integer: How many pairs of points that are stored in the
- data queue objects on each task. Too large wastes memory, and too small will
- result in longer run times due to extra communication cycles. Each unit of
- ``comm_size`` costs (6 + number_of_fields)*8 bytes, where number_of_fields
- is the size of the set of unique data fields used by all the functions added to the
- TPF. In the RMS velocity example above, number_of_fields=3, and a
- ``comm_size`` of 10,000 means each queue costs 10,000*8*(6+3) =
- 720 KB per task.
- Default=10000.
- * ``length_type``, string ("lin" or "log"): Sets how to evenly space the point
-   separation lengths, either linearly or logarithmically (log10).
- Default="lin".
- * ``length_number``, integer: How many point separations to run.
- Default=10.
- * ``length_range``, two-element list of floats: Two values that define
- the minimum and maximum point separations to run over. The lengths that will
- be used are divided into ``length_number`` pieces evenly separated according
- to ``length_type``.
- Default=None, which is equivalent to [sqrt(3)*dx, min_simulation_edge/2.], where
- min_simulation_edge is the length of the smallest edge (1D) of the simulation,
- and dx is the smallest cell size in the dataset. The sqrt(3) is there because
- that is the distance between opposite corners of a unit cube, and that
- guarantees that the point pairs will be in different cells for the most
- refined regions.
- If the first term of the list is -1, the minimum length will be automatically
- set to sqrt(3)*dx, ex: ``length_range = [-1, 10/ds['kpc']]``.
- * ``vol_ratio``, integer: How to multiply-assign subvolumes to the parallel
- tasks. This number must be an integer factor of the total number of tasks or
- very bad things will happen. The default value of 1 will assign one task
- to each subvolume, and there will be an equal number of subvolumes as tasks.
- A value of 2 will assign two tasks to each subvolume and there will be
- one-half as many subvolumes as tasks.
- A value equal to the number of parallel tasks will result in each task
- owning a complete copy of all the fields data, meaning each task will be
- operating on the identical full volume.
- Setting this to -1 automatically adjusts ``vol_ratio`` such that all tasks
- are given the full volume.
- * ``salt``, integer: A number that will be added to the random number generator
- seed. Use this if a different random series of numbers is desired when
- keeping everything else constant from this set: (MPI task count,
- number of ruler lengths, ruler min/max, number of functions,
- number of point pairs per ruler length). Default: 0.
- * ``theta``, float: For random pairs of points, the second point is found by
- traversing a distance along a ray set by the angle (phi, theta) from the
- first point. To keep this angle constant, set ``theta`` to a value in the
- range [0, pi]. Default = None, which will randomize theta for every pair of
- points.
- * ``phi``, float: Similar to theta above, but the range of values is
- [0, 2*pi). Default = None, which will randomize phi for every pair of
- points.
-
-.. _tpf_add_fcns:
-
-Add Functions
-^^^^^^^^^^^^^
-
-Each function is added to the TPF using the ``add_function`` command.
-Each call must have the following inputs:
-
- #. The function name as previously defined.
- #. A list with label(s) for the output(s) of the function.
- Even if the function outputs only one value, this must be a list.
- These labels are used for output.
- #. A list with bools of whether or not to sqrt the output, in the same order
- as the output label list. E.g. ``[True, False]``.
-
-The call to ``add_function`` returns a ``FcnSet`` object. For convenience,
-it is best to store the output in a variable (as in the example above) so
-it can be referenced later.
-The functions can also be referenced through the ``TwoPointFunctions`` object
-in the order in which they were added.
-So ``tpf[0]`` refers to the same thing as ``f1`` in the quick example
-above.
-
-.. _tpf_pdf:
-
-Set PDF Parameters
-^^^^^^^^^^^^^^^^^^
-
-Once the function is added to the TPF, the probability distribution
-bins need to be defined for each using the command ``set_pdf_params``.
-It has these inputs:
-
- * ``bin_type``, string or list of strings ("lin" or "log"):
- How to evenly subdivide the bins over the given range. If the
- function has multiple outputs, the input needs to be a list with equal
- elements.
- * ``bin_range``, list or list of lists:
- Define the min/max values for the bins for the output(s) of the
- function.
- If there are multiple outputs, there must be an equal number of lists.
- * ``bin_number``, integer or list of integers: How many bins to create over
- the min/max range defined by ``bin_range`` evenly spaced by the ``bin_type``
- parameter.
- If there are multiple outputs, there must be an equal number of integers.
-
-The memory costs associated with the PDF bins must be considered when writing
-an analysis script.
-There is one set of PDF bins created per function, per point separation length.
-Each PDF bin costs product(bin_number)*8 bytes, where product(bin_number) is
-the product of the entries in the bin_number list, and this is duplicated
-on every task.
-For multidimensional PDFs, the memory costs can grow very quickly.
-For example, for 3 functions, each with two outputs, with 1000 point
-separation lengths set for the TPF, and with 5000 PDF bins per output dimension,
-the PDF bins will cost: 3*1000*(5000)^2*8=600 GB of memory *per task*!
-
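-That estimate is just the product of those numbers:
-
-.. code-block:: python
-
-    n_functions = 3
-    n_lengths = 1000       # point separation lengths
-    bins_per_dim = 5000
-    n_output_dims = 2
-    bytes_per_task = n_functions * n_lengths * bins_per_dim**n_output_dims * 8
-    print(bytes_per_task / 1e9)  # 600.0 (GB per task)
-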
-Note: ``bin_number`` actually specifies the number of *bin edges* to make,
-rather than the number of bins to make. The number of bins will actually be
-``bin_number``-1 because values are dropped into bins between the two closest
-bin edge values,
-and values outside the min/max bin edges are thrown away.
-If precisely ``bin_number`` bins are wanted, add 1 when setting the PDF
-parameters.
-
-.. _tpf_run:
-
-Run the TPF
-^^^^^^^^^^^
-
-The command ``run_generator()`` pulls the trigger and runs the TPF.
-There are no inputs.
-
-After the generator runs, it will print messages like this, one per
-function::
-
- yt INFO 2010-03-13 12:46:54,541 Function rms_vel had 1 values too high and 4960 too low that were not binned.
-
-Consider changing the range of the PDF bins to reduce or eliminate un-binned
-values.
-
-.. _tpf_output:
-
-Output the Results
-^^^^^^^^^^^^^^^^^^
-
-There are two ways to output data from the TPF for structure functions.
-
- #. The command ``write_out_means`` writes out a text file per function
- that contains the means for each dimension of the function output
- for each point separation length.
- The file is named "function_name.txt", so in the example the file is named
- "rms_vel.txt".
- In the example above, the ``sqrt=True`` option is turned on, which square-roots
- the mean values. Here is some example output for the RMS velocity example::
-
- # length count RMSvdiff
- 7.81250e-03 95040 8.00152e+04
- 1.24016e-02 100000 1.07115e+05
- 1.96863e-02 100000 1.53741e+05
- 3.12500e-02 100000 2.15070e+05
- 4.96063e-02 100000 2.97069e+05
- 7.87451e-02 99999 4.02917e+05
- 1.25000e-01 100000 5.54454e+05
- 1.98425e-01 100000 7.53650e+05
- 3.14980e-01 100000 9.57470e+05
- 5.00000e-01 100000 1.12415e+06
-
- The ``count`` column lists the number of pair points successfully binned
- at that point separation length.
-
- If the output is multidimensional, pass a list of bools to control the
- sqrt column by column (``sqrt=[False, True]``) to ``add_function``.
-    For multidimensional functions, the means are calculated by first
-    collapsing the values in the PDF matrix over the other
-    dimensions, and then multiplying the result by the bin centers for that
-    output dimension. So in the extremely simple fabricated case of:
-
- .. code-block:: python
-
-        import numpy as np
-
-        # Temperature difference bin edges
-        # dimension 0
-        Tdiff_bins = np.array([10, 100, 1000])
-        # Density difference bin edges
-        # dimension 1
-        Ddiff_bins = np.array([50, 500, 5000])
-
-        # 2-D PDF for a point pair separation of 0.05.  Here the first (row)
-        # index runs over the density-difference bins and the second (column)
-        # index over the temperature-difference bins, which is what makes the
-        # axis arguments in the sums below work out.
-        PDF = np.array([[0.3, 0.1],
-                        [0.4, 0.2]])
-
- What the PDF is recording is that there is a 30% probability of getting a
-    temperature difference between [10, 100) while at the same time getting a
- density difference between [50, 500). There is a 40% probability for Tdiff
- in [10, 100) and Ddiff in [500, 5000). The text output of this PDF is
- calculated like this:
-
- .. code-block:: python
-
-        # Temperature
-        T_PDF = PDF.sum(axis=0)
-        # ... which gets ...
-        # T_PDF = [0.7, 0.3]
-        # Then to get the mean, multiply by the centers of the temperature bins.
-        means = T_PDF * 0.5 * (Tdiff_bins[:-1] + Tdiff_bins[1:])
-        # ... which gets ...
-        # means = [38.5, 165]
-        mean_T = means.sum()
-        # ... which gets ...
-        # mean_T = 203.5
-
-        # Density
-        D_PDF = PDF.sum(axis=1)
-        # ... which gets ...
-        # D_PDF = [0.4, 0.6]
-        # As above, with the centers of the density bins...
-        means = D_PDF * 0.5 * (Ddiff_bins[:-1] + Ddiff_bins[1:])
-        mean_D = means.sum()
-        # ... which gets ...
-        # mean_D = 1760
-
- The text file would look something like this::
-
- # length count Tdiff Ddiff
- 0.05 980242 2.03500e+02 1.76000e+3
-
- #. The command ``write_out_arrays()`` writes the raw PDF bins, as well as the
- bin edges for each output dimension to a HDF5 file named
- ``function_name.h5``.
- Here is example content for the RMS velocity script above::
-
- $ h5ls rms_vel.h5
- bin_edges_00_RMSvdiff Dataset {1000}
- bin_edges_names Dataset {1}
- counts Dataset {10}
- lengths Dataset {10}
- prob_bins_00000 Dataset {999}
- prob_bins_00001 Dataset {999}
- prob_bins_00002 Dataset {999}
- prob_bins_00003 Dataset {999}
- prob_bins_00004 Dataset {999}
- prob_bins_00005 Dataset {999}
- prob_bins_00006 Dataset {999}
- prob_bins_00007 Dataset {999}
- prob_bins_00008 Dataset {999}
- prob_bins_00009 Dataset {999}
-
- Every HDF5 file produced will have the datasets ``lengths``,
- ``bin_edges_names``, and ``counts``.
- ``lengths`` contains the list of the pair separation
- lengths used for the TPF, and is identical to the first column in the
- text output file.
- ``bin_edges_names`` lists the name(s) of the dataset(s) that contain the bin
- edge values.
- ``counts`` contains the number of successfully binned point pairs for each
- point separation length, and is equivalent to the second column in the
- text output file.
- In the HDF5 file above, the ``lengths`` dataset looks like this::
-
- $ h5dump -d lengths rms_vel.h5
- HDF5 "rms_vel.h5" {
- DATASET "lengths" {
- DATATYPE H5T_IEEE_F64LE
- DATASPACE SIMPLE { ( 10 ) / ( 10 ) }
- DATA {
- (0): 0.0078125, 0.0124016, 0.0196863, 0.03125, 0.0496063, 0.0787451,
- (6): 0.125, 0.198425, 0.31498, 0.5
- }
- }
- }
-
- There are ten length values. ``prob_bins_00000`` is the PDF for pairs of
- points separated by the first length value given, which is 0.0078125.
- Points separated by 0.0124016 are recorded in ``prob_bins_00001``, and so
- on.
- The entries in the ``prob_bins`` datasets are the raw PDF for that function
- for that point separation length.
- If the function has multiple outputs, the arrays stored in the datasets
- are multidimensional.
-
- ``bin_edges_names`` looks like this::
-
- $ h5dump -d bin_edges_names rms_vel.h5
- HDF5 "rms_vel.h5" {
- DATASET "bin_edges_names" {
- DATATYPE H5T_STRING {
- STRSIZE 22;
- STRPAD H5T_STR_NULLPAD;
- CSET H5T_CSET_ASCII;
- CTYPE H5T_C_S1;
- }
- DATASPACE SIMPLE { ( 1 ) / ( 1 ) }
- DATA {
- (0): "/bin_edges_00_RMSvdiff"
- }
- }
- }
-
- This gives the names of the datasets that contain the bin edges, in the
- same order as the function output the data.
- If the function outputs several items, there will be more than one
-    dataset listed in ``bin_edges_names``.
- ``bin_edges_00_RMSvdiff`` therefore contains the (dimension 0) bin edges
- as specified when the PDF parameters were set.
- If there were other output fields, they would be named
- ``bin_edges_01_outfield1``, ``bin_edges_02_outfield2`` respectively.
-
-.. _tpf_strategies:
-
-Strategies for Computational Efficiency
----------------------------------------
-
-Here are a few recommendations that will make the function generator
-run as quickly as possible, in particular when running in parallel.
-
- * Calculate how much memory the data fields and PDFs will require, and
- figure out what fraction can fit on a single compute node. For example
- (ignoring the PDF memory costs), if four data fields are required, and each
- takes up 8GB of memory (as in each field has 1e9 doubles), 32GB total is
- needed. If the analysis is being run on a machine with 4GB per node,
- at least eight nodes must be used (but in practice it is often just under
- 4GB available to applications, so more than eight nodes are needed).
- The number of nodes gives the minimal number of MPI tasks to use, which
- corresponds to the minimal volume decomposition required.
- Benchmark tests show that the function generator runs the quickest
- when each MPI task owns as much of the full volume as possible.
- If this number of MPI tasks calculated above is fewer than desired due to
- the number of pairs to be generated, instead of further subdividing the volume,
- use the ``vol_ratio`` parameter to multiply-assign tasks to the same subvolume.
- The total number of compute nodes will have to be increased because field
- data is being duplicated in memory, but tests have shown that things run
- faster in this mode. The bottom line: pick a vol_ratio that is as large
- as possible.
-
- * The ideal ``comm_size`` appears to be around 1e5 or 1e6.
-
- * If possible, write the functions using only Numpy functions and methods.
- The input and output must be in array format, but the logic inside the function
- need not be. However, it will run much slower if optimized methods are not used.
-
- * Run a few test runs before doing a large run so that the PDF parameters can
- be correctly set.
-
-
-Advanced Two Point Function Techniques
---------------------------------------
-
-Density Threshold
-^^^^^^^^^^^^^^^^^
-
-If points are to only be compared if they both are above some density threshold,
-simply pass the density field to the function, and return a value
-that lies outside the PDF min/max if the density is too low.
-Here are the modifications to the RMS velocity example that require a gas
-density of at least 1e-26 g cm^-3 at each point:
-
-.. code-block:: python
-
- def rms_vel(a, b, r1, r2, vec):
- # Pick out points with only good densities
- a_good = a[:,3] >= 1.e-26
- b_good = b[:,3] >= 1.e-26
- # Pick out the pairs with both good densities
- both_good = np.bitwise_and(a_good, b_good)
- # Operate only on the velocity columns
- vdiff = a[:,0:3] - b[:,0:3]
- np.power(vdiff, 2.0, vdiff)
- vdiff = np.sum(vdiff, axis=1)
- # Multiplying by a boolean array has the effect of multiplying by 1 for
- # True, and 0 for False. This operation below will force pairs of not
- # good points to zero, outside the PDF (see below), and leave good
- # pairs unchanged.
- vdiff *= both_good
- return vdiff
-
- ...
- tpf = TwoPointFunctions(ds, ["velocity_x", "velocity_y", "velocity_z", "density"],
- total_values=1e5, comm_size=10000,
- length_number=10, length_range=[1./128, .5],
- length_type="log")
-
- tpf.add_function(rms_vel, ['RMSvdiff'], [False])
- tpf[0].set_pdf_params(bin_type='log', bin_range=[5e4, 5.5e13], bin_number=1000)
-
-Because 0 is outside of the ``bin_range``, a pair of points that does not
-satisfy the density requirement does not contribute to the PDF.
-If density cutoffs are to be done in this fashion, the fractional volume that is
-above the density threshold should be calculated first, and ``total_values``
-multiplied by the square of the inverse of that fraction. This factor is
-greater than one, so more point pairs are generated to compensate for the
-discarded pairs.
-
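-For example (a sketch with a made-up fraction), if a quarter of the volume
-lies above the threshold:
-
-.. code-block:: python
-
-    frac_above = 0.25                  # measured beforehand for your dataset
-    boost = (1.0 / frac_above)**2      # 16.0
-    total_values = int(1e5 * boost)    # request 1,600,000 pairs instead of 100,000
-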
-Multidimensional PDFs
-^^^^^^^^^^^^^^^^^^^^^
-
-It is easy to modify the example above to output in multiple dimensions. In
-this example, the ratio of the densities of the two points is recorded at
-the same time as the velocity differences.
-
-.. code-block:: python
-
- from yt.mods import *
- from yt.analysis_modules.two_point_functions.api import *
-
- ds = load("data0005")
-
-    # Calculate the S in RMS velocity difference between the two points.
-    # Also store the ratio of densities (keeping them >= 1).
-    # All functions have five inputs. The first two are containers
-    # for field values, the second two are the raw point coordinates
-    # for the point pair, and the fifth is the normal vector between them.
-    # The name of the function is used to name output files.
-    def rms_vel_D(a, b, r1, r2, vec):
-        # Operate only on the velocity columns
-        vdiff = a[:,0:3] - b[:,0:3]
-        np.power(vdiff, 2.0, vdiff)
-        vdiff = np.sum(vdiff, axis=1)
-        # Element-wise density ratio, kept >= 1
-        Dratio = np.maximum(a[:,3]/b[:,3], b[:,3]/a[:,3])
-        return [vdiff, Dratio]
-
- # Initialize a function generator object.
- # Set the number of pairs of points to calculate, how big a data queue to
- # use, the range of pair separations and how many lengths to use,
- # and how to divide that range (linear or log).
- tpf = TwoPointFunctions(ds, ["velocity_x", "velocity_y", "velocity_z", "density"],
- total_values=1e5, comm_size=10000,
- length_number=10, length_range=[1./128, .5],
- length_type="log")
-
-    # Adds the function to the generator.
-    f1 = tpf.add_function(rms_vel_D, ['RMSvdiff', 'Dratio'], [True, False])
-
- # Define the bins used to store the results of the function.
- # Note that the bin edges can have different division, "lin" and "log".
- # In particular, a bin edge of 0 doesn't play well with "log".
- f1.set_pdf_params(bin_type=['log', 'lin'],
- bin_range=[[5e4, 5.5e13], [1., 10000.]],
- bin_number=[1000, 1000])
-
- # Runs the functions.
- tpf.run_generator()
-
- # This calculates the M in RMS and writes out a text file with
- # the RMS values and the lengths. The R happens because sqrt=[True, False]
- # in add_function.
- # The file is named 'rms_vel_D.txt'. It will sqrt only the MS velocity column.
- tpf.write_out_means()
- # Writes out the raw PDF bins and bin edges to a HDF5 file.
- # The file is named 'rms_vel_D.h5'.
- tpf.write_out_arrays()
-
-Two-Point Correlation Functions
--------------------------------
-
-In a Gaussian random field of galaxies, the probability of finding a pair of
-galaxies within the volumes :math:`dV_1` and :math:`dV_2` is
-
-.. math::
-
- dP = n^2 dV_1 dV_2
-
-where n is the average number density of galaxies. Real galaxies are not
-distributed randomly, rather they tend to be clustered on a characteristic
-length scale.
-Therefore, the probability of two galaxies being paired is a function of
-radius
-
-.. math::
-
- dP = n^2 (1 + \xi(\mathbf{r}_{12})) dV_1 dV_2
-
-where :math:`\xi(\mathbf{r}_{12})` gives the excess probability as a function of
-:math:`\mathbf{r}_{12}`,
-and is the two-point correlation function.
-Values of :math:`\xi` greater than zero indicate an excess of pairs over a
-random distribution, and vice versa.
-In order to use the TPF to calculate two-point correlation functions,
-the number of pairs of galaxies between the two dV volumes is measured.
-A PDF is built that gives the probabilities of finding that number of pairs.
-To find the excess probability, a function ``write_out_correlation`` does
-something similar to ``write_out_means`` (above), but also normalizes by the
-number density of galaxies and the dV volumes.
-As an aside, a good rule of thumb is that
-for galaxies, :math:`\xi(r) = (r_0/r)^{1.8}` where :math:`r_0=5` Mpc/h.
-
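-Plugging a couple of radii into that rule of thumb gives a feel for the
-numbers:
-
-.. code-block:: python
-
-    r0 = 5.0  # Mpc/h
-
-    def xi(r):
-        # Rule-of-thumb two-point correlation for galaxies; r in Mpc/h.
-        return (r0 / r)**1.8
-
-    print(xi(5.0), xi(10.0))  # 1.0 and roughly 0.29
-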
-.. image:: _images/2ptcorrelation.png
- :width: 275
- :height: 192
-
-It is possible to calculate the correlation function for galaxies using
-the TPF using a script based on the example below.
-Unlike the figure above, the volumes are spherical.
-This script can be run in parallel.
-
-.. code-block:: python
-
- from yt.mods import *
- from yt.utilities.kdtree import *
- from yt.analysis_modules.two_point_functions.api import *
-
- # Specify the dataset on which we want to base our work.
- ds = load('data0005')
-
-    # Read in the halo centers of mass.
-    CoM = []
-    data = open('HopAnalysis.out', 'r')
-    for line in data:
-        if '#' in line: continue
-        line = line.split()
-        xp = float(line[7])
-        yp = float(line[8])
-        zp = float(line[9])
-        CoM.append(np.array([xp, yp, zp]))
-    data.close()
-
-    # This is the same dV as in the formulation of the two-point correlation.
-    dV = 0.05
-    # Radius of a sphere with volume dV.
-    radius = (3. * dV / (4. * np.pi))**(1. / 3.)
-
- # Instantiate our TPF object.
- # For technical reasons (hopefully to be fixed someday) `vol_ratio`
- # needs to be equal to the number of tasks used if this is run
- # in parallel. A value of -1 automatically does this.
- tpf = TwoPointFunctions(ds, ['x'],
- total_values=1e7, comm_size=10000,
- length_number=11, length_range=[2*radius, .5],
- length_type="lin", vol_ratio=-1)
-
- # Build the kD tree of halos. This will be built on all
- # tasks so it shouldn't be too large.
- # All of these need to be set even if they're not used.
- # Convert the data to fortran major/minor ordering
- add_tree(1)
- fKD.t1.pos = np.array(CoM).T
- fKD.t1.nfound_many = np.empty(tpf.comm_size, dtype='int64')
- fKD.t1.radius = radius
- # These must be set because the function find_many_r_nearest
- # does more than how we are using it, and it needs these.
- fKD.t1.radius_n = 1
- fKD.t1.nn_dist = np.empty((fKD.t1.radius_n, tpf.comm_size), dtype='float64')
- fKD.t1.nn_tags = np.empty((fKD.t1.radius_n, tpf.comm_size), dtype='int64')
- # Makes the kD tree.
- create_tree(1)
-
- # Remembering that two of the arguments for a function are the raw
- # coordinates, we define a two-point correlation function as follows.
- def tpcorr(a, b, r1, r2, vec):
- # First, we will find out how many halos are within fKD.t1.radius of our
- # first set of points, r1, which will be stored in fKD.t1.nfound_many.
- fKD.t1.qv_many = r1.T
- find_many_r_nearest(1)
- nfirst = fKD.t1.nfound_many.copy()
- # Second.
- fKD.t1.qv_many = r2.T
- find_many_r_nearest(1)
- nsecond = fKD.t1.nfound_many.copy()
- # Now we simply multiply these two arrays together. The rest comes later.
- nn = nfirst * nsecond
- return nn
-
- # Now we add the function to the TPF.
- # ``corr_norm`` is used to normalize the correlation function.
- tpf.add_function(function=tpcorr, out_labels=['tpcorr'], sqrt=[False],
- corr_norm=dV**2 * len(CoM)**2)
-
- # And define how we want to bin things.
- # It has to be linear bin_type because we want 0 to be in the range.
- # The big end of bin_range should correspond to the square of the maximum
- # number of halos expected inside dV in the volume.
- tpf[0].set_pdf_params(bin_type='lin', bin_range=[0, 2500000], bin_number=1000)
-
- # Runs the functions.
- tpf.run_generator()
-
- # Write out the data to "tpcorr_correlation.txt"
- # The file has two columns, the first is radius, and the second is
- # the value of \xi.
- tpf.write_out_correlation()
-
- # Empty the kdtree
- del fKD.t1.pos, fKD.t1.nfound_many, fKD.t1.nn_dist, fKD.t1.nn_tags
- free_tree(1)
-
-If one wishes to operate on field values, rather than discrete objects like
-halos, the situation is a bit simpler, but still a bit confusing.
-In the example below, we find the two-point correlation of cells above
-a particular density threshold.
-Instead of constant-size spherical dVs, the dVs here are the sizes of the grid
-cells at each end of the rulers.
-Because there can be cells of different volumes when using AMR,
-the number of pairs counted is actually the number of most-refined-cells
-contained within the volume of the cell.
-For one level of refinement, this means that a root-grid cell has the equivalent
-of 8 refined grid cells in it.
-Therefore, when the number of pairs are counted, it has to be normalized by
-the volume of the cells.
-
-.. code-block:: python
-
- from yt.mods import *
- from yt.utilities.kdtree import *
- from yt.analysis_modules.two_point_functions.api import *
-
- # Specify the dataset on which we want to base our work.
- ds = load('data0005')
-
- # We work in simulation's units, these are for conversion.
- vol_conv = ds['cm'] ** 3
- sm = ds.index.get_smallest_dx()**3
-
- # Our density limit, in gm/cm**3
- dens = 2e-31
-
- # We need to find out how many cells (equivalent to the most refined level)
- # are denser than our limit overall.
- def _NumDens(data):
- select = data["density"] >= dens
- cv = data["cell_volume"][select] / vol_conv / sm
- return (cv.sum(),)
- def _combNumDens(data, d):
- return d.sum()
- add_quantity("TotalNumDens", function=_NumDens,
- combine_function=_combNumDens, n_ret=1)
-    ad = ds.all_data()
-    n = ad.quantities["TotalNumDens"]()
-
-    print(n, 'cells (most-refined equivalent) above the density threshold')
-
- # Instantiate our TPF object.
- tpf = TwoPointFunctions(ds, ['density', 'cell_volume'],
- total_values=1e5, comm_size=10000,
- length_number=11, length_range=[-1, .5],
- length_type="lin", vol_ratio=1)
-
- # Define the density threshold two point correlation function.
- def dens_tpcorr(a, b, r1, r2, vec):
- # We want to find out which pairs of Densities from a and b are both
- # dense enough. The first column is density.
- abig = (a[:,0] >= dens)
- bbig = (b[:,0] >= dens)
- both = np.bitwise_and(abig, bbig)
- # We normalize by the volume of the most refined cells.
- both = both.astype('float')
- both *= a[:,1] * b[:,1] / vol_conv**2 / sm**2
- return both
-
- # Now we add the function to the TPF.
- # ``corr_norm`` is used to normalize the correlation function.
- tpf.add_function(function=dens_tpcorr, out_labels=['tpcorr'], sqrt=[False],
- corr_norm=n**2 * sm**2)
-
- # And define how we want to bin things.
- # It has to be linear bin_type because we want 0 to be in the range.
- # The top end of bin_range should be 2^(2l)+1, where l is the number of
- # levels, and bin_number=2^(2l)+2
- tpf[0].set_pdf_params(bin_type='lin', bin_range=[0, 2], bin_number=3)
-
- # Runs the functions.
- tpf.run_generator()
-
- # Write out the data to "dens_tpcorr_correlation.txt"
- # The file has two columns, the first is radius, and the second is
- # the value of \xi.
- tpf.write_out_correlation()
diff --git a/doc/source/analyzing/analysis_modules/XrayEmissionFields.ipynb b/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb
similarity index 82%
rename from doc/source/analyzing/analysis_modules/XrayEmissionFields.ipynb
rename to doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb
index 002d9637f86..39a9429cb6f 100644
--- a/doc/source/analyzing/analysis_modules/XrayEmissionFields.ipynb
+++ b/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb
@@ -5,7 +5,7 @@
"metadata": {},
"source": [
"> Note: If you came here trying to figure out how to create simulated X-ray photons and observations,\n",
- " you should go [here](photon_simulator.html) instead."
+ " you should go [here](http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim/) instead."
]
},
{
@@ -35,7 +35,6 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false,
"scrolled": false
},
"outputs": [],
@@ -67,9 +66,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"print (xray_fields)"
@@ -85,13 +82,11 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"sp = ds.sphere(\"c\", (2.0, \"Mpc\"))\n",
- "print (sp.quantities.total_quantity(\"xray_luminosity_0.5_7.0_keV\"))"
+ "print (sp.quantities.total_quantity((\"gas\",\"xray_luminosity_0.5_7.0_keV\")))"
]
},
{
@@ -105,12 +100,12 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false,
"scrolled": false
},
"outputs": [],
"source": [
- "slc = yt.SlicePlot(ds, 'z', ['xray_emissivity_0.5_7.0_keV','xray_photon_emissivity_0.5_7.0_keV'],\n",
+ "slc = yt.SlicePlot(ds, 'z', [('gas', 'xray_emissivity_0.5_7.0_keV'),\n",
+ " ('gas', 'xray_photon_emissivity_0.5_7.0_keV')],\n",
" width=(0.75, \"Mpc\"))\n",
"slc.show()"
]
@@ -128,7 +123,6 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false,
"scrolled": false
},
"outputs": [],
@@ -151,9 +145,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"print (xray_fields2)"
@@ -163,22 +155,20 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Note also that the energy range now corresponds to the *observer* frame, whereas in the source frame the energy range is between `emin*(1+redshift)` and `emax*(1+redshift)`. Let's zoom in on a galaxy and make a projection of the intensity fields:"
+ "Note also that the energy range now corresponds to the *observer* frame, whereas in the source frame the energy range is between `emin*(1+redshift)` and `emax*(1+redshift)`. Let's zoom in on a galaxy and make a projection of the energy intensity field:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false,
"scrolled": false
},
"outputs": [],
"source": [
- "prj = yt.ProjectionPlot(ds2, \"x\", [\"xray_intensity_0.5_2.0_keV\", \"xray_photon_intensity_0.5_2.0_keV\"],\n",
+ "prj = yt.ProjectionPlot(ds2, \"x\", (\"gas\",\"xray_intensity_0.5_2.0_keV\"),\n",
" center=\"max\", width=(40, \"kpc\"))\n",
"prj.set_zlim(\"xray_intensity_0.5_2.0_keV\", 1.0e-32, 5.0e-24)\n",
- "prj.set_zlim(\"xray_photon_intensity_0.5_2.0_keV\", 1.0e-24, 5.0e-16)\n",
"prj.show()"
]
},
@@ -193,12 +183,43 @@
" abundance information from your dataset. Finally, if your dataset contains no abundance information,\n",
" a primordial hydrogen mass fraction (X = 0.76) will be assumed."
]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, if you want to place the source at a local, non-cosmological distance, you can forego the `redshift` and `cosmology` arguments and supply a `dist` argument instead, which is either a `(value, unit)` tuple or a `YTQuantity`. Note that here the redshift is assumed to be zero. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "xray_fields3 = yt.add_xray_emissivity_field(ds2, 0.5, 2.0, dist=(1.0,\"Mpc\"), metallicity=(\"gas\", \"metallicity\"), \n",
+ " table_type='cloudy')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "prj = yt.ProjectionPlot(ds2, \"x\", (\"gas\", \"xray_photon_intensity_0.5_2.0_keV\"),\n",
+ " center=\"max\", width=(40, \"kpc\"))\n",
+ "prj.set_zlim(\"xray_photon_intensity_0.5_2.0_keV\", 1.0e-24, 5.0e-16)\n",
+ "prj.show()"
+ ]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
- "display_name": "Python [default]",
+ "display_name": "Python 3",
"language": "python",
"name": "python3"
},
@@ -212,7 +233,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.2"
+ "version": "3.7.1"
}
},
"nbformat": 4,
diff --git a/doc/source/analyzing/analysis_modules/clump_finding.rst b/doc/source/analyzing/domain_analysis/clump_finding.rst
similarity index 100%
rename from doc/source/analyzing/analysis_modules/clump_finding.rst
rename to doc/source/analyzing/domain_analysis/clump_finding.rst
diff --git a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst b/doc/source/analyzing/domain_analysis/cosmology_calculator.rst
similarity index 100%
rename from doc/source/analyzing/analysis_modules/cosmology_calculator.rst
rename to doc/source/analyzing/domain_analysis/cosmology_calculator.rst
diff --git a/doc/source/analyzing/domain_analysis/index.rst b/doc/source/analyzing/domain_analysis/index.rst
new file mode 100644
index 00000000000..d844a6c1a1b
--- /dev/null
+++ b/doc/source/analyzing/domain_analysis/index.rst
@@ -0,0 +1,76 @@
+.. _domain-analysis:
+
+Domain-Specific Analysis
+========================
+
+yt powers a number of modules that provide specialized analysis tools
+relevant to one or a few domains. Some of these are internal to yt,
+but many exist as external packages, either maintained by the yt
+project or independently.
+
+Internal Analysis Modules
+-------------------------
+
+These modules exist within yt itself.
+
+.. note::
+
+ As of yt version 3.5, most of the astrophysical analysis tools
+ have been moved to the :ref:`yt-astro` and :ref:`attic`
+ packages. See below for more information.
+
+.. toctree::
+ :maxdepth: 2
+
+ cosmology_calculator
+ clump_finding
+ xray_emission_fields
+
+External Analysis Modules
+-------------------------
+
+These are external packages maintained by the yt project.
+
+.. _yt-astro:
+
+yt Astro Analysis
+^^^^^^^^^^^^^^^^^
+
+Source: https://github.com/yt-project/yt_astro_analysis
+
+Documentation: https://yt-astro-analysis.readthedocs.io/
+
+The ``yt_astro_analysis`` package houses most of the astrophysical
+analysis tools that were formerly in the ``yt.analysis_modules``
+import. These include halo finding, custom halo analysis, synthetic
+observations, and exports to radiative transfer codes. See
+:ref:`yt_astro_analysis:modules` for a list of available
+functionality.
+
+.. _attic:
+
+yt Attic
+^^^^^^^^
+
+Source: https://github.com/yt-project/yt_attic
+
+Documentation: https://yt-attic.readthedocs.io/
+
+The ``yt_attic`` package contains former yt analysis modules that have
+fallen by the wayside. These may have small bugs or were simply
+not kept up to date as yt evolved. Tools in here are looking for
+a new owner and a new home. If you find something in here that
+you'd like to bring back to life, either by adding it to
+:ref:`yt-astro` or as part of your own package, you are welcome
+to it! If you'd like any help, let us know! See
+:ref:`yt_attic:attic_modules` for an inventory of the
+attic.
+
+Extensions
+----------
+
+There are a number of independent, yt-related packages for things
+like visual effects, interactive widgets, synthetic absorption
+spectra, X-ray observations, and merger trees. See the
+`yt Extensions <https://yt-project.org/extensions.html>`_ page for
+a list of available extension packages.
diff --git a/doc/source/analyzing/analysis_modules/xray_data_README.rst b/doc/source/analyzing/domain_analysis/xray_data_README.rst
similarity index 100%
rename from doc/source/analyzing/analysis_modules/xray_data_README.rst
rename to doc/source/analyzing/domain_analysis/xray_data_README.rst
diff --git a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst b/doc/source/analyzing/domain_analysis/xray_emission_fields.rst
similarity index 100%
rename from doc/source/analyzing/analysis_modules/xray_emission_fields.rst
rename to doc/source/analyzing/domain_analysis/xray_emission_fields.rst
diff --git a/doc/source/analyzing/fields.rst b/doc/source/analyzing/fields.rst
index 1595d350d77..2388c040fad 100644
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -381,6 +381,65 @@ different magnetic field units in the different :ref:`unit systems <unit_systems>`
+yt's unit handling is provided by `an external package <https://unyt.readthedocs.io/>`_, ``unyt``.
+
+For a detailed discussion of how to use ``unyt``, we suggest taking a look at
+the unyt documentation available at https://unyt.readthedocs.io/. However, yt
+adds additional capabilities above and beyond what is provided by ``unyt``
+alone; we describe those capabilities below.
+
+Selecting data from a data object
+---------------------------------
+
+The data returned by yt will have units attached to it. For example, let's query
+a data object for the ``('gas', 'density')`` field:
+
+ >>> import yt
+ >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+ >>> dd = ds.all_data()
+ >>> dd['gas', 'density']
+ unyt_array([4.92775113e-31, 4.94005233e-31, 4.93824694e-31, ...,
+ 1.12879234e-25, 1.59561490e-25, 1.09824903e-24], 'g/cm**3')
+
+We can see that we get back a ``unyt_array`` instance. A ``unyt_array`` is a
+subclass of NumPy's ``ndarray`` type that has units attached to it:
+
+ >>> dd['gas', 'density'].units
+ g/cm**3
+
+It is straightforward to convert data to different units:
+
+ >>> dd['gas', 'density'].to('Msun/kpc**3')
+ unyt_array([7.28103608e+00, 7.29921182e+00, 7.29654424e+00, ...,
+ 1.66785569e+06, 2.35761291e+06, 1.62272618e+07], 'Msun/kpc**3')
+
+For more details about working with ``unyt_array``, see the
+`unyt documentation <https://unyt.readthedocs.io/>`__.
+
+Applying Units to Data
+----------------------
+
+A ``unyt_array`` can be created from a list, tuple, or NumPy array using
+multiplication with a ``Unit`` object. For convenience, each yt dataset has a
+``units`` attribute one can use to obtain unit objects for this purpose:
+
+ >>> data = np.random.random((100, 100))
+ >>> data_with_units = data * ds.units.gram
+
+All units known to the dataset will be available via ``ds.units``, including
+code units and comoving units.
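+
+For example, multiplying by ``u.code_length`` from ``ds.units`` attaches that
+dataset's definition of the unit to the data, and comoving symbols such as
+``u.Mpccm`` work the same way. A minimal sketch, with illustrative variable
+names:
+
+ >>> import numpy as np
+ >>> u = ds.units
+ >>> lengths = np.random.random(10) * u.code_length
+ >>> lengths_cgs = lengths.in_cgs()  # converted with this dataset's definitions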
+
+Derived Field Units
+-------------------
+
+Special care often needs to be taken to ensure the result of a derived field
+will come out in the correct units. The yt unit system will double-check for you
+to make sure you are not accidentally making a unit conversion mistake. To see
+what that means in practice, let's define a derived field corresponding to the
+square root of the gas density:
+
+ >>> import yt
+ >>> import numpy as np
+
+ >>> def root_density(field, data):
+ ... return np.sqrt(data['gas', 'density'])
+
+ >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+ >>> ds.add_field(("gas", "root_density"), units="(g/cm**3)**(1/2)",
+ ... function=root_density, sampling_type='cell')
+
+ >>> ad = ds.all_data()
+ >>> ad['gas', 'root_density']
+ unyt_array([7.01979425e-16, 7.02855059e-16, 7.02726614e-16, ...,
+ 3.35975050e-13, 3.99451486e-13, 1.04797377e-12], 'sqrt(g)/cm**(3/2)')
+
+No special unit logic needs to happen inside of the function: the result of
+``np.sqrt`` will have the correct units:
+
+ >>> np.sqrt(ad['gas', 'density'])
+ unyt_array([7.01979425e-16, 7.02855059e-16, 7.02726614e-16, ...,
+ 3.35975050e-13, 3.99451486e-13, 1.04797377e-12], 'sqrt(g)/cm**(3/2)')
+
+One could also specify any other units that have dimensions of square root of
+density and yt would automatically convert the return value of the field
+function to the specified units. An error would be raised if the units are not
+dimensionally equivalent to the return value of the field function.
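+
+For example, one could request galactic-style units for the same field, while a
+dimensionally inconsistent choice would fail once the field is evaluated. A
+sketch (the field names here are illustrative and the exact exception raised
+depends on the yt version):
+
+ >>> ds.add_field(("gas", "root_density_gal"), units="Msun**(1/2)/kpc**(3/2)",
+ ...              function=root_density, sampling_type='cell')  # same dimensions: fine
+ >>> ds.add_field(("gas", "root_density_bad"), units="g/cm**3",
+ ...              function=root_density, sampling_type='cell')  # querying this raises a units error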
+
+Code Units
+----------
+
+All yt datasets are associated with a "code" unit system that corresponds to
+whatever unit system the data is represented in on disk. Let's take a look at
+the data in an Enzo simulation, specifically the ``("enzo", "Density")`` field:
+
+ >>> import yt
+ >>> ds = yt.load('Enzo_64/DD0043/data0043')
+ >>> ad = ds.all_data()
+ >>> ad["enzo", "Density"]
+ unyt_array([6.74992726e-02, 6.12111635e-02, 8.92988636e-02, ...,
+ 9.09875931e+01, 5.66932465e+01, 4.27780263e+01], 'code_mass/code_length**3')
+
+we see that yt returns the data in units of ``code_mass/code_length**3``. This
+is the density unit formed out of the base units of mass and length in the
+internal unit system in the simulation. We can see the values of these units by
+looking at the ``length_unit`` and ``mass_unit`` attributes of the dataset
+object:
+
+ >>> ds.length_unit
+ unyt_quantity(128, 'Mpccm/h')
+ >>> ds.mass_unit
+ unyt_quantity(4.89045159e+50, 'g')
+
+Both of these have a value of 1 in the code unit system:
+
+ >>> ds.length_unit.to('code_length')
+ unyt_quantity(1., 'code_length')
+ >>> ds.mass_unit.to('code_mass')
+ unyt_quantity(1., 'code_mass')
+
+In addition to ``length_unit`` and ``mass_unit``, there are also ``time_unit``,
+``velocity_unit``, and ``magnetic_unit`` attributes for this dataset. Some
+frontends also define ``density_unit``, ``pressure_unit``,
+``temperature_unit``, and ``specific_energy_unit`` attributes. If these are not
+defined, the corresponding unit is calculated from the base length, mass, and
+time units.
+Each of these attributes corresponds to a unit in the code unit system:
+
+ >>> [un for un in dir(ds.units) if un.startswith('code')]
+ ['code_density',
+ 'code_length',
+ 'code_magnetic',
+ 'code_mass',
+ 'code_metallicity',
+ 'code_pressure',
+ 'code_specific_energy',
+ 'code_temperature',
+ 'code_time',
+ 'code_velocity']
+
+You can use these unit names to convert arbitrary data into a dataset's code
+unit system:
+
+ >>> u = ds.units
+ >>> data = 10**-30 * u.g / u.cm**3
+ >>> data.to('code_density')
+ unyt_quantity(0.36217187, 'code_density')
+
+Note how in this example we used ``ds.units`` instead of the top-level ``unyt``
+namespace or ``yt.units``. This is because the units from ``ds.units`` know
+about the dataset's code unit system and can convert data into it. Unit objects
+from ``unyt`` or ``yt.units`` will not know about any particular dataset's unit
+system.
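+
+The dataset also provides the convenience functions ``ds.quan`` and ``ds.arr``,
+which create quantities and arrays tied to the dataset's unit registry, so code
+units and comoving units are available directly. A minimal sketch:
+
+ >>> a = ds.quan(3, 'code_length')     # a quantity in this dataset's code units
+ >>> b = ds.arr([1, 2, 3], 'Mpccm/h')  # comoving units work as well
+ >>> a_cgs = a.in_cgs()                # converted using this dataset's definitions
+ >>> b_proper = b.to('Mpc')            # comoving to proper at this dataset's redshift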
+
+Comoving units for Cosmological Simulations
+-------------------------------------------
+
+The length unit of the dataset we loaded above is a cosmological unit:
+
+ >>> print(ds.length_unit)
+ 128 Mpccm/h
+
+In English, this says that the length unit is 128 megaparsecs in the comoving
+frame, scaled as if the Hubble constant were 100 km/s/Mpc. Although :math:`h`
+isn't really a unit, yt treats it as one for the purposes of the unit system.
+
+As an aside, `Darren Croton's research note <https://arxiv.org/abs/1308.4150>`_
+on the history, use, and interpretation of :math:`h` as it appears in the
+astronomical literature is pretty much required reading for anyone who has to
+deal with factors of :math:`h` every now and then.
+
+In yt, comoving length unit symbols are named following the pattern
+``<length unit>cm``, i.e. ``pccm`` for a comoving parsec or ``mcm`` for a
+comoving meter. A comoving length unit is different from the normal length
+unit by a factor of :math:`(1+z)`:
+
+ >>> u = ds.units
+ >>> print((1*u.Mpccm)/(1*u.Mpc))
+ 0.9986088499304777 dimensionless
+ >>> 1 / (1 + ds.current_redshift)
+ 0.9986088499304776
+
+As we saw before, :math:`h` is treated like any other unit symbol. It has dimensionless
+units, just like a scalar:
+
+ >>> (1*u.Mpc)/(1*u.Mpc/u.h)
+ unyt_quantity(0.71, '(dimensionless)')
+ >>> ds.hubble_constant
+ 0.71
+
+Using parsec as an example,
+
+ * ``pc``
+ Proper parsecs, :math:`\rm{pc}`.
+
+ * ``pccm``
+ Comoving parsecs, :math:`\rm{pc}/(1+z)`.
+
+ * ``pccm/h``
+ Comoving parsecs normalized by the scaled hubble constant, :math:`\rm{pc}/h/(1+z)`.
+
+ * ``pc/h``
+ Proper parsecs, normalized by the scaled hubble constant, :math:`\rm{pc}/h`.
+
+Overriding Code Unit Definitions
+--------------------------------
+
+On occasion, you might have a dataset for a supported frontend that does not
+have the conversions to code units accessible or you may want to change them
+outright. ``yt`` provides a mechanism so that one may provide their own code
+unit definitions to ``yt.load``, which override the default rules for a given
+frontend for defining code units.
+
+This is provided through the ``units_override`` argument to ``yt.load``. We'll
+use an example of an Athena dataset. First, a call to ``yt.load`` without
+``units_override``:
+
+ >>> ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk")
+ >>> ds.length_unit
+ unyt_quantity(1., 'cm')
+ >>> ds.mass_unit
+ unyt_quantity(1., 'g')
+ >>> ds.time_unit
+ unyt_quantity(1., 's')
+ >>> sp1 = ds.sphere("c", (0.1, "unitary"))
+ >>> print(sp1["gas", "density"])
+ [0.05134981 0.05134912 0.05109047 ... 0.14608461 0.14489453 0.14385277] g/cm**3
+
+This particular simulation is of a galaxy cluster merger, so these density values
+are way, way too high. This is happening because Athena does not encode any
+information about the unit system used in the simulation or the output data, so
+yt cannot infer that information and must make an educated guess. In this case
+it incorrectly assumes the data are in CGS units.
+
+However, we know *a priori* what the unit system *should* be, and we can supply
+a ``units_override`` dictionary to ``yt.load`` to override the incorrect
+assumptions yt is making about this dataset. Let's define:
+
+ >>> units_override = {"length_unit": (1.0, "Mpc"),
+ ... "time_unit": (1.0, "Myr"),
+ ... "mass_unit": (1.0e14, "Msun")}
+
+The ``units_override`` dictionary can take the following keys:
+
+ * ``length_unit``
+ * ``time_unit``
+ * ``mass_unit``
+ * ``magnetic_unit``
+ * ``temperature_unit``
+
+and the associated values can be ``(value, "unit")`` tuples, ``unyt_quantity``
+instances, or floats (in the latter case they are assumed to have the
+corresponding cgs unit). Now let's reload the dataset using our
+``units_override`` dict:
+
+ >>> ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
+ ... units_override=units_override)
+ >>> sp = ds.sphere("c",(0.1,"unitary"))
+ >>> print(sp["gas", "density"])
+ [3.47531683e-28 3.47527018e-28 3.45776515e-28 ... 9.88689766e-28
+ 9.80635384e-28 9.73584863e-28] g/cm**3
+
+and we see that the data now have much more sensible values for a galaxy
+cluster merger simulation.
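+
+As a quick check, the dataset's unit attributes should now reflect the override
+we supplied rather than the CGS defaults (a sketch with illustrative variable
+names):
+
+ >>> L = ds.length_unit.to("Mpc")   # now 1 Mpc instead of 1 cm
+ >>> M = ds.mass_unit.to("Msun")    # now 1.0e14 Msun instead of 1 g
+ >>> T = ds.time_unit.to("Myr")     # now 1 Myr instead of 1 s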
+
+Comparing Units From Different Simulations
+------------------------------------------
+
+The code units from different simulations will have different conversions to
+physical units. This can get confusing when working with data from more
+than one simulation or from a single simulation where the units change with
+time.
+
+As an example, let's load up two Enzo datasets from different redshifts in the
+same cosmology simulation, one from high redshift:
+
+ >>> ds1 = yt.load('Enzo_64/DD0002/data0002')
+ >>> ds1.current_redshift
+ 7.8843748886903
+ >>> ds1.length_unit
+ unyt_quantity(128, 'Mpccm/h')
+ >>> ds1.length_unit.in_cgs()
+ unyt_quantity(6.26145538e+25, 'cm')
+
+And another from low redshift:
+
+ >>> ds2 = yt.load('Enzo_64/DD0043/data0043')
+ >>> ds2.current_redshift
+ 0.0013930880640796
+ >>> ds2.length_unit
+ unyt_quantity(128, 'Mpccm/h')
+ >>> ds2.length_unit.in_cgs()
+ unyt_quantity(5.55517285e+26, 'cm')
+
+Now despite the fact that ``'Mpccm/h'`` means different things for the two
+datasets, it's still a well-defined operation to take the ratio of the two
+length units:
+
+ >>> ds2.length_unit / ds1.length_unit
+ unyt_quantity(8.87201539, '(dimensionless)')
+
+Because code units and comoving units are defined relative to a physical unit
+system, ``unyt`` is able to give the correct answer here. So long as the result
+comes out dimensionless or in a physical unit, the answer will be
+well-defined. However, if we want the answer to come out in the internal units
+of one particular dataset, additional care must be taken. For an example where
+this might be an issue, let's try to compute the sum of two comoving distances
+from each simulation:
+
+ >>> d1 = 12 * ds1.units.Mpccm
+ >>> d2 = 12 * ds2.units.Mpccm
+ >>> d1 + d2
+ unyt_quantity(118.46418468, 'Mpccm')
+ >>> d2 + d1
+ unyt_quantity(13.35256754, 'Mpccm')
+
+So this is definitely weird: addition no longer appears to be commutative!
+However, both answers are correct; the confusion arises because ``"Mpccm"``
+is ambiguous in these expressions. In situations like this, ``unyt`` will use
+the definition for units from the leftmost term in an expression, so the first
+example returns data in high-redshift comoving megaparsecs, while the
+second returns data in low-redshift comoving megaparsecs.
+
+Wherever possible, it is best to do calculations in physical units when working
+with more than one dataset. If you need to use comoving units or code units,
+take extra care in your code to avoid ambiguity, for example by converting to an
+unambiguous physical unit before combining data, as sketched below.
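+
+For instance, converting both distances to an unambiguous physical unit before
+combining them removes the ordering dependence entirely (a minimal sketch
+reusing ``d1`` and ``d2`` from above):
+
+ >>> total = d1.to('Mpc') + d2.to('Mpc')    # both operands are proper Mpc
+ >>> total_cgs = d1.in_cgs() + d2.in_cgs()  # or do everything in CGS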
+
diff --git a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
deleted file mode 100644
index 9cf99769b2f..00000000000
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ /dev/null
@@ -1,744 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Dimensional analysis"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The fastest way to get into the unit system is to explore the quantities that live in the `yt.units` namespace:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import meter, gram, kilogram, second, joule\n",
- "print (kilogram*meter**2/second**2 == joule)\n",
- "print (kilogram*meter**2/second**2)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import m, kg, s, W\n",
- "kg*m**2/s**3 == W"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import kilometer\n",
- "three_kilometers = 3*kilometer\n",
- "print (three_kilometers)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import gram, kilogram\n",
- "print (gram+kilogram)\n",
- "\n",
- "print (kilogram+gram)\n",
- "\n",
- "print (kilogram/gram)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "These unit symbols are all instances of a new class we've added to yt 3.0, `YTQuantity`. `YTQuantity` is useful for storing a single data point."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "type(kilogram)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We also provide `YTArray`, which can store arrays of quantities:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "arr = [3,4,5]*kilogram\n",
- "\n",
- "print (arr)\n",
- "\n",
- "print (type(arr))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Creating arrays and quantities"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Most people will interact with the new unit system using `YTArray` and `YTQuantity`. These are both subclasses of numpy's fast array type, `ndarray`, and can be used interchangeably with other NumPy arrays. These new classes make use of the unit system to append unit metadata to the underlying `ndarray`. `YTArray` is intended to store array data, while `YTQuantity` is intended to store scalars in a particular unit system.\n",
- "\n",
- "There are two ways to create arrays and quantities. The first is to explicitly create it by calling the class constructor and supplying a unit string:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units.yt_array import YTArray\n",
- "\n",
- "sample_array = YTArray([1,2,3], 'g/cm**3')\n",
- "\n",
- "print (sample_array)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The unit string can be an arbitrary combination of metric unit names. Just a few examples:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units.yt_array import YTQuantity\n",
- "from yt.units import kboltz\n",
- "from numpy.random import random\n",
- "import numpy as np\n",
- "\n",
- "print (\"Length:\")\n",
- "print (YTQuantity(random(), 'm'))\n",
- "print (YTQuantity(random(), 'cm'))\n",
- "print (YTQuantity(random(), 'Mpc'))\n",
- "print (YTQuantity(random(), 'AU'))\n",
- "print ('')\n",
- "\n",
- "print (\"Time:\")\n",
- "print (YTQuantity(random(), 's'))\n",
- "print (YTQuantity(random(), 'min'))\n",
- "print (YTQuantity(random(), 'hr'))\n",
- "print (YTQuantity(random(), 'day'))\n",
- "print (YTQuantity(random(), 'yr'))\n",
- "print ('')\n",
- "\n",
- "print (\"Mass:\")\n",
- "print (YTQuantity(random(), 'g'))\n",
- "print (YTQuantity(random(), 'kg'))\n",
- "print (YTQuantity(random(), 'Msun'))\n",
- "print ('')\n",
- "\n",
- "print (\"Energy:\")\n",
- "print (YTQuantity(random(), 'erg'))\n",
- "print (YTQuantity(random(), 'g*cm**2/s**2'))\n",
- "print (YTQuantity(random(), 'eV'))\n",
- "print (YTQuantity(random(), 'J'))\n",
- "print ('')\n",
- "\n",
- "print (\"Temperature:\")\n",
- "print (YTQuantity(random(), 'K'))\n",
- "print ((YTQuantity(random(), 'eV')/kboltz).in_cgs())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Dimensional arrays and quantities can also be created by multiplication with another array or quantity:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import kilometer\n",
- "print (kilometer)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "three_kilometers = 3*kilometer\n",
- "print (three_kilometers)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "When working with a YTArray with complicated units, you can use `unit_array` and `unit_quantity` to conveniently apply units to data:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "test_array = YTArray(np.random.random(20), 'erg/s')\n",
- "\n",
- "print (test_array)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`unit_quantity` returns a `YTQuantity` with a value of 1.0 and the same units as the array it is a attached to."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (test_array.unit_quantity)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`unit_array` returns a `YTArray` with the same units and shape as the array it is a attached to and with all values set to 1.0."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (test_array.unit_array)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "These are useful when doing arithmetic:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (test_array + 1.0*test_array.unit_quantity)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (test_array + np.arange(20)*test_array.unit_array)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`. You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (test_array.uq)\n",
- "\n",
- "print (test_array.unit_quantity == test_array.uq)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from numpy import array_equal\n",
- "\n",
- "print (test_array.ua)\n",
- "\n",
- "print (array_equal(test_array.ua, test_array.unit_array))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Unit metadata is encoded in the `units` attribute that hangs off of `YTArray` or `YTQuantity` instances:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import kilometer, erg\n",
- "\n",
- "print (\"kilometer's units:\", kilometer.units)\n",
- "print (\"kilometer's dimensions:\", kilometer.units.dimensions)\n",
- "\n",
- "print ('')\n",
- "\n",
- "print (\"erg's units:\", erg.units)\n",
- "print (\"erg's dimensions: \", erg.units.dimensions)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Arithmetic with `YTQuantity` and `YTArray`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Of course it wouldn't be very useful if all we could do is create data with units. The real power of the new unit system is that we can add, subtract, multiply, and divide using quantities and dimensional arrays:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "a = YTQuantity(3, 'cm')\n",
- "b = YTQuantity(3, 'm')\n",
- "\n",
- "print (a+b)\n",
- "print (b+a)\n",
- "print ('')\n",
- "\n",
- "print ((a+b).in_units('ft'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "a = YTQuantity(42, 'mm')\n",
- "b = YTQuantity(1, 's')\n",
- "\n",
- "print (a/b)\n",
- "print ((a/b).in_cgs())\n",
- "print ((a/b).in_mks())\n",
- "print ((a/b).in_units('km/s'))\n",
- "print ('')\n",
- "\n",
- "print (a*b)\n",
- "print ((a*b).in_cgs())\n",
- "print ((a*b).in_mks())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "m = YTQuantity(35, 'g')\n",
- "a = YTQuantity(9.8, 'm/s**2')\n",
- "\n",
- "print (m*a)\n",
- "print ((m*a).in_units('dyne'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import G, kboltz\n",
- "\n",
- "print (\"Newton's constant: \", G)\n",
- "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n",
- "\n",
- "print (\"Boltzmann constant: \", kboltz)\n",
- "print (\"Boltzmann constant in MKS: \", kboltz.in_mks())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "rho = YTQuantity(1, 'g/cm**3')\n",
- "t_ff = (G*rho)**(-0.5)\n",
- "\n",
- "print (t_ff)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "An exception is raised if we try to do a unit operation that doesn't make any sense:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.utilities.exceptions import YTUnitOperationError\n",
- "\n",
- "a = YTQuantity(3, 'm')\n",
- "b = YTQuantity(5, 'erg')\n",
- "\n",
- "try:\n",
- " print (a+b)\n",
- "except YTUnitOperationError as e:\n",
- " print (e)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A plain `ndarray` or a `YTArray` created with empty units is treated as a dimensionless quantity and can be used in situations where unit consistency allows it to be used: "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "a = YTArray([1.,2.,3.], 'm')\n",
- "b = np.array([2.,2.,2.])\n",
- "\n",
- "print (\"a: \", a)\n",
- "print (\"b: \", b)\n",
- "print (\"a*b: \", a*b)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "c = YTArray([2,2,2])\n",
- "\n",
- "print (\"c: \", c)\n",
- "print (\"a*c: \", a*c)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Saving and Loading `YTArray`s to/from disk"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### HDF5"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To write to HDF5, use `write_hdf5`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "my_dens = YTArray(np.random.random(10), 'Msun/kpc**3')\n",
- "my_temp = YTArray(np.random.random(10), 'K')\n",
- "my_dens.write_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
- "my_temp.write_hdf5(\"my_data.h5\", dataset_name=\"temperature\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Where we used the `dataset_name` keyword argument to create a separate dataset for each array in the same file.\n",
- "\n",
- "We can use the `from_hdf5` classmethod to read the data back in:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "read_dens = YTArray.from_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
- "print (read_dens)\n",
- "print (my_dens)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can use the `info` keyword argument to `write_hdf5` to write some additional data to the file, which will be stored as attributes of the dataset:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "my_vels = YTArray(np.random.normal(10), 'km/s')\n",
- "info = {\"source\":\"galaxy cluster\",\"user\":\"jzuhone\"}\n",
- "my_vels.write_hdf5(\"my_data.h5\", dataset_name=\"velocity\", info=info)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If you want to read/write a dataset from/to a specific group within the HDF5 file, use the `group_name` keyword:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "my_vels.write_hdf5(\"data_in_group.h5\", dataset_name=\"velocity\", info=info, group_name=\"/data/fields\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where we have used the standard HDF5 slash notation for writing a group hierarchy (e.g., group within a group):"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### ASCII"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import yt\n",
- "a = YTArray(np.random.random(size=10), \"cm\")\n",
- "b = YTArray(np.random.random(size=10), \"g\")\n",
- "c = YTArray(np.random.random(size=10), \"s\")\n",
- "yt.savetxt(\"my_data.dat\", [a,b,c], header='My cool data', footer='Data is over', delimiter=\"\\t\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The file we wrote can then be easily used in other contexts, such as plotting in Gnuplot, or loading into a spreadsheet, or just for causal examination. We can quickly check it here:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%%bash \n",
- "more my_data.dat"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "You can see that the header comes first, and then right before the data we have a subheader marking the units of each column. The footer comes after the data. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`yt.loadtxt` can be used to read the same data with units back in, or read data that has been generated from some other source. Just make sure it's in the format above. `loadtxt` can also selectively read from particular columns in the file with the `usecols` keyword argument:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "bb, cc = yt.loadtxt(\"my_data.dat\", usecols=(1,2), delimiter=\"\\t\")\n",
- "print (bb)\n",
- "print (b)\n",
- "print ('')\n",
- "print (cc)\n",
- "print (c)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
deleted file mode 100644
index d35be58a9bf..00000000000
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ /dev/null
@@ -1,697 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "In the past, querying a data object with a field name returned a NumPy `ndarray` . In the new unit system, data object queries will return a `YTArray`, a subclass of `ndarray` that preserves all of the nice properties of `ndarray`, including broadcasting, deep and shallow copies, and views. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Selecting data from an object"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`YTArray` is 'unit-aware'. Let's show how this works in practice using a sample Enzo dataset:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import yt\n",
- "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
- "\n",
- "dd = ds.all_data()\n",
- "maxval, maxloc = ds.find_max('density')\n",
- "\n",
- "dens = dd['density']"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (maxval)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dens)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "mass = dd['cell_mass']\n",
- "\n",
- "print (\"Cell Masses in CGS: \\n\", mass, \"\\n\")\n",
- "print (\"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\")\n",
- "print (\"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\")\n",
- "print (\"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "dx = dd['dx']\n",
- "print (\"Cell dx in code units: \\n\", dx, \"\\n\")\n",
- "print (\"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\")\n",
- "print (\"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\")\n",
- "print (\"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Unit conversions"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "YTArray defines several user-visible member functions that allow data to be converted from one unit system to another:\n",
- "\n",
- "* `in_units`\n",
- "* `in_cgs`\n",
- "* `in_mks`\n",
- "* `in_base`\n",
- "* `convert_to_units`\n",
- "* `convert_to_cgs`\n",
- "* `convert_to_mks`\n",
- "* `convert_to_base`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The first method, `in_units`, returns a copy of the array in the units denoted by a string argument:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd['density'].in_units('Msun/pc**3'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`in_cgs` and `in_mks` return a copy of the array converted to CGS and MKS units, respectively:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd['pressure'])\n",
- "print (dd['pressure'].in_cgs())\n",
- "print (dd['pressure'].in_mks())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`in_cgs` and `in_mks` are just special cases of the more general `in_base`, which can convert a `YTArray` to a number of different unit systems:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd['pressure'].in_base('imperial')) # Imperial/English base units\n",
- "print (dd['pressure'].in_base('galactic')) # Base units of kpc, Msun, Myr\n",
- "print (dd['pressure'].in_base('planck')) # Base units in the Planck system\n",
- "print (dd['pressure'].in_base()) # defaults to cgs if no argument given"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`in_base` also takes the `\"code\"` argument to convert the `YTArray` into the base units of the dataset:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd['pressure'].in_base(\"code\")) # The IsolatedGalaxy dataset from above"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "yt defines a number of unit systems, and new unit systems may be added by the user, which can also be passed to `in_base`. To learn more about the unit systems, how to use them with datasets and other objects, and how to add new ones, see [Unit Systems](unit_systems.html)."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The rest of the methods do in-place conversions:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "dens = dd['density']\n",
- "print (dens)\n",
- "\n",
- "dens.convert_to_units('Msun/pc**3')\n",
- "print (dens)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "One possibly confusing wrinkle when using in-place conversions is if you try to query `dd['density']` again, you'll find that it has been converted to solar masses per cubic parsec:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd['density'])\n",
- "\n",
- "dens.convert_to_units('g/cm**3')\n",
- "\n",
- "print (dens)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Since the unit metadata is preserved and the array values are still correct in the new unit system, all numerical operations will still be correct.\n",
- "\n",
- "One of the nicest aspects of this new unit system is that the symbolic algebra for mathematical operations on data with units is performed automatically by sympy. This example shows how we can construct a field with density units from two other fields that have units of mass and volume:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd['cell_mass'])\n",
- "print (dd['cell_volume'].in_units('cm**3'))\n",
- "\n",
- "print ((dd['cell_mass']/dd['cell_volume']).in_cgs())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Electrostatic/Electromagnetic Units"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Electromagnetic units can be a bit tricky, because the units for such quantities in different unit systems can have entirely different dimensions, even if they are meant to represent the same physical quantities. For example, in the SI system of units, current in Amperes is a fundamental unit of measure, so the unit of charge \"coulomb\" is equal to one ampere-second. On the other hand, in the Gaussian/CGS system, there is no equivalent base electromagnetic unit, and the electrostatic charge unit \"esu\" is equal to one $\\mathrm{cm^{3/2}g^{-1/2}s^{-1}}$ (which does not have any apparent physical significance). `yt` recognizes this difference:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "q1 = yt.YTArray(1.0,\"C\") # coulombs\n",
- "q2 = yt.YTArray(1.0,\"esu\") # electrostatic units / statcoulomb\n",
- "\n",
- "print (\"units =\", q1.in_mks().units, \", dims =\", q1.units.dimensions)\n",
- "print (\"units =\", q2.in_cgs().units, \", dims =\", q2.units.dimensions)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "B1 = yt.YTArray(1.0,\"T\") # tesla\n",
- "B2 = yt.YTArray(1.0,\"gauss\") # gauss\n",
- "\n",
- "print (\"units =\", B1.in_mks().units, \", dims =\", B1.units.dimensions)\n",
- "print (\"units =\", B2.in_cgs().units, \", dims =\", B2.units.dimensions)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To convert between these two systems, use [Unit Equivalencies](unit_equivalencies.html)."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Working with views and converting to ndarray"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "There are two ways to convert the data into a numpy array. The most straightforward and safe way to do this is to create a copy of the array data. The following cell demonstrates four equivalent ways of doing this, in increasing degree of terseness."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "\n",
- "dens = dd['cell_mass']\n",
- "\n",
- "print (dens.to_ndarray())\n",
- "print (np.array(dens))\n",
- "print (dens.value)\n",
- "print (dens.v)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Since we have a copy of the data, we can mess with it however we wish without disturbing the original data returned by the yt data object.\n",
- "\n",
- "There is yet another way to return a copy of the array data in a `YTArray` or the floating-point value of a `YTQuantity`, which also allows for the possibility to convert to different units. This is done using the `to_value` method:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print(dens.to_value()) # Don't change units\n",
- "print(dens.to_value(\"Msun\")) # Change units to solar masses\n",
- "print(dens[0].to_value(\"lbm\")) # Pick the first value and change its units to pounds"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Another way to touch the raw array data is to get a _view_. A numpy view is a lightweight array interface to a memory buffer. There are four ways to create views of YTArray instances:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd['cell_mass'].ndarray_view())\n",
- "print (dd['cell_mass'].view(np.ndarray))\n",
- "print (dd['cell_mass'].ndview)\n",
- "print (dd['cell_mass'].d)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "When working with views, remember that you are touching the raw array data and no longer have any of the unit checking provided by the unit system. This can be useful where it might be more straightforward to treat the array as if it didn't have units but without copying the data."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "density_values = dd['density'].d\n",
- "density_values[0:10] = 0\n",
- "\n",
- "# The original array was updated\n",
- "print (dd['density'])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Round-Trip Conversions to and from Other Unit Systems"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](https://astropy.readthedocs.io/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Some examples of converting from AstroPy units to yt:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "from astropy import units as u\n",
- "\n",
- "x = 42.0 * u.meter\n",
- "y = yt.YTQuantity.from_astropy(x)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (x, type(x))\n",
- "print (y, type(y))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "a = np.random.random(size=10) * u.km/u.s\n",
- "b = yt.YTArray.from_astropy(a)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (a, type(a))\n",
- "print (b, type(b))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. For arrays:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "temp = dd[\"temperature\"]\n",
- "atemp = temp.to_astropy()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (temp, type(temp))\n",
- "print (atemp, type(atemp))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "and quantities:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "from yt.units import kboltz\n",
- "kb = kboltz.to_astropy()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (kboltz, type(kboltz))\n",
- "print (kb, type(kb))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "As a sanity check, you can show that it works round-trip:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "k1 = kboltz.to_astropy()\n",
- "k2 = yt.YTQuantity.from_astropy(kb)\n",
- "print(k1)\n",
- "print(k2)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "c = yt.YTArray.from_astropy(a)\n",
- "d = c.to_astropy()\n",
- "print (a == d)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can also do the same thing with unitful quantities from the [Pint package](https://pint.readthedocs.org), using essentially the same procedure:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "from pint import UnitRegistry\n",
- "ureg = UnitRegistry()\n",
- "v = 1000.*ureg.km/ureg.s\n",
- "w = yt.YTQuantity.from_pint(v)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (v, type(v))\n",
- "print (w, type(w))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "ptemp = temp.to_pint()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (temp, type(temp))\n",
- "print (ptemp, type(ptemp))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Defining New Units"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "yt also provides a way to define your own units. Suppose you wanted to define a new unit for \"miles per hour\", the familiar \"mph\", which is not already in yt. One can do this by calling `yt.define_unit()`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "yt.define_unit(\"mph\", (1.0, \"mile/hr\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Once this unit is defined, it can be used in the same way as any other unit:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from yt.units import clight\n",
- "print (clight.to('mph'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If you want to define a new unit which is prefixable (like SI units), you can set `prefixable=True` when defining the unit:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from yt import YTQuantity\n",
- "yt.define_unit(\"L\", (1000.0, \"cm**3\"), prefixable=True)\n",
- "print (YTQuantity(1.0, \"mL\").to(\"cm**3\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`yt.define_unit()` defines new units for all yt operations. However, new units can be defined for particular datasets only as well using `ds.define_unit()`, which has the same signature:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "ds.define_unit(\"M_star\", (2.0e13, \"Msun\"))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "dd = ds.all_data()\n",
- "print(dd.quantities.total_mass().to(\"M_star\"))"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python [default]",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
diff --git a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
deleted file mode 100644
index 8f16e1261a0..00000000000
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ /dev/null
@@ -1,433 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "In yt 3.0, we want to make it easier to access \"raw\" simulation data that a code writes directly to disk. The new unit system makes it much simpler to convert back and forth between physical coordinates and the unscaled \"raw\" coordinate system used internally in the simulation code. In some cases, this conversion involves transforming to comoving coordinates, so that is also covered here."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Code units"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import yt\n",
- "ds = yt.load('Enzo_64/DD0043/data0043')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object. Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "reg = ds.unit_registry\n",
- "\n",
- "for un in reg.keys():\n",
- " if un.startswith('code_'):\n",
- " fmt_tup = (un, reg.lut[un][0], str(reg.lut[un][1]))\n",
- " print (\"Unit name: {:<15}\\nCGS conversion: {:<15}\\nDimensions: {:<15}\\n\".format(*fmt_tup))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "fmt_tup"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Most of the time you will not have to deal with the unit registry. For example, the conversion factors to code units are stored as attributes of the dataset object:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (\"Length unit: \", ds.length_unit)\n",
- "print (\"Time unit: \", ds.time_unit)\n",
- "print (\"Mass unit: \", ds.mass_unit)\n",
- "print (\"Velocity unit: \", ds.velocity_unit)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Conversion factors will be supplied in CGS by default. We can also ask what the conversion factors are in code units."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (\"Length unit: \", ds.length_unit.in_units('code_length'))\n",
- "print (\"Time unit: \", ds.time_unit.in_units('code_time'))\n",
- "print (\"Mass unit: \", ds.mass_unit.in_units('code_mass'))\n",
- "print (\"Velocity unit: \", ds.velocity_unit.in_units('code_velocity'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "as expected, all the conversion factors are unity in code units."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can also play with unit conversions on `ds.domain_width`. First, we see for enzo how code length units are defined relative to the domain width:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds.domain_width"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds.domain_width.in_cgs()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds.domain_width.in_units('Mpccm/h')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Comoving units"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "This last example uses a cosmological unit. In English, I asked for the domain width in comoving megaparsecs, scaled as if the hubble constant were 100 km/s/Mpc. Although $h$ isn't really a unit, yt treats it as one for the purposes of the unit system. \n",
- "\n",
- "As an aside, Darren Croton's [research note](https://arxiv.org/abs/1308.4150) on the history, use, and interpretation of $h$ as it appears in the astronomical literature is pretty much required reading for anyone who has to deal with factors of $h$ every now and then.\n",
- "\n",
- "In yt, comoving length unit symbols are named following the pattern `(length symbol)cm`, i.e. `pccm` for comoving parsec or `mcm` for a comoving meter. A comoving length unit is different from the normal length unit by a factor of $(1+z)$:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "z = ds.current_redshift\n",
- " \n",
- "print (ds.quan(1, 'Mpc')/ds.quan(1, 'Mpccm'))\n",
- "print (1+z)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "As we saw before, $h$ is treated like any other unit symbol. It has `dimensionless` units, just like a scalar:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (ds.quan(1, 'Mpc')/ds.quan(1, 'Mpc/h'))\n",
- "print (ds.hubble_constant)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "These units can be used in readily used in plots and anywhere a length unit is appropriate in yt."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "slc = yt.SlicePlot(ds, 0, 'density', width=(128, 'Mpccm/h'))\n",
- "slc.set_figure_size(6)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### The unit registry"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "When you create a `YTArray` without referring to a unit registry, yt uses the default unit registry, which does not include code units or comoving units."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt import YTQuantity\n",
- "\n",
- "a = YTQuantity(3, 'cm')\n",
- "\n",
- "print (a.units.registry.keys())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "When a dataset is loaded, yt infers conversion factors from the internal simulation unit system to the CGS unit system. These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols. For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()]))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
- "\n",
- "* `ds.arr()`\n",
- "* `ds.quan()`\n",
- "\n",
- "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units. For example:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "a = ds.quan(3, 'code_length')\n",
- "\n",
- "print (a)\n",
- "print (a.in_cgs())\n",
- "print (a.in_units('Mpccm/h'))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
- "print (b)\n",
- "print (b.in_cgs())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Overriding Code Unit Definitions"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "On occasion, you might have a dataset for a supported frontend that does not have the conversions to code units accessible (for example, Athena data) or you may want to change them outright. `yt` provides a mechanism so that one may provide their own code unit definitions to `load`, which override the default rules for a given frontend for defining code units. This is provided through the `units_override` dictionary. We'll use an example of an Athena dataset. First, a call to `load` without `units_override`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds1 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\")\n",
- "print (ds1.length_unit)\n",
- "print (ds1.mass_unit)\n",
- "print (ds1.time_unit)\n",
- "sp1 = ds1.sphere(\"c\",(0.1,\"unitary\"))\n",
- "print (sp1[\"density\"])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "This is a galaxy cluster dataset, so it is not likely that the units of density are correct. We happen to know that the unit definitions are different, so we can override the units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "units_override = {\"length_unit\":(1.0,\"Mpc\"),\n",
- " \"time_unit\":(1.0,\"Myr\"),\n",
- " \"mass_unit\":(1.0e14,\"Msun\")}"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "`units_override` can take the following keys:\n",
- "\n",
- "* `length_unit`\n",
- "* `time_unit`\n",
- "* `mass_unit`\n",
- "* `magnetic_unit`\n",
- "* `temperature_unit`\n",
- "\n",
- "and the associated values can be (value, unit) tuples, `YTQuantities`, or floats (in the latter case they are assumed to have the corresponding cgs unit). "
- ]
- },
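-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "For illustration (a sketch, not part of the example above), the same overrides could mix these forms; the bare float below is interpreted as a cgs value, i.e. grams, assuming $M_\\odot \\approx 1.989\\times10^{33}\\,\\rm{g}$:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from yt import YTQuantity\n",
-    "\n",
-    "units_override_alt = {\"length_unit\": YTQuantity(1.0, \"Mpc\"),  # a YTQuantity\n",
-    "                      \"time_unit\": (1.0, \"Myr\"),              # a (value, unit) tuple\n",
-    "                      \"mass_unit\": 1.0e14*1.989e33}            # a bare float, assumed to be in grams (cgs)"
-   ]
-  },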
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds2 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)\n",
- "print (ds2.length_unit)\n",
- "print (ds2.mass_unit)\n",
- "print (ds2.time_unit)\n",
- "sp2 = ds2.sphere(\"c\",(0.1,\"unitary\"))\n",
- "print (sp2[\"density\"])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "This option should be used very carefully, and *only* if you know that the dataset does not provide units or that the unit definitions generated are incorrect for some reason. "
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb b/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
deleted file mode 100644
index f24b98057b3..00000000000
--- a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
+++ /dev/null
@@ -1,125 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets. Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional array instances produced by different unit systems. Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
- "\n",
- "As an example, let's load up two enzo datasets from different redshifts in the same cosmology simulation."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A high redshift output from z ~ 8\n",
- "import yt\n",
- "\n",
- "ds1 = yt.load('Enzo_64/DD0002/data0002')\n",
- "print (\"z = %s\" % ds1.current_redshift)\n",
- "print (\"Internal length units = %s\" % ds1.length_unit)\n",
- "print (\"Internal length units in cgs = %s\" % ds1.length_unit.in_cgs())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A low redshift output from z ~ 0\n",
- "ds2 = yt.load('Enzo_64/DD0043/data0043')\n",
- "print (\"z = %s\" % ds2.current_redshift)\n",
- "print (\"Internal length units = %s\" % ds2.length_unit)\n",
- "print (\"Internal length units in cgs = %s\" % ds2.length_unit.in_cgs())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Given that these are from the same simulation in comoving units, the CGS length units are different by a factor of $(1+z_1)/(1+z_2)$:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (ds2.length_unit.in_cgs()/ds1.length_unit.in_cgs() == (1+ds1.current_redshift)/(1+ds2.current_redshift))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "It's not necessary to convert to CGS units either. yt will automatically account for the fact that a comoving megaparsec in the first output is physically different compared to a comoving megaparsec in the second output."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (ds2.length_unit/ds1.length_unit)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Time series analysis is also straightforward. Since dimensional arrays and quantities carry around the conversion factors to CGS with them, we can safely pickle them, share them with other processors, or combine them without worrying about differences in unit definitions.\n",
- "\n",
- "The following snippet, which iterates over a time series and saves the `length_unit` quantity to a storage dictionary. This should work correctly on one core or in a script run in parallel."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import yt\n",
- "yt.enable_parallelism()\n",
- "\n",
- "ts = yt.load(\"Enzo_64/DD????/data????\")\n",
- "\n",
- "storage = {}\n",
- "\n",
- "for sto, ds in ts.piter(storage=storage):\n",
- " sto.result_id = float(ds.current_time.in_units('Gyr'))\n",
- " sto.result = ds.length_unit\n",
- "\n",
- "if yt.is_root():\n",
- " for t in sorted(storage.keys()):\n",
- " print (t, storage[t].in_units('Mpc'))"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3.0
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/doc/source/analyzing/units/5)_Units_and_plotting.ipynb b/doc/source/analyzing/units/5)_Units_and_plotting.ipynb
deleted file mode 100644
index f4fe9b49479..00000000000
--- a/doc/source/analyzing/units/5)_Units_and_plotting.ipynb
+++ /dev/null
@@ -1,185 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "It's now easy to adjust the units of a field you are plotting.\n",
- "\n",
- "> Note: the following examples use `SlicePlot`, but the same thing should work for `ProjectionPlot`, `OffAxisSlicePlot`, and `OffAxisProjectionPlot`."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "First, let's create a new `SlicePlot`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import yt\n",
- "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
- "slc = yt.SlicePlot(ds, 2, 'density', center=[0.5, 0.5, 0.5], width=(15, 'kpc'))\n",
- "slc.set_figure_size(6)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The units used to scale the colorbar can be adjusted by calling the `set_unit` function that is attached to the plot object. This example creates a plot of density in code units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "slc.set_unit('density', 'code_mass/code_length**3')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "This example creates a plot of gas density in solar masses per cubic parsec:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "slc.set_unit('density', 'Msun/pc**3')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The `set_unit` function will accept any unit string that is dimensionally equivalent to the plotted field. If it is supplied a unit that is not dimensionally equivalent, it will raise an error:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.utilities.exceptions import YTUnitConversionError\n",
- "\n",
- "try:\n",
- " slc.set_unit('density', 'Msun')\n",
- "except YTUnitConversionError as e:\n",
- " print (e)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Similarly, set_unit is defined for `ProfilePlot` and `PhasePlot` instances as well.\n",
- "\n",
- "To illustrate this point, let's first create a new `ProfilePlot`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "dd = ds.all_data()\n",
- "plot = yt.ProfilePlot(dd, 'density', 'temperature', weight_field='cell_mass')\n",
- "plot.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "And adjust the unit of the y-axis:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "plot.set_unit('density', 'Msun/pc**3')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Similarly for PhasePlot:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "plot = yt.PhasePlot(dd, 'density', 'temperature', 'cell_mass')\n",
- "plot.set_figure_size(6)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "plot.set_unit('cell_mass', 'Msun')\n",
- "plot.set_unit('density', 'Msun/pc**3')"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
deleted file mode 100644
index 62a0617daae..00000000000
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ /dev/null
@@ -1,318 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://docs.astropy.org/en/latest/units/equivalencies.html)). The possible unit equivalencies are:\n",
- "\n",
- "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n",
- "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n",
- "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n",
- "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n",
- "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n",
- "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)\n",
- "\n",
- "The following unit equivalencies only apply under conditions applicable for an ideal gas with a constant mean molecular weight $\\mu$ and ratio of specific heats $\\gamma$:\n",
- "\n",
- "* `\"number_density\"`: conversions between density and number density ($n = \\rho/\\mu{m_p}$)\n",
- "* `\"sound_speed\"`: conversions between temperature and sound speed for an ideal gas ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n",
- "\n",
- "A `YTArray` or `YTQuantity` can be converted to an equivalent using `in_units` (previously described in [Fields and Unit Conversion](fields_and_unit_conversion.html)), where the unit and the equivalence name are provided as additional arguments:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import yt\n",
- "from yt import YTQuantity\n",
- "import numpy as np\n",
- "\n",
- "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
- "\n",
- "dd = ds.all_data()\n",
- "\n",
- "print (dd[\"temperature\"].in_units(\"erg\", equivalence=\"thermal\"))\n",
- "print (dd[\"temperature\"].in_units(\"eV\", equivalence=\"thermal\"))\n",
- "\n",
- "# Rest energy of the proton\n",
- "from yt.units import mp\n",
- "E_p = mp.in_units(\"GeV\", equivalence=\"mass_energy\")\n",
- "print (E_p)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Most equivalencies can go in both directions, without any information required other than the unit you want to convert to (this is not the case for the electromagnetic equivalencies, which we'll discuss later):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from yt.units import clight\n",
- "v = 0.1*clight\n",
- "g = v.in_units(\"dimensionless\", equivalence=\"lorentz\")\n",
- "print (g)\n",
- "print (g.in_units(\"c\", equivalence=\"lorentz\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The previously described `to_value` method, which works like `in_units` except that it returns a bare NumPy array or floating-point number, also accepts equivalencies:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd[\"temperature\"].to_value(\"erg\", equivalence=\"thermal\"))\n",
- "print (mp.to_value(\"GeV\", equivalence=\"mass_energy\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Special Equivalencies"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Some equivalencies can take supplemental information. The `\"number_density\"` equivalence can take a custom mean molecular weight (default is $\\mu = 0.6$):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd[\"density\"].max())\n",
- "print (dd[\"density\"].in_units(\"cm**-3\", equivalence=\"number_density\").max())\n",
- "print (dd[\"density\"].in_units(\"cm**-3\", equivalence=\"number_density\", mu=0.75).max())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The `\"sound_speed\"` equivalence optionally takes the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$ (defaults are $\\gamma$ = 5/3, $\\mu = 0.6$):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (dd[\"temperature\"].in_units(\"km/s\", equivalence=\"sound_speed\").mean())\n",
- "print (dd[\"temperature\"].in_units(\"km/s\", equivalence=\"sound_speed\", gamma=4./3., mu=0.5).mean())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "These options must be used with caution, and only if you know the underlying data adheres to these assumptions!"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Electromagnetic Equivalencies"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Special, one-way equivalencies exist for converting between electromagnetic units in the cgs and SI unit systems. These exist since in the cgs system, electromagnetic units are comprised of the base units of seconds, grams and centimeters, whereas in the SI system Ampere is a base unit. For example, the dimensions of charge are completely different in the two systems:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "Q1 = YTQuantity(1.0,\"C\")\n",
- "Q2 = YTQuantity(1.0,\"esu\")\n",
- "print (\"Q1 dims =\", Q1.units.dimensions)\n",
- "print (\"Q2 dims =\", Q2.units.dimensions)\n",
- "print (\"Q1 base units =\", Q1.in_mks())\n",
- "print (\"Q2 base units =\", Q2.in_cgs())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To convert from a cgs unit to an SI unit, use the \"SI\" equivalency:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from yt.units import qp # the elementary charge in esu\n",
- "qp_SI = qp.in_units(\"C\", equivalence=\"SI\") # convert to Coulombs\n",
- "print (qp)\n",
- "print (qp_SI)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To convert from an SI unit to a cgs unit, use the \"CGS\" equivalency:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "B = YTQuantity(1.0,\"T\") # magnetic field in Tesla\n",
- "print (B, B.in_units(\"gauss\", equivalence=\"CGS\")) # convert to Gauss"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Equivalencies exist between the SI and cgs dimensions of charge, current, magnetic field, electric potential, and resistance. As a neat example, we can convert current in Amperes and resistance in Ohms to their cgs equivalents, and then use them to calculate the \"Joule heating\" of a conductor with resistance $R$ and current $I$:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "I = YTQuantity(1.0,\"A\")\n",
- "I_cgs = I.in_units(\"statA\", equivalence=\"CGS\")\n",
- "R = YTQuantity(1.0,\"ohm\")\n",
- "R_cgs = R.in_units(\"statohm\", equivalence=\"CGS\")\n",
- "P = I**2*R\n",
- "P_cgs = I_cgs**2*R_cgs"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The dimensions of current and resistance in the two systems are completely different, but the formula gives us the power dissipated dimensions of energy per time, so the dimensions and the result should be the same, which we can check:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (P_cgs.units.dimensions == P.units.dimensions)\n",
- "print (P.in_units(\"W\"), P_cgs.in_units(\"W\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Determining Valid Equivalencies"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If a certain equivalence does not exist for a particular unit, then an error will be thrown:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n",
- "\n",
- "try:\n",
- " x = v.in_units(\"angstrom\", equivalence=\"spectral\")\n",
- "except YTInvalidUnitEquivalence as e:\n",
- " print (e)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "You can check if a `YTArray` has a given equivalence with `has_equivalent`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "print (mp.has_equivalent(\"compton\"))\n",
- "print (mp.has_equivalent(\"thermal\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "E_p.list_equivalencies()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python [default]",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
diff --git a/doc/source/analyzing/units/7)_Unit_Systems.ipynb b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
deleted file mode 100644
index c1fbcd4dfa2..00000000000
--- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb
+++ /dev/null
@@ -1,491 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "By default, the results of most calculations in yt are expressed in a \"centimeters-grams-seconds\" (CGS) set of units. This includes the values of derived fields and aliased fields.\n",
- "\n",
- "However, this system of units may not be the most natural for a given dataset or an entire class of datasets. For this reason, yt provides the ability to define new unit systems and use them in a way that is highly configurable by the end-user. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Unit Systems Available in yt"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Several unit systems are already supplied for use within yt. They are:\n",
- "\n",
- "* `\"cgs\"`: Centimeters-grams-seconds unit system, with base of `(cm, g, s, K, radian)`. Uses the Gaussian normalization for electromagnetic units. \n",
- "* `\"mks\"`: Meters-kilograms-seconds unit system, with base of `(m, kg, s, K, radian, A)`.\n",
- "* `\"imperial\"`: Imperial unit system, with base of `(mile, lbm, s, R, radian)`.\n",
- "* `\"galactic\"`: \"Galactic\" unit system, with base of `(kpc, Msun, Myr, K, radian)`.\n",
- "* `\"solar\"`: \"Solar\" unit system, with base of `(AU, Mearth, yr, K, radian)`. \n",
- "* `\"planck\"`: Planck natural units $(\\hbar = c = G = k_B = 1)$, with base of `(l_pl, m_pl, t_pl, T_pl, radian)`. \n",
- "* `\"geometrized\"`: Geometrized natural units $(c = G = 1)$, with base of `(l_geom, m_geom, t_geom, K, radian)`. \n",
- "\n",
- "We can examine these unit systems by querying them from the `unit_system_registry`. For example, we can look at the default CGS system:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import yt\n",
- "yt.unit_system_registry[\"cgs\"]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can see that we have two sets of units that this system defines: \"base\" and \"other\" units. The \"base\" units are the set of units from which all other units in the system are composed of, such as centimeters, grams, and seconds. The \"other\" units are compound units which fields with specific dimensionalities are converted to, such as ergs, dynes, gauss, and electrostatic units (esu). \n",
- "\n",
- "We see a similar setup for the MKS system, except that in this case, there is a base unit of current, the Ampere:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "yt.unit_system_registry[\"mks\"]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can also look at the imperial system:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "yt.unit_system_registry[\"imperial\"]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "and the \"galactic\" system as well:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "yt.unit_system_registry[\"galactic\"]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Converting `YTArrays` to the Different Unit Systems"
- ]
- },
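-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As a brief sketch, any `YTArray` or `YTQuantity` can be converted to the base units of one of these systems with the `in_base` method (the same method is used with physical constants later in this notebook):"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from yt import YTQuantity\n",
-    "\n",
-    "p = YTQuantity(1.0, 'erg/cm**3') # a pressure in cgs base units\n",
-    "\n",
-    "# Convert to the base units of other registered unit systems\n",
-    "print (p.in_base('mks'))\n",
-    "print (p.in_base('galactic'))\n",
-    "print (p.in_base('imperial'))"
-   ]
-  },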
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Choosing a Unit System When Loading a Dataset"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "When a dataset is `load`ed, a unit system may be specified. When this happens, all aliased and derived fields will be converted to the units of the given system. The default is `\"cgs\"`.\n",
- "\n",
- "For example, we can specify that the fields from a FLASH dataset can be expressed in MKS units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds_flash = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100\", unit_system=\"mks\")\n",
- "sp = ds_flash.sphere(\"c\", (100.,\"kpc\"))\n",
- "print (sp[\"density\"]) # This is an alias for (\"flash\",\"dens\")\n",
- "print (sp[\"pressure\"]) # This is an alias for (\"flash\",\"pres\")\n",
- "print (sp[\"angular_momentum_x\"]) # This is a derived field\n",
- "print (sp[\"kinetic_energy\"]) # This is also a derived field"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Aliased fields are converted to the requested unit system, but the on-disk fields that they correspond to remain in their original (code) units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print (sp[\"flash\",\"dens\"]) # This is aliased to (\"gas\", \"density\")\n",
- "print (sp[\"flash\",\"pres\"]) # This is aliased to (\"gas\", \"pressure\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can take an `Enzo` dataset and express it in `\"galactic\"` units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds_enzo = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\", unit_system=\"galactic\")\n",
- "sp = ds_enzo.sphere(\"c\", (20.,\"kpc\"))\n",
- "print (sp[\"density\"])\n",
- "print (sp[\"pressure\"])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can also express all of the fields associated with a dataset in that dataset's system of \"code\" units. Though the on-disk fields are already in these units, this means that we can express even derived fields in code units as well:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "ds_chombo = yt.load(\"KelvinHelmholtz/data.0004.hdf5\", unit_system=\"code\")\n",
- "dd = ds_chombo.all_data()\n",
- "print (dd[\"density\"])\n",
- "print (dd[\"kinetic_energy\"])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Defining Fields So That They Can Use the Different Unit Systems"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If you define a new derived field for use in yt and wish to make the different unit systems available to it, you will need to specify this when calling `add_field`. Suppose I defined a new field called `\"momentum_x\"` and wanted it to have general units. I would have to set it up in this fashion, using the `unit_system` attribute of the dataset and querying it for the appropriate dimensions:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "mom_units = ds_flash.unit_system[\"velocity\"]*ds_flash.unit_system[\"density\"]\n",
- "def _momentum_x(field, data):\n",
- " return data[\"density\"]*data[\"velocity_x\"]\n",
- "ds_flash.add_field((\"gas\",\"momentum_x\"), function=_momentum_x, units=mom_units)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now, the field will automatically be expressed in whatever units the dataset was called with. In this case, it was MKS:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "slc = yt.SlicePlot(ds_flash, \"z\", [\"momentum_x\"], width=(300.,\"kpc\"))\n",
- "slc.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Note that the momentum density has been plotted with the correct MKS units of $\\mathrm{kg/(m^2\\cdot{s})}$."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If you don't create a derived field from a dataset but instead use `yt.add_field`, and still want to use the unit system of that dataset for the units, the only option at present is to set `units=\"auto\"` in the call to `yt.add_field` and the `dimensions` keyword to the correct dimensions for the field:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import clight\n",
- "\n",
- "def _rest_energy(field, data):\n",
- " return data[\"cell_mass\"]*clight*clight\n",
- "yt.add_field((\"gas\",\"rest_energy\"), function=_rest_energy, units=\"auto\", dimensions=\"energy\")\n",
- "\n",
- "ds_flash2 = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\", unit_system=\"galactic\")\n",
- "\n",
- "sp = ds_flash2.sphere(\"c\", (100.,\"kpc\"))\n",
- "sp[\"rest_energy\"]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Obtaining Physical Constants in a Specific Unit System"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Each unit system provides the ability to obtain any physical constant in yt's physical constants database in the base units of that system via the `constants` attribute of the unit system. For example, to obtain the value of Newton's universal constant of gravitation in different base units:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for name in [\"cgs\", \"mks\", \"imperial\", \"planck\", \"geometrized\"]:\n",
- " unit_system = yt.unit_system_registry[name]\n",
- " print (name, unit_system.constants.G)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Equivalently, one could import a physical constant from the main database and convert it using `in_base`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from yt.units import G\n",
- "print (G.in_base(\"mks\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Defining Your Own Unit System"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "You are not limited to using the unit systems already defined by yt. A new unit system can be defined by creating a new `UnitSystem` instance. For example, to create a unit system where the default units are in millimeters, centigrams, and microseconds:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "small_unit_system = yt.UnitSystem(\"small\", \"mm\", \"cg\", \"us\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where the required arguments are a `name` for the unit system, and the `length_unit`, `mass_unit`, and `time_unit` for the unit system, which serve as the \"base\" units to convert everything else to. Once a unit system instance is created, it is automatically added to the `unit_system_registry` so that it may be used throughout yt:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "yt.unit_system_registry[\"small\"]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Note that the base units for the dimensions of angle and temperature have been automatically set to radians and Kelvin, respectively. If desired, these can be specified using optional arguments when creating the `UnitSystem` object:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "wacky_unit_system = yt.UnitSystem(\"wacky\", \"mile\", \"kg\", \"day\", temperature_unit=\"R\", angle_unit=\"deg\")\n",
- "wacky_unit_system"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Though it will rarely be necessary, an MKS-style system of units where a unit of current can be specified as a base unit can also be created using the `current_mks` optional argument:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "mksish_unit_system = yt.UnitSystem(\"mksish\", \"dm\", \"ug\", \"ks\", current_mks_unit=\"mA\")\n",
- "mksish_unit_system"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Initializing a `UnitSystem` object only sets up the base units. In this case, all fields will be converted to combinations of these base units based on their dimensionality. However, you may want to specify that fields of a given dimensionality use a compound unit by default instead. For example, you might prefer that in the `\"small\"` unit system that pressures be represented in microdynes per millimeter squared. To do this, set these to be the units of the `\"pressure\"` dimension explicitly:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "small_unit_system[\"pressure\"] = \"udyne/mm**2\""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can now look at the `small_unit_system` object and see that these units are now defined for pressure in the \"Other Units\" category:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "small_unit_system"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can do the same for a few other dimensionalities:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "small_unit_system[\"magnetic_field_cgs\"] = \"mG\"\n",
- "small_unit_system[\"specific_energy\"] = \"cerg/ug\"\n",
- "small_unit_system[\"velocity\"] = \"cm/s\"\n",
- "small_unit_system"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/doc/source/analyzing/units/comoving_units_and_code_units.rst b/doc/source/analyzing/units/comoving_units_and_code_units.rst
deleted file mode 100644
index aef984b7e6c..00000000000
--- a/doc/source/analyzing/units/comoving_units_and_code_units.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-.. _comoving_units_and_code_units:
-
-Comoving units and code units
-=============================
-
-.. notebook:: 3)_Comoving_units_and_code_units.ipynb
-
-.. _cosmological-units:
-
-Units for Cosmological Datasets
--------------------------------
-
-yt has additional capabilities to handle the comoving coordinate system used
-internally in cosmological simulations. In simulations that use comoving
-coordinates, every length unit has three other counterparts corresponding to
-comoving units, scaled comoving units, and scaled proper units. In all cases,
-'scaled' units refer to scaling by the reduced Hubble parameter; i.e., the length
-unit is what it would be in a universe where the Hubble parameter is 100 km/s/Mpc.
-
-To access these different units, yt has a common naming system. Scaled units are denoted by
-dividing by the scaled Hubble parameter ``h`` (which is in itself a unit). Comoving
-units are denoted by appending ``cm`` to the end of the unit name.
-
-Using the parsec as an example,
-
-``pc``
- Proper parsecs, :math:`\rm{pc}`.
-
-``pccm``
- Comoving parsecs, :math:`\rm{pc}/(1+z)`.
-
-``pccm/h``
- Comoving parsecs normalized by the scaled Hubble constant, :math:`\rm{pc}/h/(1+z)`.
-
-``pc/h``
- Proper parsecs, normalized by the scaled Hubble constant, :math:`\rm{pc}/h`.
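-
-As a quick sketch (using the ``Enzo_64`` sample dataset here; any cosmological
-dataset will do), the ratios between these variants reduce to factors of
-:math:`h` and :math:`(1+z)`:
-
-.. code-block:: python
-
-    import yt
-
-    ds = yt.load("Enzo_64/DD0043/data0043")
-
-    print(ds.quan(1, "pc") / ds.quan(1, "pccm"))      # should equal (1 + z)
-    print(ds.quan(1, "pc/h") / ds.quan(1, "pc"))      # should equal 1/h
-    print(ds.quan(1, "pccm/h") / ds.quan(1, "pccm"))  # should equal 1/h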
diff --git a/doc/source/analyzing/units/comparing_units_from_different_datasets.rst b/doc/source/analyzing/units/comparing_units_from_different_datasets.rst
deleted file mode 100644
index 279e1c7a1a2..00000000000
--- a/doc/source/analyzing/units/comparing_units_from_different_datasets.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _comparing_units_from_different_datasets:
-
-Comparing units from different datasets
-=======================================
-
-.. notebook:: 4)_Comparing_units_from_different_datasets.ipynb
diff --git a/doc/source/analyzing/units/fields_and_unit_conversion.rst b/doc/source/analyzing/units/fields_and_unit_conversion.rst
deleted file mode 100644
index 96515e9f37e..00000000000
--- a/doc/source/analyzing/units/fields_and_unit_conversion.rst
+++ /dev/null
@@ -1,74 +0,0 @@
-.. _fields_and_unit_conversion:
-
-Fields and Unit Conversion
-==========================
-
-.. notebook:: 2)_Fields_and_unit_conversion.ipynb
-
-Derived Fields
---------------
-
-.. This needs to be added outside the notebook since user-defined derived fields
- require a 'fresh' kernel.
-
-The following example creates a derived field for the square root of the cell
-volume.
-
-.. notebook-cell::
-
- import yt
- import numpy as np
-
- # Function defining the derived field
- def root_cell_volume(field, data):
- return np.sqrt(data['cell_volume'])
-
- # Load the dataset
- ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
-
- # Add the field to the dataset, linking to the derived field function and
- # units of the field
- ds.add_field(("gas", "root_cell_volume"), units="cm**(3/2)", function=root_cell_volume)
-
- # Access the derived field like any other field
- ad = ds.all_data()
- ad['root_cell_volume']
-
-No special unit logic needs to happen inside the function - `np.sqrt` will
-convert the units of the `cell_volume` field appropriately:
-
-.. notebook-cell::
- :skip_exceptions:
-
- import yt
- import numpy as np
-
- ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
- ad = ds.all_data()
-
- print(ad['cell_volume'].in_cgs())
- print(np.sqrt(ad['cell_volume'].in_cgs()))
-
-That said, it is necessary to specify the units in the call to the
-:code:`add_field` function. Not only does this ensure the returned units
-will be exactly what you expect, it also allows an in-place conversion of units,
-just in case the function returns a field with dimensionally equivalent units.
-
-For example, let's redo the above example but ask for units of
-:code:`Mpc**(3/2)`:
-
-.. notebook-cell::
-
- import yt
- import numpy as np
-
- def root_cell_volume(field, data):
- return np.sqrt(data['cell_volume'])
-
- ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
-
- # Here we set the default units to Mpc^(3/2)
- ds.add_field(("gas", "root_cell_volume"), units="Mpc**(3/2)", function=root_cell_volume)
-
- ad = ds.all_data()
- ad['root_cell_volume']
diff --git a/doc/source/analyzing/units/index.rst b/doc/source/analyzing/units/index.rst
deleted file mode 100644
index 3e1dc9258fc..00000000000
--- a/doc/source/analyzing/units/index.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-.. _units:
-
-Symbolic Units
-==============
-
-This section describes yt's symbolic unit capabilities. This is provided as a
-quick introduction for those who are already familiar with yt but want to learn
-more about the unit system. Please see :ref:`analyzing` and :ref:`visualizing`
-for more detail about querying, analyzing, and visualizing data in yt.
-
-Each subsection is a notebook. To open these notebooks in a "live" IPython session
-and execute the documentation interactively, you need to download the repository
-and start the IPython notebook.
-
-You will then need to navigate to :code:`$YT_GIT/doc/source/analyzing/units` (where $YT_GIT
-is the location of a clone of the yt git repository), and then start an
-IPython notebook server:
-
-.. code:: bash
-
- $ ipython notebook
-
-.. warning:: The pre-filled notebooks are *far* less fun than running them
- yourself!
-
-Here are the notebooks, which have been filled in for inspection:
-
-.. toctree::
- :maxdepth: 1
-
- symbolic_units
- fields_and_unit_conversion
- comoving_units_and_code_units
- comparing_units_from_different_datasets
- units_and_plotting
- unit_equivalencies
- unit_systems
-
-.. note::
-
- The notebooks use sample datasets that are available for download at
- https://yt-project.org/data. See :ref:`quickstart-introduction` for more
- details.
-
-Let us know if you would like to contribute other example notebooks, or have
-any suggestions for how these can be improved.
diff --git a/doc/source/analyzing/units/symbolic_units.rst b/doc/source/analyzing/units/symbolic_units.rst
deleted file mode 100644
index ad94edf1bcf..00000000000
--- a/doc/source/analyzing/units/symbolic_units.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _symbolic_units:
-
-Symbolic units: :code:`yt.units`
-================================
-
-.. notebook:: 1)_Symbolic_Units.ipynb
- :skip_exceptions:
diff --git a/doc/source/analyzing/units/unit_equivalencies.rst b/doc/source/analyzing/units/unit_equivalencies.rst
deleted file mode 100644
index b09fc2a30b6..00000000000
--- a/doc/source/analyzing/units/unit_equivalencies.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _unit_equivalencies:
-
-Unit Equivalencies
-==================
-
-.. notebook:: 6)_Unit_Equivalencies.ipynb
- :skip_exceptions:
diff --git a/doc/source/analyzing/units/unit_systems.rst b/doc/source/analyzing/units/unit_systems.rst
deleted file mode 100644
index 18c23a37ab7..00000000000
--- a/doc/source/analyzing/units/unit_systems.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _unit_systems:
-
-Unit Systems
-============
-
-.. notebook:: 7)_Unit_Systems.ipynb
-    :skip_exceptions:
diff --git a/doc/source/analyzing/units/units_and_plotting.rst b/doc/source/analyzing/units/units_and_plotting.rst
deleted file mode 100644
index f2916c99e42..00000000000
--- a/doc/source/analyzing/units/units_and_plotting.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _units_and_plotting:
-
-Units and Plotting
-==================
-
-.. notebook:: 5)_Units_and_plotting.ipynb
- :skip_exceptions:
diff --git a/doc/source/conf.py b/doc/source/conf.py
index a7397afa81e..71f2c500727 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -68,9 +68,9 @@
# built documents.
#
# The short X.Y version.
-version = '3.7-dev'
+version = '4.0-dev'
# The full version, including alpha/beta/rc tags.
-release = '3.7-dev'
+release = '4.0-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -252,6 +252,9 @@
'https://matplotlib.org/': None,
'https://docs.astropy.org/en/stable': None,
'https://pandas.pydata.org/pandas-docs/stable': None,
+ 'trident': ('https://trident.readthedocs.io/en/latest/', None),
+ 'yt_astro_analysis': ('https://yt-astro-analysis.readthedocs.io/en/latest/', None),
+ 'yt_attic': ('https://yt-attic.readthedocs.io/en/latest/', None),
}
if not on_rtd:
diff --git a/doc/source/cookbook/Halo_Analysis.ipynb b/doc/source/cookbook/Halo_Analysis.ipynb
deleted file mode 100644
index e5abf17ceec..00000000000
--- a/doc/source/cookbook/Halo_Analysis.ipynb
+++ /dev/null
@@ -1,434 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Full Halo Analysis"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Creating a Catalog"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here we put everything together to perform some realistic analysis. First we load a full simulation dataset."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import yt\n",
- "from yt.analysis_modules.halo_analysis.api import *\n",
- "import tempfile\n",
- "import shutil\n",
- "import os\n",
- "\n",
- "# Create temporary directory for storing files\n",
- "tmpdir = tempfile.mkdtemp()\n",
- "\n",
- "# Load the data set with the full simulation information\n",
- "data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now we load a rockstar halos binary file. This is the output from running the rockstar halo finder on the dataset loaded above. It is also possible to require the HaloCatalog to find the halos in the full simulation dataset at runtime by specifying a `finder_method` keyword."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# Load the rockstar data files\n",
- "halos_ds = yt.load('rockstar_halos/halos_0.0.bin')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "From these two loaded datasets we create a halo catalog object. No analysis is done at this point, we are simply defining an object we can add analysis tasks to. These analysis tasks will be run in the order they are added to the halo catalog object."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# Instantiate a catalog using those two parameter files\n",
- "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
- " output_dir=os.path.join(tmpdir, 'halo_catalog'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The first analysis task we add is a filter for the most massive halos; those with masses great than $10^{14}~M_\\odot$. Note that all following analysis will only be performed on these massive halos and we will not waste computational time calculating quantities for halos we are not interested in. This is a result of adding this filter first. If we had called `add_filter` after some other `add_quantity` or `add_callback` to the halo catalog, the quantity and callback calculations would have been performed for all halos, not just those which pass the filter."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": [
- "# Filter out less massive halos\n",
- "hc.add_filter(\"quantity_value\", \"particle_mass\", \">\", 1e14, \"Msun\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Finding Radial Profiles"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Our first analysis goal is going to be constructing radial profiles for our halos. We would like these profiles to be in terms of the virial radius. Unfortunately we have no guarantee that values of center and virial radius recorded by the halo finder are actually physical. Therefore we should recalculate these quantities ourselves using the values recorded by the halo finder as a starting point."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The first step is going to be creating a sphere object that we will create radial profiles along. This attaches a sphere data object to every halo left in the catalog."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# attach a sphere object to each halo whose radius extends to twice the radius of the halo\n",
- "hc.add_callback(\"sphere\", factor=2.0)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cumulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# use the sphere to calculate radial profiles of gas density weighted by cell volume in terms of the virial radius\n",
- "hc.add_callback(\"profile\", [\"radius\"],\n",
- " [(\"gas\", \"overdensity\")],\n",
- " weight_field=\"cell_volume\", \n",
- " accumulation=True,\n",
- " storage=\"virial_quantities_profiles\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now we calculate the virial radius of halo using the sphere object. As this is a callback, not a quantity, the virial radius will not be written out with the rest of the halo properties in the final halo catalog. This also has a `profile_storage` keyword to specify where the radial profiles are stored that will allow the callback to calculate the relevant virial quantities. We supply this keyword with the same string we gave to `storage` in the last `profile` callback."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# Define a virial radius for the halo.\n",
- "hc.add_callback(\"virial_quantities\", [\"radius\"], \n",
- " profile_storage = \"virial_quantities_profiles\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now that we have calculated the virial radius, we delete the profiles we used to find it."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "hc.add_callback('delete_attribute','virial_quantities_profiles')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now that we have calculated virial quantities we can add a new sphere that is aware of the virial radius we calculated above."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "hc.add_callback('sphere', radius_field='radius_200', factor=5,\n",
- " field_parameters=dict(virial_radius=('quantity', 'radius_200')))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Using this new sphere, we calculate a gas temperature profile along the virial radius, weighted by the cell mass."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "hc.add_callback('profile', 'virial_radius_fraction', [('gas','temperature')],\n",
- " storage='virial_profiles',\n",
- " weight_field='cell_mass', \n",
- " accumulation=False, output_dir='profiles')\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "As profiles are not quantities they will not automatically be written out in the halo catalog; thus in order to be reloadable we must write them out explicitly through a callback of `save_profiles`. This makes sense because they have an extra dimension for each halo along the profile axis. "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "# Save the profiles\n",
- "hc.add_callback(\"save_profiles\", storage=\"virial_profiles\", output_dir=\"profiles\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We then create the halo catalog. Remember, no analysis is done before this call to create. By adding callbacks and filters we are simply queuing up the actions we want to take that will all run now."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "hc.create()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Reloading HaloCatalogs"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Finally we load these profiles back in and make a pretty plot. It is not strictly necessary to reload the profiles in this notebook, but we show this process here to illustrate that this step may be performed completely separately from the rest of the script. This workflow allows you to create a single script that will allow you to perform all of the analysis that requires the full dataset. The output can then be saved in a compact form where only the necessarily halo quantities are stored. You can then download this smaller dataset to a local computer and run any further non-computationally intense analysis and design the appropriate plots."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We can load a previously saved halo catalog by using the `load` command. We then create a `HaloCatalog` object from just this dataset."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "halos_ds = yt.load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
- "\n",
- "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
- " output_dir=os.path.join(tmpdir, 'halo_catalog'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- " Just as profiles are saved separately through the `save_profiles` callback they also must be loaded separately using the `load_profiles` callback."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "hc_reloaded.add_callback('load_profiles', storage='virial_profiles',\n",
- " output_dir='profiles')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Calling `load` is the equivalent of calling `create` earlier, but defaults to not saving new information. This means that the callback to `load_profiles` is not run until we call `load` here."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "hc_reloaded.load()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Plotting Radial Profiles"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "In the future ProfilePlot will be able to properly interpret the loaded profiles of `Halo` and `HaloCatalog` objects, but this functionality is not yet implemented. In the meantime, we show a quick method of viewing a profile for a single halo."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The individual `Halo` objects contained in the `HaloCatalog` can be accessed through the `halo_list` attribute. This gives us access to the dictionary attached to each halo where we stored the radial profiles."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "halo = hc_reloaded.halo_list[0]\n",
- "\n",
- "radius = halo.virial_profiles[u\"('index', 'virial_radius_fraction')\"]\n",
- "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]\n",
- "\n",
- "# Remove output files, that are no longer needed\n",
- "shutil.rmtree(tmpdir)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here we quickly use matplotlib to create a basic plot of the radial profile of this halo. When `ProfilePlot` is properly configured to accept Halos and HaloCatalogs the full range of yt plotting tools will be accessible."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline\n",
- "import matplotlib.pyplot as plt\n",
- "import numpy as np\n",
- "\n",
- "plt.plot(np.array(radius), np.array(temperature))\n",
- "\n",
- "plt.semilogy()\n",
- "plt.xlabel(r'$\\rm{R/R_{vir}}$')\n",
- "plt.ylabel(r'$\\rm{Temperature\\/\\/(K)}$')\n",
- "\n",
- "plt.show()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/doc/source/cookbook/calculating_information.rst b/doc/source/cookbook/calculating_information.rst
index a9e3804b64d..0872b814f93 100644
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -56,16 +56,6 @@ information.
.. yt_cookbook:: simulation_analysis.py
-Smoothed Fields
-~~~~~~~~~~~~~~~
-
-This recipe demonstrates how to create a smoothed field,
-corresponding to a user-created derived field, using the
-:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
-See :ref:`gadget-notebook` for how to work with Gadget data.
-
-.. yt_cookbook:: smoothed_field.py
-
.. _cookbook-time-series-analysis:
diff --git a/doc/source/cookbook/complex_plots.rst b/doc/source/cookbook/complex_plots.rst
index 4646200a871..11a1d157c6d 100644
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -15,6 +15,73 @@ See :ref:`slice-plots` for more information.
.. yt_cookbook:: multi_width_image.py
+.. _image-resolution-primer:
+
+Varying the resolution of an image
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This illustrates the various parameters that control the resolution
+of an image, including the (deprecated) refinement level, the size of
+the :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`,
+and the number of pixels in the output image.
+
+In brief, there are three parameters that control the final resolution,
+with a fourth entering for particle data that is deposited onto a mesh
+(i.e. pre-4.0). Those are:
+
+1. `buff_size`, which can be altered with
+:meth:`~yt.visualization.plot_window.PlotWindow.set_buff_size`, which
+is inherited by
+:class:`~yt.visualization.plot_window.AxisAlignedSlicePlot`,
+:class:`~yt.visualization.plot_window.OffAxisSlicePlot`,
+:class:`~yt.visualization.plot_window.ProjectionPlot`, and
+:class:`~yt.visualization.plot_window.OffAxisProjectionPlot`. This
+controls the number of resolution elements in the
+:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`,
+which can be thought of as the number of individually colored
+squares (on a side) in a 2D image. `buff_size` can be set
+after creating the image with
+:meth:`~yt.visualization.plot_window.PlotWindow.set_buff_size`,
+or during image creation with the `buff_size` argument to any
+of the four preceding classes.
+
+2. `figure_size`, which can be altered with either
+:meth:`~yt.visualization.plot_container.PlotContainer.set_figure_size`
+or with :meth:`~yt.visualization.plot_window.PlotWindow.set_window_size`
+(the latter simply calls
+:meth:`~yt.visualization.plot_container.PlotContainer.set_figure_size`),
+or can be set during image creation with the `window_size` argument.
+This sets the size of the final image (including the visualization and,
+if applicable, the axes and colorbar as well) in inches.
+
+3. `dpi`, i.e. the dots-per-inch in your final file, which can also
+be thought of as the actual resolution of your image. This can
+only be set on save via the `mpl_kwargs` parameter to
+:meth:`~yt.visualization.plot_container.PlotContainer.save`. The
+`dpi` and `figure_size` together set the true resolution of your
+image (final image will be `dpi` :math:`*` `figure_size` pixels on a
+side), so if these are set too low, then your `buff_size` will not
+matter. On the other hand, increasing these without increasing
+`buff_size` accordingly will simply blow up your resolution
+elements to fill several real pixels.
+
+4. (only for meshed particle data) `n_ref`, the maximum number of
+particles in a cell in the oct-tree allowed before it is refined
+(removed in yt-4.0 as particle data is no longer deposited onto
+an oct-tree). For particle data, `n_ref` effectively sets the
+underlying resolution of your simulation. Regardless, for either
+grid data or deposited particle data, your image will never be
+higher resolution than your simulation data. In other words,
+if you are visualizing a region 50 kpc across that includes
+data that reaches a resolution of 100 pc, then there's no reason
+to set a `buff_size` (or a `dpi` :math:`*` `figure_size`) above
+50 kpc / 100 pc = 500.
+
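+As a quick sketch of the first three parameters (using the ``IsolatedGalaxy``
+sample dataset as a stand-in for your own data):
+
+.. code-block:: python
+
+    import yt
+
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    slc = yt.SlicePlot(ds, "z", ("gas", "density"))
+    slc.set_buff_size(1600)  # resolution elements in the FixedResolutionBuffer
+    slc.set_figure_size(8)   # figure size in inches
+    # the true pixel resolution of the saved file is dpi * figure_size
+    slc.save("slice_hires.png", mpl_kwargs=dict(dpi=200))
+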
+The below script demonstrates how each of these can be varied.
+
+.. yt_cookbook:: image_resolution.py
+
+
Multipanel with Axes Labels
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/cookbook/cosmological_analysis.rst b/doc/source/cookbook/cosmological_analysis.rst
deleted file mode 100644
index a56b692e12a..00000000000
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-Cosmological Analysis
----------------------
-
-These scripts demonstrate some basic and more advanced analysis that can be
-performed on cosmological simulation datasets. Most of the following
-recipes are derived from functionality in yt's :ref:`analysis-modules`.
-
-Plotting Halos
-~~~~~~~~~~~~~~
-
-This is a mechanism for plotting circles representing identified particle halos
-on an image.
-See :ref:`halo-analysis` and :ref:`annotate-halos` for more information.
-
-.. yt_cookbook:: halo_plotting.py
-
-.. _cookbook-rockstar-nested-grid:
-
-Running Rockstar to Find Halos on Multi-Resolution-Particle Datasets
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The version of Rockstar installed with yt does not have the capability
-to work on datasets with particles of different masses. Unfortunately,
-many simulations possess particles of different masses, notably cosmological
-zoom datasets. This recipe uses Rockstar in two different ways to generate a
-HaloCatalog from the highest resolution dark matter particles (the ones
-inside the zoom region). It then overlays some of those halos on a projection
-as a demonstration. See :ref:`rockstar` and :ref:`annotate-halos` for
-more information.
-
-.. yt_cookbook:: rockstar_nest.py
-
-.. _cookbook-halo_finding:
-
-Halo Profiling and Custom Analysis
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This script demonstrates the use of the halo catalog to create radial
-profiles for each halo in a cosmological dataset.
-See :ref:`halo_catalog` for more information.
-
-.. yt_cookbook:: halo_profiler.py
-
-.. _cookbook-light_cone:
-
-Light Cone Projection
-~~~~~~~~~~~~~~~~~~~~~
-
-This script creates a light cone projection, a synthetic observation
-that stacks together projections from multiple datasets to extend over
-a given redshift interval.
-See :ref:`light-cone-generator` for more information.
-
-.. yt_cookbook:: light_cone_projection.py
-
-.. _cookbook-light_ray:
-
-Light Ray
-~~~~~~~~~
-
-This script demonstrates how to make a synthetic quasar sight line that
-extends over multiple datasets and can be used to generate a synthetic
-absorption spectrum.
-See :ref:`light-ray-generator` and :ref:`absorption_spectrum` for more information.
-
-.. yt_cookbook:: light_ray.py
-
-.. _cookbook-single-dataset-light-ray:
-
-Single Dataset Light Ray
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-This script demonstrates how to make a light ray from a single dataset.
-
-.. yt_cookbook:: single_dataset_light_ray.py
-
-Creating and Fitting Absorption Spectra
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This script demonstrates how to use light rays to create corresponding
-absorption spectra and then fit the spectra to find absorbing
-structures.
-See :ref:`light-ray-generator` and :ref:`absorption_spectrum` for more information.
-
-.. yt_cookbook:: fit_spectrum.py
diff --git a/doc/source/cookbook/fit_spectrum.py b/doc/source/cookbook/fit_spectrum.py
deleted file mode 100644
index 3fc11c44851..00000000000
--- a/doc/source/cookbook/fit_spectrum.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import yt
-from yt.analysis_modules.cosmological_observation.light_ray.api import LightRay
-from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
-from yt.analysis_modules.absorption_spectrum.api import generate_total_fit
-
-# Define a field to simulate OVI based on a constant relationship to HI
-# Do *NOT* use this for science, because this is not how OVI actually behaves;
-# it is just an example.
-
-def _OVI_number_density(field, data):
- return data['H_number_density']*2.0
-
-# Define a function that will accept a ds and add the new field
-# defined above. This will be given to the LightRay below.
-def setup_ds(ds):
- ds.add_field(("gas","O_p5_number_density"),
- function=_OVI_number_density,
- units="cm**-3", sampling_type="cell")
-
-# Define species and associated parameters to add to continuum
-# Parameters used for both adding the transition to the spectrum
-# and for fitting
-# Note that for single species that produce multiple lines
-# (as in the OVI doublet), 'numLines' will be equal to the number
-# of lines, and f,gamma, and wavelength will have multiple values.
-
-HI_parameters = {'name': 'HI',
- 'field': 'H_number_density',
- 'f': [.4164],
- 'Gamma': [6.265E8],
- 'wavelength': [1215.67],
- 'mass': 1.00794,
- 'numLines': 1,
- 'maxN': 1E22, 'minN': 1E11,
- 'maxb': 300, 'minb': 1,
- 'maxz': 6, 'minz': 0,
- 'init_b': 30,
- 'init_N': 1E14}
-
-OVI_parameters = {'name': 'OVI',
- 'field': 'O_p5_number_density',
- 'f': [.1325, .06580],
- 'Gamma': [4.148E8, 4.076E8],
- 'wavelength': [1031.9261, 1037.6167],
- 'mass': 15.9994,
- 'numLines': 2,
- 'maxN': 1E17, 'minN': 1E11,
- 'maxb': 300, 'minb': 1,
- 'maxz': 6, 'minz': 0,
- 'init_b': 20,
- 'init_N': 1E12}
-
-species_dicts = {'HI': HI_parameters, 'OVI': OVI_parameters}
-
-# Create a LightRay object extending from z = 0 to z = 0.1
-# and use only the redshift dumps.
-lr = LightRay('enzo_cosmology_plus/AMRCosmology.enzo',
- 'Enzo', 0.0, 0.1,
- use_minimum_datasets=True,
- time_data=False
- )
-
-# Get all fields that need to be added to the light ray
-fields = ['temperature']
-for s, params in species_dicts.items():
- fields.append(params['field'])
-
-# Make a light ray, and set njobs to -1 to use one core
-# per dataset.
-lr.make_light_ray(seed=123456780,
- solution_filename='lightraysolution.txt',
- data_filename='lightray.h5',
- fields=fields, setup_function=setup_ds,
- njobs=-1)
-
-# Create an AbsorptionSpectrum object extending from
-# lambda = 900 to lambda = 1800, with 10000 pixels
-sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
-
-# Iterate over species
-for s, params in species_dicts.items():
- # Iterate over transitions for a single species
- for i in range(params['numLines']):
- # Add the lines to the spectrum
- sp.add_line(s, params['field'],
- params['wavelength'][i], params['f'][i],
- params['Gamma'][i], params['mass'],
- label_threshold=1.e10)
-
-
-# Make and save spectrum
-wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
-
-
-# Define order to fit species in
-order_fits = ['OVI', 'HI']
-
-# Fit spectrum and save fit
-fitted_lines, fitted_flux = generate_total_fit(wavelength,
- flux, order_fits, species_dicts,
- output_file='spectrum_fit.h5')
diff --git a/doc/source/cookbook/halo_analysis_example.rst b/doc/source/cookbook/halo_analysis_example.rst
deleted file mode 100644
index 08e9c978624..00000000000
--- a/doc/source/cookbook/halo_analysis_example.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _halo-analysis-example:
-
-Worked Example of Halo Analysis
--------------------------------
-
-.. notebook:: Halo_Analysis.ipynb
diff --git a/doc/source/cookbook/halo_plotting.py b/doc/source/cookbook/halo_plotting.py
index 2aceafcc557..c4ee65d7243 100644
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -1,5 +1,4 @@
import yt
-from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
# Load the dataset
ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006")
@@ -7,11 +6,7 @@
# Load the halo list from a rockstar output for this dataset
halos = yt.load('rockstar_halos/halos_0.0.bin')
-# Create the halo catalog from this halo list
-hc = HaloCatalog(halos_ds=halos)
-hc.load()
-
# Create a projection with the halos overplot on top
p = yt.ProjectionPlot(ds, "x", "density")
-p.annotate_halos(hc)
+p.annotate_halos(halos)
p.save()
diff --git a/doc/source/cookbook/halo_profiler.py b/doc/source/cookbook/halo_profiler.py
deleted file mode 100644
index 0a8421f892b..00000000000
--- a/doc/source/cookbook/halo_profiler.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import yt
-from yt.analysis_modules.halo_analysis.api import HaloCatalog
-
-# Load the data set with the full simulation information
-# and rockstar halos
-data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
-
-# Instantiate a catalog using those two parameter files
-hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
-
-# Filter out less massive halos
-hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun")
-
-# This recipe creates a spherical data container, computes
-# radial profiles, and calculates r_200 and M_200.
-hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
-
-# Create a sphere container with radius 5x r_200.
-field_params = dict(virial_radius=('quantity', 'radius_200'))
-hc.add_callback('sphere', radius_field='radius_200', factor=5,
- field_parameters=field_params)
-
-# Compute profiles of T vs. r/r_200
-hc.add_callback('profile', ['virial_radius_fraction'],
- [('gas', 'temperature')],
- storage='virial_profiles',
- weight_field='cell_mass',
- accumulation=False, output_dir='profiles')
-
-# Save the profiles
-hc.add_callback("save_profiles", storage="virial_profiles",
- output_dir="profiles")
-
-hc.create()
diff --git a/doc/source/cookbook/image_resolution.py b/doc/source/cookbook/image_resolution.py
new file mode 100644
index 00000000000..f3a02c367c5
--- /dev/null
+++ b/doc/source/cookbook/image_resolution.py
@@ -0,0 +1,64 @@
+import yt
+import numpy as np
+
+# Load the dataset. We'll work with some Gadget data to illustrate all
+# the different ways in which the effective resolution can vary. Specifically,
+# we'll use the GadgetDiskGalaxy dataset available at
+# http://yt-project.org/data/GadgetDiskGalaxy.tar.gz
+
+# load the data with a refinement criterion of 16 particles per cell
+# n.b. -- in yt-4.0, n_ref no longer exists as the data is no longer
+# deposited onto a grid. At present (03/15/2019), there is no way to
+# handle non-gas data in Gadget snapshots, though that is work in progress
+if int(yt.__version__[0]) < 4:
+ # increasing n_ref will result in a "lower resolution" (but faster) image,
+ # while decreasing it will go the opposite way
+ ds = yt.load("GadgetDiskGalaxy/snapshot_200.hdf5", n_ref=16)
+else:
+ ds = yt.load("GadgetDiskGalaxy/snapshot_200.hdf5")
+
+# Create a projection of the density (max value in each resolution element in the image):
+prj = yt.ProjectionPlot(ds, "x", ("gas", "density"), method='mip', center='max', width=(100, 'kpc'))
+
+# improve the plot by using a better interpolation:
+plot = prj.plots[list(prj.plots)[0]]
+ax = plot.axes
+img = ax.images[0]
+img.set_interpolation('bicubic')
+
+# improve the plot by setting the background color to the minimum of the colorbar
+prj.set_background_color(('gas', 'density'))
+
+# vary the buff_size -- the number of resolution elements in the actual visualization
+# set it to 2000x2000
+buff_size = 2000
+prj.set_buff_size(buff_size)
+
+# set the figure size in inches
+figure_size = 10
+prj.set_figure_size(figure_size)
+
+# if the image does not fill the plot (as is the default, since the axes and
+# colorbar contribute as well), then figuring out the proper dpi for a given
+# buff_size and figure_size is non-trivial -- it requires finding the bbox
+# for the actual image:
+bounding_box = ax.get_position()
+# we're going to scale to the larger of the two sides
+image_size = figure_size * max([bounding_box.width, bounding_box.height])
+# now save with a dpi that's scaled to the buff_size:
+dpi = np.rint(np.ceil(buff_size / image_size))
+prj.save('with_axes_colorbar.png', mpl_kwargs=dict(dpi=dpi))
+
+# in the case where the image fills the entire plot (i.e. if the axes and colorbar
+# are turned off), it's trivial to figure out the correct dpi from the buff_size and
+# figure_size (or vice versa):
+
+# hide the colorbar:
+prj.hide_colorbar()
+
+# hide the axes, while still keeping the background color correct:
+prj.hide_axes(draw_frame=True)
+
+# save with a dpi that makes sense:
+dpi = np.rint(np.ceil(buff_size / figure_size))
+prj.save('no_axes_colorbar.png', mpl_kwargs=dict(dpi=dpi))
\ No newline at end of file
diff --git a/doc/source/cookbook/index.rst b/doc/source/cookbook/index.rst
index 1908dfd4ac0..39cdae2f2d2 100644
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -29,7 +29,6 @@ Example Scripts
simple_plots
calculating_information
complex_plots
- cosmological_analysis
constructing_data_objects
.. _example-notebooks:
@@ -44,7 +43,6 @@ Example Notebooks
gadget_notebook
owls_notebook
../visualizing/transfer_function_helper
- ../analyzing/analysis_modules/sunyaev_zeldovich
fits_radio_cubes
fits_xray_images
geographic_projections
diff --git a/doc/source/cookbook/light_cone_projection.py b/doc/source/cookbook/light_cone_projection.py
deleted file mode 100644
index 50760d07ee8..00000000000
--- a/doc/source/cookbook/light_cone_projection.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import yt
-from yt.analysis_modules.cosmological_observation.api import \
- LightCone
-
-# Create a LightCone object extending from z = 0 to z = 0.1.
-
-# We have already set up the redshift dumps to be
-# used for this, so we will not use any of the time
-# data dumps.
-lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
- 'Enzo', 0., 0.1,
- observer_redshift=0.0,
- time_data=False)
-
-# Calculate a randomization of the solution.
-lc.calculate_light_cone_solution(seed=123456789, filename="LC/solution.txt")
-
-# Choose the field to be projected.
-field = 'szy'
-
-# Use the LightCone object to make a projection with a 600 arcminute
-# field of view and a resolution of 60 arcseconds.
-# Set njobs to -1 to have one core work on each projection
-# in parallel.
-lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), field,
- weight_field=None,
- save_stack=True,
- save_final_image=True,
- save_slice_images=True,
- njobs=-1)
-
-# By default, the light cone projections are kept in the LC directory,
-# but this moves them back to the current directory so that they're rendered
-# in our cookbook.
-import shutil, glob
-for file in glob.glob('LC/*png'):
- shutil.move(file, '.')
diff --git a/doc/source/cookbook/light_ray.py b/doc/source/cookbook/light_ray.py
deleted file mode 100644
index 3ef99de5f62..00000000000
--- a/doc/source/cookbook/light_ray.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import os
-import yt
-from yt.analysis_modules.cosmological_observation.api import \
- LightRay
-
-# Create a directory for the light rays
-if not os.path.isdir("LR"):
- os.mkdir('LR')
-
-# Create a LightRay object extending from z = 0 to z = 0.1
-# and use only the redshift dumps.
-lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
- 'Enzo', 0.0, 0.1,
- use_minimum_datasets=True,
- time_data=False)
-
-# Make a light ray, and set njobs to -1 to use one core
-# per dataset.
-lr.make_light_ray(seed=123456789,
- solution_filename='LR/lightraysolution.txt',
- data_filename='LR/lightray.h5',
- fields=['temperature', 'density'],
- njobs=-1)
-
-# Optionally, we can now overplot the part of this ray that intersects
-# one output from the source dataset in a ProjectionPlot
-ds = yt.load('enzo_tiny_cosmology/RD0004/RD0004')
-p = yt.ProjectionPlot(ds, 'z', 'density')
-p.annotate_ray(lr)
-p.save()
diff --git a/doc/source/cookbook/particle_filter.py b/doc/source/cookbook/particle_filter.py
index c55fff9558a..415a3946d22 100644
--- a/doc/source/cookbook/particle_filter.py
+++ b/doc/source/cookbook/particle_filter.py
@@ -48,8 +48,11 @@ def stars_old(pfilter, data):
print("Mass of old stars = %g Msun" % mass_old)
# Generate 4 projections: gas density, young stars, medium stars, old stars
-fields = [('gas', 'density'), ('deposit', 'stars_young_cic'),
- ('deposit', 'stars_medium_cic'), ('deposit', 'stars_old_cic')]
-
-prj = yt.ProjectionPlot(ds, 'z', fields, center="max", width=(100, 'kpc'))
-prj.save()
+fields = [('stars_young', 'particle_mass'),
+ ('stars_medium', 'particle_mass'),
+ ('stars_old', 'particle_mass')]
+
+prj1 = yt.ProjectionPlot(ds, 'z', ("gas", "density"), center="max", width=(100, "kpc"))
+prj1.save()
+prj2 = yt.ParticleProjectionPlot(ds, 'z', fields, center="max", width=(100, 'kpc'))
+prj2.save()
diff --git a/doc/source/cookbook/rockstar_nest.py b/doc/source/cookbook/rockstar_nest.py
deleted file mode 100644
index 47293aaa094..00000000000
--- a/doc/source/cookbook/rockstar_nest.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# You must run this job in parallel.
-# There are several mpi flags which can be useful in order for it to work OK.
-# It requires at least 3 processors in order to run because of the way in which
-# rockstar divides up the work. Make sure you have mpi4py installed as per
-# http://yt-project.org/docs/dev/analyzing/parallel_computation.html#setting-up-parallel-yt
-
-# Usage: mpirun -np --mca btl ^openib python this_script.py
-
-import yt
-from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
-from yt.data_objects.particle_filters import add_particle_filter
-from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
-yt.enable_parallelism() # rockstar halofinding requires parallelism
-
-# Create a dark matter particle filter
-# This will be code dependent, but this function here is true for enzo
-
-def DarkMatter(pfilter, data):
- filter = data[("all", "particle_type")] == 1 # DM = 1, Stars = 2
- return filter
-
-add_particle_filter("dark_matter", function=DarkMatter, filtered_type='all', \
- requires=["particle_type"])
-
-# First, we make sure that this script is being run using mpirun with
-# at least 3 processors as indicated in the comments above.
-assert(yt.communication_system.communicators[-1].size >= 3)
-
-# Load the dataset and apply dark matter filter
-fn = "Enzo_64/DD0043/data0043"
-ds = yt.load(fn)
-ds.add_particle_filter('dark_matter')
-
-# Determine highest resolution DM particle mass in sim by looking
-# at the extrema of the dark_matter particle_mass field.
-ad = ds.all_data()
-min_dm_mass = ad.quantities.extrema(('dark_matter','particle_mass'))[0]
-
-# Define a new particle filter to isolate all highest resolution DM particles
-# and apply it to dataset
-def MaxResDarkMatter(pfilter, data):
- return data["particle_mass"] <= 1.01 * min_dm_mass
-
-add_particle_filter("max_res_dark_matter", function=MaxResDarkMatter, \
- filtered_type='dark_matter', requires=["particle_mass"])
-ds.add_particle_filter('max_res_dark_matter')
-
-# If desired, we can see the total number of DM and High-res DM particles
-#if yt.is_root():
-# print("Simulation has %d DM particles." %
-# ad['dark_matter','particle_type'].shape)
-# print("Simulation has %d Highest Res DM particles." %
-# ad['max_res_dark_matter', 'particle_type'].shape)
-
-# Run the halo catalog on the dataset only on the highest resolution dark matter
-# particles
-hc = HaloCatalog(data_ds=ds, finder_method='rockstar', \
- finder_kwargs={'dm_only':True, 'particle_type':'max_res_dark_matter'})
-hc.create()
-
-# Or alternatively, just run the RockstarHaloFinder and later import the
-# output file as necessary. You can skip this step if you've already run it
-# once, but be careful since subsequent halo finds will overwrite this data.
-#rhf = RockstarHaloFinder(ds, particle_type="max_res_dark_matter")
-#rhf.run()
-# Load the halo list from a rockstar output for this dataset
-# Create a projection with the halos overplot on top
-#halos = yt.load('rockstar_halos/halos_0.0.bin')
-#hc = HaloCatalog(halos_ds=halos)
-#hc.load()
-
-# Regardless of your method of creating the halo catalog, use it to overplot the
-# halos on a projection.
-p = yt.ProjectionPlot(ds, "x", "density")
-p.annotate_halos(hc, annotate_field = 'particle_identifier', width=(10,'Mpc'), factor=2)
-p.save()
diff --git a/doc/source/cookbook/simple_1d_line_plot.py b/doc/source/cookbook/simple_1d_line_plot.py
index 9663df74483..572b7584fe0 100644
--- a/doc/source/cookbook/simple_1d_line_plot.py
+++ b/doc/source/cookbook/simple_1d_line_plot.py
@@ -5,7 +5,7 @@
# Create a line plot of the variables 'u' and 'v' with 1000 sampling points evenly spaced
# between the coordinates (0, 0, 0) and (0, 1, 0)
-plot = yt.LinePlot(ds, [('all', 'v'), ('all', 'u')], (0, 0, 0), (0, 1, 0), 1000)
+plot = yt.LinePlot(ds, [('all', 'v'), ('all', 'u')], (0., 0., 0.), (0., 1., 0.), 1000)
# Add a legend
plot.annotate_legend(('all', 'v'))
diff --git a/doc/source/cookbook/single_dataset_light_ray.py b/doc/source/cookbook/single_dataset_light_ray.py
deleted file mode 100644
index 2fb979b839c..00000000000
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import yt
-from yt.analysis_modules.cosmological_observation.api import \
- LightRay
-
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-lr = LightRay(ds)
-
-# With a single dataset, a start_position and
-# end_position or trajectory must be given.
-# These positions can be defined as xyz coordinates,
-# but here we just use the two opposite corners of the
-# simulation box. Alternatively, trajectory should
-# be given as (r, theta, phi)
-lr.make_light_ray(start_position=ds.domain_left_edge,
- end_position=ds.domain_right_edge,
- solution_filename='lightraysolution.txt',
- data_filename='lightray.h5',
- fields=['temperature', 'density'])
-
-# Optionally, we can now overplot this ray on a projection of the source
-# dataset
-p = yt.ProjectionPlot(ds, 'z', 'density')
-p.annotate_ray(lr)
-p.save()
diff --git a/doc/source/cookbook/smoothed_field.py b/doc/source/cookbook/smoothed_field.py
deleted file mode 100644
index 5885eead6a7..00000000000
--- a/doc/source/cookbook/smoothed_field.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import yt
-
-# Load a Gadget dataset following the demonstration notebook.
-fname = 'GadgetDiskGalaxy/snapshot_200.hdf5'
-
-unit_base = {'UnitLength_in_cm' : 3.08568e+21,
- 'UnitMass_in_g' : 1.989e+43,
- 'UnitVelocity_in_cm_per_s' : 100000}
-
-bbox_lim = 1e5 # kpc
-
-bbox = [[-bbox_lim, bbox_lim],
- [-bbox_lim, bbox_lim],
- [-bbox_lim, bbox_lim]]
-
-ds = yt.load(fname, unit_base=unit_base, bounding_box=bbox)
-
-# Create a derived field, the metal density.
-def _metal_density(field, data):
- density = data['PartType0', 'Density']
- Z = data['PartType0', 'metallicity']
- return density * Z
-
-# Add it to the dataset.
-ds.add_field(('PartType0', 'metal_density'), function=_metal_density,
- units="g/cm**3", particle_type=True)
-
-
-# Add the corresponding smoothed field to the dataset.
-from yt.fields.particle_fields import add_volume_weighted_smoothed_field
-
-add_volume_weighted_smoothed_field('PartType0', 'Coordinates', 'Masses',
- 'SmoothingLength', 'Density',
- 'metal_density', ds.field_info)
-
-# Define the region where the disk galaxy is. (See the Gadget notebook for
-# details. Here I make the box a little larger than needed to eliminate the
-# margin effect.)
-center = ds.arr([31996, 31474, 28970], "code_length")
-box_size = ds.quan(250, "code_length")
-left_edge = center - box_size/2*1.1
-right_edge = center + box_size/2*1.1
-box = ds.box(left_edge=left_edge, right_edge=right_edge)
-
-# And make a projection plot!
-yt.ProjectionPlot(ds, 'z',
- ('deposit', 'PartType0_smoothed_metal_density'),
- center=center, width=box_size, data_source=box).save()
diff --git a/doc/source/developing/building_the_docs.rst b/doc/source/developing/building_the_docs.rst
index 9bfc3df69f3..163629af8e3 100644
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -52,7 +52,7 @@ functionality and pare it down to its minimum. Add some comment lines to
describe what it is that you're doing along the way. Place this ``.py`` file
in the ``source/cookbook/`` directory, and then link to it explicitly in one
of the relevant ``.rst`` files in that directory (e.g. ``complex_plots.rst``,
-``cosmological_analysis.rst``, etc.), and add some description of what the script
+etc.), and add some description of what the script
actually does. We recommend that you use one of the
`sample data sets `_ in your recipe. When the full
docs are built, each of the cookbook recipes is executed dynamically on
diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst
index 184c2ff9291..fb0224a8607 100644
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -6,6 +6,35 @@ Loading Data
This section contains information on how to load data into yt, as well as
some important caveats about different data formats.
+.. _loading-sample-data:
+
+Sample Data
+-----------
+
+The yt community has provided a large number of sample datasets, which are
+accessible from https://yt-project.org/data/. yt also provides a helper
+function, ``yt.load_sample``, that can load from a set of sample datasets. The
+quickstart notebooks in this documentation make use of it.
+
+The files are, in general, named identically to their listings on the data
+catalog page. For instance, you can load ``IsolatedGalaxy`` by executing:
+
+.. code-block:: python
+
+ import yt
+
+ ds = yt.load_sample("IsolatedGalaxy")
+
+To find a list of all available datasets, you can call ``load_sample`` without
+any arguments, and it will return a list of the names that can be supplied:
+
+.. code-block:: python
+
+ import yt
+
+ yt.load_sample()
+
+This will return a list of possible filenames; more information about each
+dataset can be found on the data catalog page.
+
.. _loading-amrvac-data:
AMRVAC Data
@@ -118,8 +147,6 @@ Appropriate errors are thrown for other combinations.
.. note
Ghost cells exist in .dat files but never read by yt.
-
-
.. _loading-art-data:
ART Data
@@ -300,11 +327,14 @@ larger than this.
Alternative values for the following simulation parameters may be specified
using a ``parameters`` dict, accepting the following keys:
-* ``Gamma``: ratio of specific heats, Type: Float
+* ``gamma``: ratio of specific heats, Type: Float. If not specified,
+ :math:`\gamma = 5/3` is assumed.
* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or
- ``"cylindrical"``
+ ``"cylindrical"``. Default is ``"cartesian"``.
* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values
- corresponding to each dimension
+ corresponding to each dimension. Defaults to ``True`` in all directions.
+* ``mu``: mean molecular weight, Type: Float. If not specified, :math:`\mu = 0.6`
+ (for a fully ionized primordial plasma) is assumed.
.. code-block:: python
@@ -373,6 +403,18 @@ This means that the yt fields, e.g. ``("gas","density")``,
``("athena_pp","density")``, ``("athena_pp","vel1")``, ``("athena_pp","Bcc1")``,
will be in code units.
+Alternative values for the following simulation parameters may be specified
+using a ``parameters`` dict, accepting the following keys:
+
+* ``gamma``: ratio of specific heats, Type: Float. If not specified,
+ :math:`\gamma = 5/3` is assumed.
+* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or
+ ``"cylindrical"``. Default is ``"cartesian"``.
+* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values
+ corresponding to each dimension. Defaults to ``True`` in all directions.
+* ``mu``: mean molecular weight, Type: Float. If not specified, :math:`\mu = 0.6`
+ (for a fully ionized primordial plasma) is assumed.
+
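+For example, a minimal sketch (the file name here is just a placeholder for
+your own Athena++ dump):
+
+.. code-block:: python
+
+    import yt
+
+    parameters = {"gamma": 1.4,
+                  "geometry": "cartesian",
+                  "periodicity": (False, False, False),
+                  "mu": 1.0}
+    ds = yt.load("disk.out1.00100.athdf", parameters=parameters)
+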
.. rubric:: Caveats
* yt primarily works with primitive variables. If the Athena++ dataset contains
@@ -812,46 +854,91 @@ can read FITS image files that have the following (case-insensitive) suffixes:
* fts.gz
yt can currently read two kinds of FITS files: FITS image files and FITS
-binary table files containing positions, times, and energies of X-ray events.
+binary table files containing positions, times, and energies of X-ray
+events. These are described in more detail below.
-Though a FITS image is composed of a single array in the FITS file,
-upon being loaded into yt it is automatically decomposed into grids:
+Types of FITS Datasets Supported by yt
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. code-block:: python
+yt FITS Data Standard
+"""""""""""""""""""""
- import yt
- ds = yt.load("m33_hi.fits")
- ds.print_stats()
+yt has facilities for creating 2 and 3-dimensional FITS images from derived,
+fixed-resolution data products from other datasets. These include images
+produced from slices, projections, and 3D covering grids. The resulting
+FITS images are fully self-describing, in that unit, parameter, and coordinate
+information is passed from the original dataset. These can be created via the
+:class:`~yt.visualization.fits_image.FITSImageData` class and its subclasses.
+For information about how to use these special classes, see
+:ref:`writing_fits_images`.
-.. parsed-literal::
+Once you have produced a FITS file in this fashion, you can load it with yt;
+it will be detected as a ``YTFITSDataset`` object and can be analyzed in the
+same way as any other dataset in yt.
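+
+As a brief sketch of this round trip (the ``IsolatedGalaxy`` sample dataset and
+the output file name below are just examples):
+
+.. code-block:: python
+
+    import yt
+    from yt.visualization.fits_image import FITSProjection
+
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    prj_fits = FITSProjection(ds, "z", ("gas", "density"))
+    prj_fits.writeto("galaxy_proj_density.fits", overwrite=True)
+
+    # the file carries units and coordinates, so it loads as a YTFITSDataset
+    ds_fits = yt.load("galaxy_proj_density.fits")
+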
- level # grids # cells # cells^3
- ----------------------------------------------
- 0 512 981940800 994
- ----------------------------------------------
- 512 981940800
+Astronomical Image Data
+"""""""""""""""""""""""
-yt will generate its own domain decomposition, but the number of grids can be
-set manually by passing the ``nprocs`` parameter to the ``load`` call:
+These files are one of three types:
+
+* Generic two-dimensional FITS images in sky coordinates
+* Three or four-dimensional "spectral cubes"
+* *Chandra* event files
+
+These FITS images typically are in celestial or galactic coordinates, and
+for 3D spectral cubes the third axis is typically in velocity, wavelength,
+or frequency units. For these datasets, since yt does not yet recognize
+non-spatial axes, the coordinates are in units of the image pixels. The
+coordinates of these pixels in the WCS coordinate systems will be available
+in separate fields.
+
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt
+sets the pixel scale as the ``code_length``, certain visualizations (such as
+volume renderings) may look extended or distended in ways that are
+undesirable. To adjust the width in ``code_length`` of the spectral axis, set
+``spectral_factor`` equal to a constant which gives the desired scaling, or set
+it to ``"auto"`` to make the width the same as the largest axis in the sky
+plane:
.. code-block:: python
- ds = load("m33_hi.fits", nprocs=1024)
+ ds = yt.load("m33_hi.fits.gz", spectral_factor=0.1)
+
+For 4D spectral cubes, the fourth axis is assumed to be composed of different
+fields altogether (e.g., Stokes parameters for radio data).
+
+*Chandra* X-ray event data, which is in tabular form, will be loaded as
+particle fields in yt, but a grid will be constructed from the WCS
+information in the FITS header. There is a helper function,
+``setup_counts_fields``, which may be used to make deposited image fields
+from the event data for different energy bands (for an example see
+:ref:`xray_fits`).
+
+Generic FITS Images
+"""""""""""""""""""
+
+If the FITS file contains images but does not have adequate header information
+to fall into one of the above categories, yt will still load the data, but
+the resulting field and/or coordinate information will necessarily be
+incomplete. Field names may not be descriptive, and units may be incorrect. To
+get the full use out of yt for FITS files, make sure that the file is sufficiently
+self-describing to fall into one of the above categories.
Making the Most of yt for FITS Data
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-yt will load data without WCS information and/or some missing header keywords, but the resulting
-field information will necessarily be incomplete. For example, field names may not be descriptive,
-and units will not be correct. To get the full use out of yt for FITS files, make sure that for
-each image the following header keywords have sensible values:
+yt will load data without WCS information and/or some missing header keywords,
+but the resulting field and/or coordinate information will necessarily be
+incomplete. For example, field names may not be descriptive, and units will not
+be correct. To get the full use out of yt for FITS files, make sure that for
+each image HDU the following standard header keywords have sensible values:
* ``CDELTx``: The pixel width in along axis ``x``
* ``CRVALx``: The coordinate value at the reference position along axis ``x``
* ``CRPIXx``: The reference pixel along axis ``x``
* ``CTYPEx``: The projection type of axis ``x``
* ``CUNITx``: The units of the coordinate along axis ``x``
-* ``BTYPE``: The type of the image
+* ``BTYPE``: The type of the image; this will be used as the field name
* ``BUNIT``: The units of the image
FITS header keywords can easily be updated using AstroPy. For example,
@@ -859,43 +946,13 @@ to set the ``BTYPE`` and ``BUNIT`` keywords:
.. code-block:: python
- import astropy.io.fits as pyfits
- f = pyfits.open("xray_flux_image.fits", mode="update")
+ from astropy.io import fits
+ f = fits.open("xray_flux_image.fits", mode="update")
f[0].header["BUNIT"] = "cts/s/pixel"
f[0].header["BTYPE"] = "flux"
f.flush()
f.close()
-FITS Coordinates
-^^^^^^^^^^^^^^^^
-
-For FITS datasets, the unit of ``code_length`` is always the width of one
-pixel. yt will attempt to use the WCS information in the FITS header to
-construct information about the coordinate system, and provides support for
-the following dataset types:
-
-1. Rectilinear 2D/3D images with length units (e.g., Mpc, AU,
- etc.) defined in the ``CUNITx`` keywords
-2. 2D images in some celestial coordinate systems (RA/Dec,
- galactic latitude/longitude, defined in the ``CTYPEx``
- keywords), and X-ray binary table event files
-3. 3D images with celestial coordinates and a third axis for another
- quantity, such as velocity, frequency, wavelength, etc.
-4. 4D images with the first three axes like Case 3, where the slices
- along the 4th axis are interpreted as different fields.
-
-If your data is of the first case, yt will determine the length units based
-on the information in the header. If your data is of the second or third
-cases, no length units will be assigned, but the world coordinate information
-about the axes will be stored in separate fields. If your data is of the
-fourth type, the coordinates of the first three axes will be determined
-according to cases 1-3.
-
-.. note::
-
- Linear length-based coordinates (Case 1 above) are only supported if all
- dimensions have the same value for ``CUNITx``. WCS coordinates are only
- supported for Cases 2-4.
FITS Data Decomposition
^^^^^^^^^^^^^^^^^^^^^^^
@@ -926,8 +983,7 @@ set manually by passing the ``nprocs`` parameter to the ``load`` call:
.. code-block:: python
- ds = load("m33_hi.fits", nprocs=64)
-
+ ds = yt.load("m33_hi.fits", nprocs=64)
Fields in FITS Datasets
^^^^^^^^^^^^^^^^^^^^^^^
@@ -947,7 +1003,7 @@ The third way is if auxiliary files are included along with the main file, like
.. code-block:: python
- ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
+ ds = yt.load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
The image blocks in each of these files will be loaded as a separate field,
provided they have the same dimensions as the image blocks in the main file.
@@ -957,12 +1013,6 @@ based on the corresponding ``CTYPEx`` keywords. When queried, these fields
will be generated from the pixel coordinates in the file using the WCS
transformations provided by AstroPy.
-X-ray event data will be loaded as particle fields in yt, but a grid will be
-constructed from the WCS information in the FITS header. There is a helper
-function, ``setup_counts_fields``, which may be used to make deposited image
-fields from the event data for different energy bands (for an example see
-:ref:`xray_fits`).
-
.. note::
Each FITS image from a single dataset, whether from one file or from one of
@@ -988,11 +1038,11 @@ containing different mask values for different fields:
.. code-block:: python
- # passing a single float
- ds = load("m33_hi.fits", nan_mask=0.0)
+ # passing a single float for all images
+ ds = yt.load("m33_hi.fits", nan_mask=0.0)
# passing a dict
- ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
+ ds = yt.load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
``suppress_astropy_warnings``
"""""""""""""""""""""""""""""
@@ -1001,17 +1051,6 @@ Generally, AstroPy may generate a lot of warnings about individual FITS
files, many of which you may want to ignore. If you want to see these
warnings, set ``suppress_astropy_warnings = False``.
-``spectral_factor``
-"""""""""""""""""""
-
-Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt
-sets the pixel scale as the ``code_length``, certain visualizations (such as
-volume renderings) may look extended or distended in ways that are
-undesirable. To adjust the width in ``code_length`` of the spectral axis, set
-``spectral_factor`` equal to a constant which gives the desired scaling, or set
-it to ``"auto"`` to make the width the same as the largest axis in the sky
-plane.
-
Miscellaneous Tools for Use with FITS Data
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1064,7 +1103,7 @@ version of AstroPy >= 1.3 must be installed.
.. code-block:: python
wcs_slc = PlotWindowWCS(slc)
- wcs_slc.show() # for the IPython notebook
+ wcs_slc.show() # for Jupyter notebooks
wcs_slc.save()
``WCSAxes`` is still in an experimental state, but as its functionality
@@ -1092,8 +1131,8 @@ individual lines from an intensity cube:
'CH3NH2': (218.40956, 'GHz')}
slab_width = (0.05, "GHz")
ds = create_spectral_slabs("intensity_cube.fits",
- slab_centers, slab_width,
- nan_mask=0.0)
+ slab_centers, slab_width,
+ nan_mask=0.0)
All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when
creating the dataset (see :ref:`additional_fits_options` above). In the
@@ -1106,11 +1145,12 @@ zero, and the left and right edges of the domain along this axis are
Examples of Using FITS Data
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The following IPython notebooks show examples of working with FITS data in yt,
+The following Jupyter notebooks show examples of working with FITS data in yt,
which we recommend you look at in the following order:
* :ref:`radio_cubes`
* :ref:`xray_fits`
+* :ref:`writing_fits_images`
.. _loading-flash-data:
@@ -1148,7 +1188,31 @@ grid structure and are at the same simulation time, the particle data may be loa
However, if you don't have a corresponding plotfile for a particle file, but would still
like to load the particle data, you can still call ``yt.load`` on the file. However, the
grid information will not be available, and the particle data will be loaded in a fashion
-similar to SPH data.
+similar to other particle-based datasets in yt.
+
+Mean Molecular Weight and Number Density Fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The way the mean molecular weight and number density fields are defined depends on
+what type of simulation you are running. If you are running a simulation without
+species and a :math:`\gamma`-law equation of state, then the mean molecular weight
+is defined using the ``eos_singleSpeciesA`` parameter in the FLASH dataset. If you
+have multiple species and your dataset contains the FLASH field ``"abar"``, then
+this is used as the mean molecular weight. In either case, the number density field
+is calculated using this weight.
+
+If you are running a FLASH simulation where the fields ``"sumy"`` and ``"ye"`` are
+present, then the mean molecular weight is the inverse of ``"sumy"``, and the fields
+``"El_number_density"``, ``"ion_number_density"``, and ``"number_density"`` are
+defined as follows:
+
+* ``"El_number_density"`` :math:`n_e = N_AY_e\rho`
+* ``"ion_number_density"`` :math:`n_i = N_A\rho/\bar{A}`
+* ``"number_density"`` :math:`n = n_e + n_i`
+
+where :math:`n_e` and :math:`n_i` are the electron and ion number densities,
+:math:`\rho` is the mass density, :math:`Y_e` is the electron number per baryon,
+:math:`\bar{A}` is the mean molecular weight, and :math:`N_A` is Avogadro's number.
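+
+As a short sketch (assuming a FLASH plotfile that contains the ``"ye"`` and
+``"sumy"`` fields; the file name is just a placeholder):
+
+.. code-block:: python
+
+    import yt
+
+    ds = yt.load("radflash_hdf5_plt_cnt_0050")
+    ad = ds.all_data()
+    # derived from "ye" and "sumy" as described above
+    print(ad["gas", "El_number_density"])
+    print(ad["gas", "number_density"])
+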
.. rubric:: Caveats
@@ -1170,9 +1234,11 @@ from the OWLS project can be found at :ref:`owls-notebook`.
.. note::
- If you are loading a multi-file dataset with Gadget, supply the *zeroth*
- file to the ``load`` command. For instance,
- ``yt.load("snapshot_061.0.hdf5")`` .
+ If you are loading a multi-file dataset with Gadget, you can either supply the *zeroth*
+ file to the ``load`` command or the directory containing all of the files.
+ For instance, to load the *zeroth* file: ``yt.load("snapshot_061.0.hdf5")`` . To
+ give just the directory, if you have all of your ``snapshot_000.*`` files in a directory
+ called ``snapshot_000``, do: ``yt.load("/path/to/snapshot_000")``.
Gadget data in HDF5 format can be loaded with the ``load`` command:
@@ -1255,21 +1321,6 @@ The number of cells in an oct is defined by the expression
It's recommended that if you want higher-resolution, try reducing the value of
``n_ref`` to 32 or 16.
-Also yt can be set to generate the global mesh index according to a specific
-type of particles instead of all the particles through the parameter
-``index_ptype``. For example, to build the octree only according to the
-``"PartType0"`` particles, you can do:
-
-.. code-block:: python
-
- ds = yt.load("snapshot_061.hdf5", index_ptype="PartType0")
-
-By default, ``index_ptype`` is set to ``"all"``, which means all the particles.
-For Gadget binary outputs, ``index_ptype`` should be set using the particle type
-names yt uses internally (e.g. ``'Gas'``, ``'Halo'``, ``'Disk'``, etc). For
-Gadget HDF5 outputs the particle type names come from the HDF5 output and so
-should be referred to using names like ``'PartType0'``.
-
.. _gadget-field-spec:
Field Specifications
@@ -1422,20 +1473,70 @@ argument of this form:
yt will utilize length, mass and time to set up all other units.
+.. _loading-swift-data:
+
+SWIFT Data
+----------
+
+yt has support for reading in SWIFT data from the HDF5 file format. It is able
+to access all particles and fields which are stored on-disk and it is also able
+to generate derived fields, e.g., linear momentum from on-disk fields.
+
+It is also possible to smooth the data onto a grid or an octree. This
+interpolation can be done with an SPH kernel, using either the scatter or gather
+approach. The SWIFT frontend is supported and cared for by Ashley Kelly.
+
+SWIFT data in HDF5 format can be loaded with the ``load`` command:
+
+.. code-block:: python
+
+ import yt
+ ds = yt.load("EAGLE_6/eagle_0005.hdf5")
+
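+As a sketch of the grid interpolation mentioned above (the grid dimensions are
+arbitrary):
+
+.. code-block:: python
+
+    # smooth the SPH data onto a uniform 128^3 grid covering the whole domain;
+    # by default the scatter approach is used for the deposition
+    ag = ds.arbitrary_grid(ds.domain_left_edge, ds.domain_right_edge,
+                           dims=[128, 128, 128])
+    density = ag["gas", "density"]
+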
+.. _arepo-data:
+
+Arepo Data
+----------
+
+Arepo data is currently treated as SPH data. The gas cells have smoothing lengths
+assigned using the following prescription for a given gas cell :math:`i`:
+
+.. math::
+
+ h_{\rm sml} = \alpha\left(\frac{3}{4\pi}\frac{m_i}{\rho_i}\right)^{1/3}
+
+where :math:`\alpha` is a constant factor. By default, :math:`\alpha = 2`. In
+practice, smoothing lengths are only used for creating slices and projections,
+and this value of :math:`\alpha` works well for this purpose. However, this
+value can be changed when loading an Arepo dataset by setting the
+``smoothing_factor`` parameter:
+
+.. code-block:: python
+
+ import yt
+ ds = yt.load("snapshot_100.hdf5", smoothing_factor=1.5)
+
+Currently, only Arepo HDF5 snapshots are supported. If the "GFM" metal fields
+(the on-disk ``"GFM_Metals"`` field) are present in your dataset, they will be
+loaded in and aliased to the appropriate species fields. For more information,
+see the `Illustris TNG documentation `_.
+
.. _loading-gamer-data:
GAMER Data
----------
-GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this:
+GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the
+data like this:
.. code-block:: python
import yt
ds = yt.load("InteractingJets/jet_000002")
-For simulations without units (i.e., OPT__UNIT = 0), you can supply conversions for
-length, time, and mass to ``load`` using the ``units_override`` functionality:
+For simulations without units (i.e., OPT__UNIT = 0), you can supply conversions
+for length, time, and mass to ``load`` using the ``units_override``
+functionality:
.. code-block:: python
@@ -1445,14 +1546,16 @@ length, time, and mass to ``load`` using the ``units_override`` functionality:
"mass_unit" :(1.4690033e+36,"g") }
ds = yt.load("InteractingJets/jet_000002", units_override=code_units)
-This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
-e.g., ``("gamer","Dens")``, will be in code units.
+This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units,
+but the GAMER fields, e.g., ``("gamer","Dens")``, will be in code units.
-Particle data are supported and are always stored in the same file as the grid data.
+Particle data are supported and are always stored in the same file as the grid
+data.
.. rubric:: Caveats
-* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
+* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not
+  supported.
.. _loading-amr-data:
@@ -1768,6 +1871,41 @@ The ``load_particles`` function also accepts the following keyword parameters:
``bbox``
The bounding box for the particle positions.
+A novel use of the ``load_particles`` function is to facilitate SPH
+visualization of non-SPH particles. See the example below:
+
+.. code-block:: python
+
+ import yt
+
+ # Load dataset and center on the dense region
+ ds = yt.load('FIRE_M12i_ref11/snapshot_600.hdf5')
+ _, center = ds.find_max(('PartType0', 'density'))
+
+ # Reload DM particles into a stream dataset
+ ad = ds.all_data()
+ pt = 'PartType1'
+ fields = ['particle_mass'] + [f'particle_position_{ax}' for ax in 'xyz']
+ data = {field: ad[pt, field] for field in fields}
+ ds_dm = yt.load_particles(data, data_source=ad)
+
+ # Generate the missing SPH fields
+ ds_dm.add_sph_fields()
+
+ # Make the SPH projection plot
+ p = yt.ProjectionPlot(ds_dm, 'z', ('io', 'density'),
+ center=center, width=(1, 'Mpc'))
+ p.set_unit('density', 'Msun/kpc**2')
+ p.show()
+
+Here we see two new things. First, ``load_particles`` accepts a ``data_source``
+argument to infer parameters like code units, which could be tedious to provide
+otherwise. Second, the returned
+:class:`~yt.frontends.stream.data_structures.StreamParticleDataset` has an
+:meth:`~yt.frontends.stream.data_structures.StreamParticleDataset.add_sph_fields`
+method, to create the ``smoothing_length`` and ``density`` fields required for
+SPH visualization to work.
+
.. _loading-gizmo-data:
Gizmo Data
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 3339c93afca..ece8a940efa 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -103,11 +103,11 @@ Table of Contents
Track halos, make synthetic observations, find clumps, and more
+
Astrophysical analysis, clump finding, cosmology calculations, and more
diff --git a/doc/source/installing.rst b/doc/source/installing.rst
index cdd5379c17e..63f535640f0 100644
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -374,60 +374,6 @@ most up-to-date source code.
Alternatively, you can replace ``pip install -e .`` with ``conda develop -b .``.
-
-Installing Support for the Rockstar Halo Finder
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The easiest way to set rockstar up in a conda-based python environment is to run
-the install script with ``INST_ROCKSTAR=1``.
-
-If you want to do this manually, you will need to follow these
-instructions. First, clone Matt Turk's fork of rockstar and compile it:
-
-.. code-block:: bash
-
- $ git clone https://github.com/yt-project/rockstar
- $ cd rockstar
- $ make lib
-
-Next, copy `librockstar.so` into the `lib` folder of your anaconda installation:
-
-.. code-block:: bash
-
- $ cp librockstar.so /path/to/anaconda/lib
-
-Finally, you will need to recompile yt to enable the rockstar interface. Clone a
-copy of the yt git repository (see :ref:`conda-source-build`), or navigate
-to a clone that you have already made, and do the following:
-
-.. code-block:: bash
-
- $ cd /path/to/yt-git
- $ ./clean.sh
- $ echo /path/to/rockstar > rockstar.cfg
- $ pip install -e .
-
-Here ``/path/to/yt-git`` is the path to your clone of the yt git repository
-and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of
-rockstar.
-
-Finally, to actually use rockstar, you will need to ensure the folder containing
-`librockstar.so` is in your LD_LIBRARY_PATH:
-
-.. code-block:: bash
-
- $ export LD_LIBRARY_PATH=/path/to/anaconda/lib
-
-You should now be able to enter a python session and import the rockstar
-interface:
-
-.. code-block:: python
-
- >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface
-
-If this python import fails, then you have not installed rockstar and yt's
-rockstar interface correctly.
-
.. _windows-installation:
Installing yt on Windows
diff --git a/doc/source/quickstart/1)_Introduction.ipynb b/doc/source/quickstart/1)_Introduction.ipynb
index 7fe228c9f2f..cf9615fa362 100644
--- a/doc/source/quickstart/1)_Introduction.ipynb
+++ b/doc/source/quickstart/1)_Introduction.ipynb
@@ -8,55 +8,53 @@
"\n",
"In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
"\n",
- "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook. The documentation exists at https://yt-project.org/doc/. If you encounter problems, look for help here: https://yt-project.org/doc/help/.\n",
- "\n",
+ "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook. The documentation exists at https://yt-project.org/doc/. If you encounter problems, look for help here: https://yt-project.org/doc/help/index.html."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
"## Acquiring the datasets for this tutorial\n",
"\n",
- "If you are executing these tutorials interactively, you need some sample datasets on which to run the code. You can download these datasets at https://yt-project.org/data/. The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
+ "If you are executing these tutorials interactively, you need some sample datasets on which to run the code. You can download these datasets at https://yt-project.org/data/, or you can use the built-in yt sample data loader (using [pooch](https://www.fatiando.org/pooch/latest/api/index.html) under the hood) to automatically download the data for you.\n",
+ "\n",
+ "The datasets necessary for each lesson are noted next to the corresponding tutorial, and by default it will use the pooch-based dataset downloader. If you would like to supply your own paths, you can choose to do so.\n",
"\n",
+ "## Using the Automatic Downloader\n",
+ "\n",
+ "For the purposes of this tutorial, or whenever you want to use sample data, you can use the `load_sample` command to utilize the pooch auto-downloader. For instance:\n",
+ "\n",
+ "```python\n",
+ "ds = yt.load_sample(\"IsolatedGalaxy\")\n",
+ "```\n",
+ "\n",
+ "## Using manual loading\n",
+ "\n",
+ "The way you will *most frequently* interact with `yt` is using the standard `load` command. This accepts a path and optional arguments. For instance:\n",
+ "\n",
+ "```python\n",
+ "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+ "```\n",
+ "\n",
+ "would load the `IsolatedGalaxy` dataset by supplying the full path to the parameter file."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
"## What's Next?\n",
"\n",
"The Notebooks are meant to be explored in this order:\n",
"\n",
- "1. Introduction\n",
+ "1. Introduction (this file!)\n",
"2. Data Inspection (IsolatedGalaxy dataset)\n",
"3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
"4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
"5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
"6. Volume Rendering (IsolatedGalaxy dataset)"
]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "download_datasets = False\n",
- "if download_datasets:\n",
- " !curl -sSO https://yt-project.org/data/enzo_tiny_cosmology.tar.gz\n",
- " print (\"Got enzo_tiny_cosmology\")\n",
- " !tar xzf enzo_tiny_cosmology.tar.gz\n",
- " \n",
- " !curl -sSO https://yt-project.org/data/Enzo_64.tar.gz\n",
- " print (\"Got Enzo_64\")\n",
- " !tar xzf Enzo_64.tar.gz\n",
- " \n",
- " !curl -sSO https://yt-project.org/data/IsolatedGalaxy.tar.gz\n",
- " print (\"Got IsolatedGalaxy\")\n",
- " !tar xzf IsolatedGalaxy.tar.gz\n",
- " \n",
- " print (\"All done!\")"
- ]
}
],
"metadata": {
@@ -75,7 +73,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.4.3"
+ "version": "3.7.3"
}
},
"nbformat": 4,
diff --git a/doc/source/quickstart/2)_Data_Inspection.ipynb b/doc/source/quickstart/2)_Data_Inspection.ipynb
index 1118b256040..baf00f0c513 100644
--- a/doc/source/quickstart/2)_Data_Inspection.ipynb
+++ b/doc/source/quickstart/2)_Data_Inspection.ipynb
@@ -13,7 +13,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -31,11 +34,14 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
- "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+ "ds = yt.load_sample(\"IsolatedGalaxy\")"
]
},
{
@@ -51,7 +57,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -69,7 +78,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -87,7 +99,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -105,7 +120,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -123,7 +141,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -141,7 +162,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -161,7 +185,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -192,7 +219,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -210,7 +240,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -221,7 +254,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -240,7 +276,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -251,7 +290,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -262,7 +304,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -273,7 +318,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -295,7 +343,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -306,7 +357,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -320,7 +374,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -331,7 +388,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -343,7 +403,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -368,7 +431,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -379,7 +445,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -397,7 +466,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -415,7 +487,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -439,9 +514,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.4.3"
+ "version": "3.7.3"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 4
}
diff --git a/doc/source/quickstart/3)_Simple_Visualization.ipynb b/doc/source/quickstart/3)_Simple_Visualization.ipynb
index 945047a1280..0a08ce3b800 100644
--- a/doc/source/quickstart/3)_Simple_Visualization.ipynb
+++ b/doc/source/quickstart/3)_Simple_Visualization.ipynb
@@ -13,7 +13,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -31,11 +34,14 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
- "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+ "ds = yt.load_sample(\"enzo_tiny_cosmology\", \"DD0046/DD0046\")\n",
"print (\"Redshift =\", ds.current_redshift)"
]
},
@@ -52,7 +58,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -73,7 +82,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -84,7 +96,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -95,7 +110,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -106,7 +124,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -117,7 +138,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -135,7 +159,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -154,7 +181,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -172,7 +202,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -192,11 +225,14 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
- "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
+ "ds = yt.load_sample(\"Enzo_64\", \"DD0043/data0043\")\n",
"s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
"s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
"s.zoom(10.0)"
@@ -213,7 +249,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -231,7 +270,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -249,7 +291,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -269,7 +314,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -293,9 +341,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.4.3"
+ "version": "3.7.3"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 4
}
diff --git a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb
index eaac3c15bca..86ccb0fd6e4 100644
--- a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb
@@ -13,7 +13,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -21,7 +24,7 @@
"import yt\n",
"import numpy as np\n",
"from matplotlib import pylab\n",
- "from yt.analysis_modules.halo_finding.api import HaloFinder"
+ "from yt.extensions.astro_analysis.halo_finding.api import HaloFinder"
]
},
{
@@ -39,11 +42,14 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
- "ts = yt.load(\"enzo_tiny_cosmology/DD????/DD????\")"
+ "ts = yt.load_sample(\"enzo_tiny_cosmology\", \"DD????/DD????\")"
]
},
{
@@ -59,7 +65,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -84,7 +93,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -105,14 +117,19 @@
"\n",
"Let's do something a bit different. Let's calculate the total mass inside halos and outside halos.\n",
"\n",
- "This actually touches a lot of different pieces of machinery in yt. For every dataset, we will run the halo finder HOP. Then, we calculate the total mass in the domain. Then, for each halo, we calculate the sum of the baryon mass in that halo. We'll keep running tallies of these two things."
+ "This actually touches a lot of different pieces of machinery in yt. For every dataset, we will run the halo finder HOP. Then, we calculate the total mass in the domain. Then, for each halo, we calculate the sum of the baryon mass in that halo. We'll keep running tallies of these two things.\n",
+ "\n",
+    "Note that the halo finding machinery requires the additional [yt_astro_analysis](https://github.com/yt-project/yt_astro_analysis) package. Installation instructions can be found at https://yt-astro-analysis.readthedocs.io/."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -143,7 +160,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -175,7 +195,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -187,7 +210,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -198,7 +224,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -209,7 +238,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -229,11 +261,14 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
- "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+ "ds = yt.load_sample(\"IsolatedGalaxy\")\n",
"v, c = ds.find_max(\"density\")\n",
"sl = ds.slice(2, c[0])\n",
"print (sl[\"index\", \"x\"])\n",
@@ -253,7 +288,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -272,7 +310,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -296,7 +337,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -315,7 +359,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -326,7 +373,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -344,7 +394,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -370,7 +423,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -389,7 +445,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -414,9 +473,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.4.3"
+ "version": "3.7.3"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 4
}
diff --git a/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb b/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb
index 363fcd0b3b8..51021789c2e 100644
--- a/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb
+++ b/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb
@@ -13,7 +13,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -37,7 +40,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -57,11 +63,14 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
- "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+ "ds = yt.load_sample(\"IsolatedGalaxy\")\n",
"dd = ds.all_data()\n",
"print (list(dd.quantities.keys()))"
]
@@ -77,7 +86,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -95,7 +107,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -115,7 +130,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -145,7 +163,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -167,7 +188,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -187,7 +211,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -209,7 +236,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -231,7 +261,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -267,9 +300,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.4.3"
+ "version": "3.7.3"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 4
}
diff --git a/doc/source/quickstart/6)_Volume_Rendering.ipynb b/doc/source/quickstart/6)_Volume_Rendering.ipynb
index 069b431123c..581b8358a13 100644
--- a/doc/source/quickstart/6)_Volume_Rendering.ipynb
+++ b/doc/source/quickstart/6)_Volume_Rendering.ipynb
@@ -13,12 +13,15 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
"import yt\n",
- "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+ "ds = yt.load_sample(\"IsolatedGalaxy\")"
]
},
{
@@ -36,7 +39,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -65,7 +71,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -83,7 +92,10 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
- "collapsed": false
+ "collapsed": false,
+ "jupyter": {
+ "outputs_hidden": false
+ }
},
"outputs": [],
"source": [
@@ -120,9 +132,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.1"
+ "version": "3.7.3"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 4
}
diff --git a/doc/source/reference/api/api.rst b/doc/source/reference/api/api.rst
index c4537d1b11c..2c1aca5a65a 100644
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -494,62 +494,16 @@ of topologically disconnected structures, i.e., clump finding.
~yt.data_objects.level_sets.clump_info_items.add_clump_info
~yt.data_objects.level_sets.clump_validators.add_validator
-.. _halo_analysis_ref:
-
-Halo Analysis
-^^^^^^^^^^^^^
-
-The ``HaloCatalog`` object is the primary means for performing custom analysis
-on cosmological halos. It is also the primary interface for halo finding.
-
-.. autosummary::
-
- ~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog
- ~yt.analysis_modules.halo_analysis.halo_finding_methods.HaloFindingMethod
- ~yt.analysis_modules.halo_analysis.halo_callbacks.HaloCallback
- ~yt.analysis_modules.halo_analysis.halo_callbacks.delete_attribute
- ~yt.analysis_modules.halo_analysis.halo_callbacks.halo_sphere
- ~yt.analysis_modules.halo_analysis.halo_callbacks.iterative_center_of_mass
- ~yt.analysis_modules.halo_analysis.halo_callbacks.load_profiles
- ~yt.analysis_modules.halo_analysis.halo_callbacks.phase_plot
- ~yt.analysis_modules.halo_analysis.halo_callbacks.profile
- ~yt.analysis_modules.halo_analysis.halo_callbacks.save_profiles
- ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_bulk_velocity
- ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_field_max_recenter
- ~yt.analysis_modules.halo_analysis.halo_callbacks.virial_quantities
- ~yt.analysis_modules.halo_analysis.halo_filters.HaloFilter
- ~yt.analysis_modules.halo_analysis.halo_filters.not_subhalo
- ~yt.analysis_modules.halo_analysis.halo_filters.quantity_value
- ~yt.analysis_modules.halo_analysis.halo_quantities.HaloQuantity
- ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity
- ~yt.analysis_modules.halo_analysis.halo_quantities.center_of_mass
- ~yt.analysis_modules.halo_analysis.halo_recipes.HaloRecipe
- ~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities
-
-Halo Finding
-^^^^^^^^^^^^
-
-These provide direct access to the halo finders. However, it is strongly recommended
-to use the ``HaloCatalog``.
-
-.. autosummary::
-
- ~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder
- ~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder
- ~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder
-
-Two Point Functions
-^^^^^^^^^^^^^^^^^^^
-
-These functions are designed to create correlations or other results of
-operations acting on two spatially-distinct points in a data source. See also
-:ref:`two_point_functions`.
+X-ray Emission Fields
+^^^^^^^^^^^^^^^^^^^^^
+This can be used to create derived fields of X-ray emission in
+different energy bands.
.. autosummary::
- ~yt.analysis_modules.two_point_functions.two_point_functions.TwoPointFunctions
- ~yt.analysis_modules.two_point_functions.two_point_functions.FcnSet
+ ~yt.fields.xray_emission_fields.XrayEmissivityIntegrator
+ ~yt.fields.xray_emission_fields.add_xray_emissivity_field
Field Types
-----------
@@ -592,57 +546,6 @@ writing to bitmaps.
~yt.data_objects.image_array.ImageArray
-Extension Types
----------------
-
-Cosmology, Star Particle Analysis, and Simulated Observations
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-For the generation of stellar SEDs. (See also :ref:`star_analysis`.)
-
-
-.. autosummary::
-
- ~yt.analysis_modules.star_analysis.sfr_spectrum.StarFormationRate
- ~yt.analysis_modules.star_analysis.sfr_spectrum.SpectrumBuilder
-
-Light cone generation and simulation analysis. (See also
-:ref:`light-cone-generator`.)
-
-
-.. autosummary::
-
- ~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone
- ~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay
-
-Absorption and X-ray spectra and spectral lines:
-
-.. autosummary::
-
- ~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum
- ~yt.fields.xray_emission_fields.XrayEmissivityIntegrator
- ~yt.fields.xray_emission_fields.add_xray_emissivity_field
-
-Absorption spectra fitting:
-
-.. autosummary::
-
- ~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit
-
-Sunrise exporting:
-
-.. autosummary::
-
- ~yt.analysis_modules.sunrise_export.sunrise_exporter.export_to_sunrise
- ~yt.analysis_modules.sunrise_export.sunrise_exporter.export_to_sunrise_from_halolist
-
-RADMC-3D exporting:
-
-.. autosummary::
-
- ~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DLayer
- ~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter
-
Volume Rendering
^^^^^^^^^^^^^^^^
diff --git a/doc/source/visualizing/FITSImageData.ipynb b/doc/source/visualizing/FITSImageData.ipynb
index 2d676a74275..b0a2912397d 100644
--- a/doc/source/visualizing/FITSImageData.ipynb
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -10,9 +10,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"import yt"
@@ -21,14 +19,13 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override={\"length_unit\":(1.0,\"Mpc\"),\n",
- " \"mass_unit\":(1.0e14,\"Msun\"),\n",
- " \"time_unit\":(1.0,\"Myr\")})"
+ "units_override = {\"length_unit\": (1.0, \"Mpc\"),\n",
+ " \"mass_unit\": (1.0e14, \"Msun\"),\n",
+ " \"time_unit\": (1.0, \"Myr\")}\n",
+ "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)"
]
},
{
@@ -48,12 +45,11 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500., \"kpc\"))\n",
+ "prj = yt.ProjectionPlot(ds, \"z\", (\"gas\", \"temperature\"), \n",
+ " weight_field=(\"gas\", \"density\"), width=(500., \"kpc\"))\n",
"prj.show()"
]
},
@@ -67,12 +63,10 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "prj_fits = yt.FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+ "prj_fits = yt.FITSProjection(ds, \"z\", (\"gas\", \"temperature\"), weight_field=(\"gas\", \"density\"))"
]
},
{
@@ -92,12 +86,11 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "prj_fits = yt.FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500., \"kpc\"))"
+ "prj_fits = yt.FITSProjection(ds, \"z\", (\"gas\", \"temperature\"), \n",
+ " weight_field=(\"gas\", \"density\"), width=(500., \"kpc\"))"
]
},
{
@@ -110,9 +103,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"prj_fits.info()"
@@ -128,9 +119,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"prj_fits[\"temperature\"].header"
@@ -140,20 +129,32 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. If we want the raw image data with units, we can use the `data` attribute of this field:"
+    "where we can see that the units of the temperature field are Kelvin and the cell widths are in kiloparsecs. Note that the length, time, mass, velocity, and magnetic field units of the dataset have been copied into the header."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "If we want the raw image data with units, we can use the `data` attribute of this field:"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"prj_fits[\"temperature\"].data"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Changing Aspects of the Images"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -164,15 +165,54 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "prj_fits.set_unit(\"temperature\",\"R\")\n",
+ "prj_fits.set_unit(\"temperature\", \"R\")\n",
"prj_fits[\"temperature\"].data"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The length units of the image (and its coordinate system), as well as the resolution of the image, can be adjusted when creating it using the `length_unit` and `image_res` keyword arguments, respectively:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# length_unit defaults to that from the dataset\n",
+ "# image_res defaults to 512\n",
+ "slc_fits = yt.FITSSlice(ds, \"z\", (\"gas\", \"density\"), width=(500,\"kpc\"), length_unit=\"ly\", image_res=256)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "We can now check that this worked by looking at the header; notice in particular the `NAXIS[12]` and `CUNIT[12]` keywords (the `CDELT[12]` and `CRPIX[12]` values also change):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "slc_fits[\"density\"].header"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Saving and Loading Images"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -183,27 +223,23 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
+ "prj_fits.writeto(\"sloshing.fits\", overwrite=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
+ "Since yt can read FITS image files, it can be loaded up just like any other dataset. Since we created this FITS file with `FITSImageData`, the image will contain information about the units and the current time of the dataset:"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"ds2 = yt.load(\"sloshing.fits\")"
@@ -219,12 +255,10 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
+ "slc2 = yt.SlicePlot(ds2, \"z\", (\"gas\", \"temperature\"), width=(500.,\"kpc\"))\n",
"slc2.set_log(\"temperature\", True)\n",
"slc2.show()"
]
@@ -233,7 +267,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Using `FITSImageData` directly"
+ "## Creating `FITSImageData` Instances Directly from FRBs, PlotWindow instances, and 3D Grids"
]
},
{
@@ -246,33 +280,48 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"slc3 = ds.slice(0, 0.0)\n",
"frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
- "fid_frb = yt.FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+ "fid_frb = frb.to_fits_data(fields=[(\"gas\", \"density\"), (\"gas\", \"temperature\")], length_unit=\"pc\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "A 3D FITS cube can also be created from a covering grid:"
+ "If one creates a `PlotWindow` instance, e.g. `SlicePlot`, `ProjectionPlot`, etc., you can also call this same method there:"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fid_pw = prj.to_fits_data(fields=[(\"gas\", \"density\"), (\"gas\", \"temperature\")], \n",
+ " length_unit=\"pc\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A 3D FITS cube can also be created from regularly gridded 3D data. In yt, there are covering grids and \"arbitrary grids\". The easiest way to make an arbitrary grid object is using `ds.r`, where we can index the dataset like a NumPy array, creating a grid of 1.0 Mpc on a side, centered on the origin, with 64 cells on a side:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
"outputs": [],
"source": [
- "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
- "fid_cvg = yt.FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+ "grid = ds.r[(-0.5, \"Mpc\"):(0.5, \"Mpc\"):64j,\n",
+ " (-0.5, \"Mpc\"):(0.5, \"Mpc\"):64j,\n",
+ " (-0.5, \"Mpc\"):(0.5, \"Mpc\"):64j]\n",
+ "fid_grid = grid.to_fits_data(fields=[(\"gas\", \"density\"), (\"gas\", \"temperature\")], length_unit=\"Mpc\")"
]
},
{
@@ -282,6 +331,13 @@
"## Other `FITSImageData` Methods"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Creating Images from Others"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -292,9 +348,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"fid = yt.FITSImageData.from_file(\"sloshing.fits\")\n",
@@ -311,12 +365,10 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "prj_fits2 = yt.FITSProjection(ds, \"z\", [\"density\"])\n",
+ "prj_fits2 = yt.FITSProjection(ds, \"z\", (\"gas\", \"density\"), width=(500.0, \"kpc\"))\n",
"prj_fits3 = yt.FITSImageData.from_images([prj_fits, prj_fits2])\n",
"prj_fits3.info()"
]
@@ -331,9 +383,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"dens_fits = prj_fits3.pop(\"density\")"
@@ -349,9 +399,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"dens_fits.info()"
@@ -367,14 +415,19 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"prj_fits3.info()"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Adding Sky Coordinates to Images"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -385,9 +438,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"prj_fits[\"temperature\"].header"
@@ -405,9 +456,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"sky_center = [30.,45.] # in degrees\n",
@@ -425,9 +474,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"prj_fits[\"temperature\"].header"
@@ -461,9 +508,7 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"prj_fits3[\"temperature\"].header"
@@ -480,15 +525,20 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Finally, we can add header keywords to a single field or for all fields in the FITS image using `update_header`:"
+ "### Updating Header Parameters"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can also add header keywords to a single field or for all fields in the FITS image using `update_header`:"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"fid_frb.update_header(\"all\", \"time\", 0.1) # Update all the fields\n",
@@ -498,24 +548,123 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"print (fid_frb[\"density\"].header[\"time\"])\n",
"print (fid_frb[\"temperature\"].header[\"scale\"])"
]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Changing Image Names"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can use the `change_image_name` method to change the name of an image in a `FITSImageData` instance:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fid_frb.change_image_name(\"density\", \"mass_per_volume\")\n",
+ "fid_frb.info() # now \"density\" should be gone and \"mass_per_volume\" should be in its place"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Convolving FITS Images"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Finally, you can convolve an image inside a `FITSImageData` instance with a kernel, either a Gaussian with a specific standard deviation, or any kernel provided by AstroPy. See AstroPy's [Convolution and filtering](http://docs.astropy.org/en/stable/convolution/index.html) for more details."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dens_fits.writeto(\"not_convolved.fits\", overwrite=True)\n",
+ "# Gaussian kernel with standard deviation of 3.0 kpc\n",
+ "dens_fits.convolve(\"density\", 3.0)\n",
+ "dens_fits.writeto(\"convolved.fits\", overwrite=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now let's load these up as datasets and see the difference:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ds0 = yt.load(\"not_convolved.fits\")\n",
+ "dsc = yt.load(\"convolved.fits\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "slc3 = yt.SlicePlot(ds0, \"z\", (\"gas\", \"density\"), width=(500.,\"kpc\"))\n",
+ "slc3.set_log(\"density\", True)\n",
+ "slc3.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "slc4 = yt.SlicePlot(dsc, \"z\", (\"gas\", \"density\"), width=(500.,\"kpc\"))\n",
+ "slc4.set_log(\"density\", True)\n",
+ "slc4.show()"
+ ]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
- "display_name": "Python [default]",
+ "display_name": "Python 3",
"language": "python",
"name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.3"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 2
}
diff --git a/doc/source/visualizing/callbacks.rst b/doc/source/visualizing/callbacks.rst
index 8011e33bf29..c49a567a7b4 100644
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -123,7 +123,7 @@ The underlying functions are more thoroughly documented in :ref:`callback-api`.
.. _annotate-clear:
Clear Callbacks (Some or All)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. function:: annotate_clear(index=None)
@@ -145,6 +145,27 @@ Clear Callbacks (Some or All)
p.annotate_clear()
p.save()
+.. _annotate-list:
+
+List Currently Applied Callbacks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. function:: list_annotations()
+
+ This function will print a list of each of the currently applied
+   callbacks together with their index. The index can be used with the
+   :ref:`annotate_clear() function <annotate-clear>` to remove a
+ specific callback.
+
+.. python-script::
+
+ import yt
+ ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ p = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
+ p.annotate_scale()
+ p.annotate_timestamp()
+ p.list_annotations()
+
.. _annotate-arrow:
Overplot Arrow
@@ -357,7 +378,7 @@ Overplot Halo Annotations
(This is a proxy for
:class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
- Accepts a :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+ Accepts a :class:`~yt_astro_analysis.halo_analysis.halo_catalog.HaloCatalog`
and plots a circle at the location of each halo with the radius of the
circle corresponding to the virial radius of the halo. Also accepts a
:ref:`loaded halo catalog dataset ` or a data
@@ -805,7 +826,7 @@ Overplot the Path of a Ray
ray can be either a
:class:`~yt.data_objects.selection_data_containers.YTOrthoRay`,
:class:`~yt.data_objects.selection_data_containers.YTRay`, or a
- :class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+ :class:`~trident.light_ray.LightRay`
object. annotate_ray() will properly account for periodic rays across the
volume.
diff --git a/doc/source/visualizing/plots.rst b/doc/source/visualizing/plots.rst
index 6a4b39256db..abe6440c74b 100644
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -159,6 +159,14 @@ where for the last two objects any spatial field, such as ``"density"``,
``"velocity_z"``,
etc., may be used, e.g. ``center=("min","temperature")``.
+The effective resolution of the plot (i.e. the number of resolution elements
+in the image itself) can be controlled with the ``buff_size`` argument:
+
+.. code-block:: python
+
+ yt.SlicePlot(ds, 'z', 'density', buff_size=(1000, 1000))
+
+
Here is an example that combines all of the options we just discussed.
.. python-script::
@@ -167,7 +175,7 @@ Here is an example that combines all of the options we just discussed.
from yt.units import kpc
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
slc = yt.SlicePlot(ds, 'z', 'density', center=[0.5, 0.5, 0.5],
- width=(20,'kpc'))
+ width=(20,'kpc'), buff_size=(1000, 1000))
slc.save()
The above example will display an annotated plot of a slice of the
@@ -275,11 +283,12 @@ example:
from yt.units import kpc
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
prj = yt.ProjectionPlot(ds, 2, 'temperature', width=25*kpc,
- weight_field='density')
+ weight_field='density', buff_size=(1000, 1000))
prj.save()
-will create a density-weighted projection of the temperature field along the x
-axis, plot it, and then save the plot to a png image file.
+will create a density-weighted projection of the temperature field along
+the z axis with 1000 resolution elements per side, plot it, and then save
+the plot to a png image file.
Like :ref:`slice-plots`, annotations and modifications can be applied
after creating the ``ProjectionPlot`` object. Annotations are
@@ -770,8 +779,8 @@ from black to white depending on the AMR level of the grid.
Annotations are described in :ref:`callbacks`.
-Set the size of the plot
-~~~~~~~~~~~~~~~~~~~~~~~~
+Set the size and resolution of the plot
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To set the size of the plot, use the
:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_figure_size` function. The argument
@@ -797,6 +806,9 @@ To change the resolution of the image, call the
slc.set_buff_size(1600)
slc.save()
+Also see cookbook recipe :ref:`image-resolution-primer` for more information
+about the parameters that determine the resolution of your images.
+
Turning off minorticks
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/yt4differences.rst b/doc/source/yt4differences.rst
new file mode 100644
index 00000000000..34d9fc10a74
--- /dev/null
+++ b/doc/source/yt4differences.rst
@@ -0,0 +1,277 @@
+.. _yt4differences:
+
+What's New and Different in yt 4.0?
+===================================
+
+If you are new to yt, welcome! If you're coming to yt 4.0 from an older
+version, however, there may be a few things in this version that are different
+than what you are used to. We have tried to build compatibility layers to
+minimize disruption to existing scripts, but necessarily things will be
+different in some ways.
+
+.. contents::
+ :depth: 2
+ :local:
+ :backlinks: none
+
+Updating to yt 4.0 from Old Versions (and going back)
+-----------------------------------------------------
+
+
+.. _transitioning-to-4.0:
+
+Converting Old Scripts to Work with yt 4.0
+------------------------------------------
+
+
+Cool New Things
+---------------
+
+Changes for working with SPH Data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In yt-3.0 most user-facing operations on SPH data were produced by interpolating
+SPH data onto a volume-filling octree mesh. Historically this was easier to
+implement when support for SPH data was added to yt, as it allowed re-using a
+lot of the existing infrastructure. This had some downsides: because the octree
+was a single, global object, the memory and CPU overhead of smoothing SPH data
+onto the octree could be prohibitive on particle datasets produced by large
+simulations. Constructing the octree during the initial indexing phase also
+required each particle (albeit as a 64-bit integer) to be present in memory
+simultaneously for a sorting operation, which was memory prohibitive.
+Visualizations of slices and projections produced by yt using the default
+settings were also somewhat blocky, since by default a relatively coarse octree
+was used to preserve memory.
+
+In yt-4.0 this has all changed! Over the past two years, Nathan Goldbaum, Meagan
+Lang and Matt Turk implemented a new approach for handling I/O of particle data,
+based on storing compressed bitmaps containing Morton indices instead of an
+in-memory octree. This new capability means that the global octree index is no
+longer necessary to enable I/O chunking and spatial indexing of particle data
+in yt.
+
+The new I/O method has opened up new ways of dealing with particle data and, in
+particular, SPH data.
+
+Scatter and gather approach for SPH data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned, operations such as slices, projections and arbitrary grids
+previously smoothed the particle data onto the global octree. As this is no
+longer used, a different approach was required to visualize the SPH data. Using
+SPLASH as inspiration, SPH pixelization operations were created using smoothing
+via "scatter" and "gather" approaches. We estimate the contribution of a
+particle to a single pixel by considering the point at the centre of the pixel
+and using the standard SPH smoothing formula. The heavy lifting in these
+functions is undertaken by Cython functions.
+
+It is now possible to generate slice plots, projection plots, covering grids and
+arbitrary grids of smoothed quantities using these operations. The following
+code demonstrates how this could be achieved using the scatter method:
+
+.. code-block:: python
+
+ import yt
+
+ ds = yt.load('snapshot_033/snap_033.0.hdf5')
+
+ plot = yt.SlicePlot(ds, 2, ('gas', 'density'))
+ plot.save()
+
+ plot = yt.ProjectionPlot(ds, 2, ('gas', 'density'))
+ plot.save()
+
+ arbitrary_grid = ds.arbitrary_grid([0.0, 0.0, 0.0], [25, 25, 25],
+ dims=[16, 16, 16])
+ ag_density = arbitrary_grid[('gas', 'density')]
+
+ covering_grid = ds.covering_grid(4, 0, 16)
+ cg_density = covering_grid[('gas', 'density')]
+
+In the above example the ``covering_grid`` and the ``arbitrary_grid`` will return
+the same data. In fact, these containers are very similar but provide a
+slightly different API.
+
+The above code can be modified to use the gather approach by changing a global
+setting for the dataset. This can be achieved with
+``ds.sph_smoothing_style = "gather"``. So far, the gather approach is not
+supported for projections.
+
+The default behaviour for SPH interpolation is that the values are normalized
+in line with Eq. 9 in `SPLASH, Price (2009) `_.
+This can be disabled with ``ds.use_sph_normalization = False``. This will
+disable the normalization for all future interpolations.
+
+The gather approach requires finding nearest neighbors using a KDTree. The
+first call will generate a KDTree for the entire dataset, which will be stored
+in a sidecar file and loaded whenever necessary.
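+
+As a minimal sketch combining these settings (reusing the example dataset from
+above; the field access is what triggers the KDTree construction):
+
+.. code-block:: python
+
+   import yt
+
+   ds = yt.load('snapshot_033/snap_033.0.hdf5')
+
+   # switch to the gather smoothing style and turn off SPH normalization;
+   # both settings apply to all subsequent field accesses
+   ds.sph_smoothing_style = "gather"
+   ds.use_sph_normalization = False
+
+   # the first gather-based field access builds the KDTree and caches it in
+   # a sidecar file next to the dataset
+   ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [25, 25, 25], dims=[16, 16, 16])
+   ag_density = ag[('gas', 'density')]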
+
+Off-Axis Projection for SPH Data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``OffAxisProjectionPlot`` class now supports SPH projection plots.
+
+The following is a code example:
+
+.. code-block:: python
+
+ import yt
+
+ ds = yt.load('Data/GadgetDiskGalaxy/snapshot_200.hdf5')
+
+ smoothing_field = ('gas', 'density')
+
+ _, center = ds.find_max(smoothing_field)
+
+ sp = ds.sphere(center, (10, 'kpc'))
+
+ normal_vector = sp.quantities.angular_momentum_vector()
+
+ prj = yt.OffAxisProjectionPlot(ds, normal_vector, smoothing_field, center, (20, 'kpc'))
+
+ prj.save()
+
+Smoothing data onto an Octree
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Whilst the move away from the global octree is a promising one in terms of
+performance and dealing with SPH data in a more intuitive manner, it does remove
+a useful feature. We are aware that many users will have older scripts which
+take advantage of the global octree.
+
+As such, we have added support for smoothing SPH data onto an octree when
+desired by the user. The new octree is designed to give results consistent with
+those of the previous octree, whilst taking advantage of the scatter and gather
+machinery also added.
+
+The following example demonstrates how to smooth SPH data onto an octree:
+
+.. code-block:: python
+
+ import yt
+ import numpy as np
+
+ ds = yt.load('GadgetDiskGalaxy/snapshot_200.hdf5')
+ left = np.array([0, 0, 0], dtype='float64')
+ right = np.array([64000, 64000, 64000], dtype='float64')
+
+ # generate an octree
+ octree = ds.octree(left, right, n_ref=64)
+
+ # the density will be calculated using SPH scatter
+ density = octree[('PartType0', 'density')]
+
+ # this will return the x positions of the octs
+ x = octree[('index', 'x')]
+
+The above code can be modified to use the gather approach by setting
+``ds.sph_smoothing_style = 'gather'`` before any field access. The octree also
+accepts ``over_refine_factor``, which works just like the ``over_refine_factor``
+parameter in yt-3.0 that could be passed to ``yt.load``, and determines how many
+particles are in each leaf.
+
+The ``density_factor`` keyword allows the construction of dense octrees. In a
+traditional octree, if a leaf has more particles than a critical value
+``n_ref``, then it divides into 8 new children (hence the name oct). The value
+of ``density_factor`` allows the node to divide into 2^(3*density_factor) zones
+instead. This creates an octree structure similar to that used by AMR codes
+like FLASH that make use of an octree of grid patches.
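+
+For example, a sketch based on the keywords described above (the values here
+are purely illustrative):
+
+.. code-block:: python
+
+   import yt
+   import numpy as np
+
+   ds = yt.load('GadgetDiskGalaxy/snapshot_200.hdf5')
+   left = np.array([0, 0, 0], dtype='float64')
+   right = np.array([64000, 64000, 64000], dtype='float64')
+
+   # each leaf holding more than 64 particles splits into 2^(3*2) = 64 zones
+   # instead of the traditional 8
+   dense_octree = ds.octree(left, right, n_ref=64, density_factor=2)
+   density = dense_octree[('PartType0', 'density')]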
+
+``yt.units`` is now a wrapper for ``unyt``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have extracted ``yt.units`` into ``unyt``, its own library that you can
+install separately from yt from ``pypi`` and ``conda-forge``. You can find out
+more about using ``unyt`` in `its documentation
+`_ and in `a paper in the Journal of
+Open Source Software `_.
+
+From the perspective of a user of yt, very little should change. While things in
+``unyt`` have different names -- for example ``YTArray`` is now called
+``unyt_array`` -- we have provided wrappers in ``yt.units`` so imports in your
+old scripts should continue to work without issue. If you have any old scripts
+that don't work due to issues with how yt is using ``unyt`` or units issues in
+general please let us know by `filing an issue on GitHub
+`_.
+
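+As a quick check that the compatibility wrappers are in place (a small sketch,
+assuming ``YTArray`` is re-exported from ``yt.units`` as ``unyt``'s
+``unyt_array``):
+
+    >>> from yt.units import YTArray
+    >>> from unyt import unyt_array
+    >>> a = YTArray([1, 2, 3], 'km')
+    >>> isinstance(a, unyt_array)
+    True
+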
+Moving ``unyt`` into its own library has made it much easier to add some cool
+new features, which we detail below.
+
+``ds.units``
+~~~~~~~~~~~~
+
+Each dataset now has a set of unit symbols and physical constants associated
+with it, allowing easier customization and smoother interaction, especially in
+workflows that need to use code units or cosmological units. The ``ds.units``
+object has a large number of attributes corresponding to the names of units and
+physical constants. All units known to the dataset will be available, including
+custom units. In situations where you might have used ``ds.arr`` or ``ds.quan``
+before, you can now safely use ``ds.units``:
+
+ >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+ >>> u = ds.units
+ >>> ad = ds.all_data()
+ >>> data = ad['Enzo', 'Density']
+ >>> data + 12*u.code_mass/u.code_length**3
+ unyt_array([1.21784693e+01, 1.21789148e+01, 1.21788494e+01, ...,
+ 4.08936836e+04, 5.78006836e+04, 3.97766906e+05], 'code_mass/code_length**3')
+ >>> data + .0001*u.mh/u.cm**3
+ unyt_array([6.07964513e+01, 6.07968968e+01, 6.07968314e+01, ...,
+ 4.09423016e+04, 5.78493016e+04, 3.97815524e+05], 'code_mass/code_length**3')
+
+
+Automatic Unit Simplification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Often an operation will result in a unit expression that can be simplified by
+cancelling pairs of factors. Before yt 4.0, these pairs of factors were only
+cancelled if the same unit appeared in both the numerator and denominator of an
+expression. Now, all pairs of factors that have inverse dimensions are
+cancelled, and the appropriate scaling factor is incorporated
+into the result. For example, ``Hz`` and ``s`` will now appropriately be recognized
+as inverses:
+
+ >>> from yt.units import Hz, s
+ >>> frequency = 60*Hz
+ >>> time = 60*s
+ >>> frequency*time
+ unyt_quantity(3600, '(dimensionless)')
+
+Similar simplifications will happen even if units aren't reciprocals of each
+other; for example, here ``hour`` and ``minute`` automatically cancel each other:
+
+ >>> from yt.units import erg, minute, hour
+ >>> power = [20, 40, 80] * erg / minute
+ >>> elapsed_time = 3*hour
+ >>> print(power*elapsed_time)
+ [ 3600. 7200. 14400.] erg
+
+Alternate Unit Name Resolution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It's now possible to use a number of common alternate spellings for unit names;
+if ``unyt`` knows about an alternate spelling, it will automatically resolve it
+to a canonical name. For example, it's now possible to do
+things like this:
+
+ >>> import yt.units as u
+ >>> d = 20*u.mile
+ >>> d.to('km')
+ unyt_quantity(32.18688, 'km')
+ >>> d.to('kilometer')
+ unyt_quantity(32.18688, 'km')
+ >>> d.to('kilometre')
+ unyt_quantity(32.18688, 'km')
+
+You can also use alternate unit names in more complex algebraic unit expressions:
+
+ >>> v = d / (20*u.minute)
+ >>> v.to('kilometre/hour')
+ unyt_quantity(96.56064, 'km/hr')
+
+In this example the common British spelling ``"kilometre"`` is resolved to
+``"km"`` and ``"hour"`` is resolved to ``"hr"``.
+
+API Changes
+-----------
diff --git a/scripts/pr_backport.py b/scripts/pr_backport.py
index 22b6c80c27a..b3f521f5b11 100644
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -4,8 +4,6 @@
import shutil
import tempfile
-from yt.extern.six.moves import input
-
API_URL = 'https://api.github.com/graphql'
YT_REPO = "https://github.com/yt-project/yt"
diff --git a/setup.cfg b/setup.cfg
index f4cdfa99bdc..706af9c14df 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,7 +8,7 @@
# unused import errors
# autogenerated __config__.py files
# vendored libraries
-exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py
+exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py,yt/units/*
max-line-length=999
ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E305,E306,E402,E502,E701,E703,E722,E741,E731,W291,W292,W293,W391,W503,W504,W605
jobs=8
diff --git a/setup.py b/setup.py
index b18c07ee9a9..0c3f8fea9d0 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,6 @@
import os
+import platform
+from concurrent.futures import ThreadPoolExecutor as Pool
import glob
import sys
from sys import platform as _platform
@@ -12,11 +14,40 @@
read_embree_location, \
in_conda_env
from distutils.version import LooseVersion
+from distutils.ccompiler import CCompiler
import pkg_resources
-if sys.version_info < (2, 7) or (3, 0) < sys.version_info < (3, 5):
- print("yt currently supports Python 2.7 or versions newer than Python 3.5")
+def _get_cpu_count():
+ if platform.system() != "Windows":
+ return os.cpu_count()
+ return 0
+
+
+def _compile(
+ self, sources, output_dir=None, macros=None, include_dirs=None,
+ debug=0, extra_preargs=None, extra_postargs=None, depends=None,
+):
+ """Function to monkey-patch distutils.ccompiler.CCompiler"""
+ macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
+ output_dir, macros, include_dirs, sources, depends, extra_postargs
+ )
+ cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+
+ # Return *all* object filenames, not just the ones we just built.
+ return objects
+
+CCompiler.compile = _compile
+
+if sys.version_info < (3, 5):
+ print("yt currently supports versions newer than Python 3.5")
print("certain features may fail unexpectedly and silently with older "
"versions.")
sys.exit(1)
@@ -34,7 +65,7 @@
except pkg_resources.DistributionNotFound:
pass # yay!
-VERSION = "3.7.dev0"
+VERSION = "4.0.dev0"
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
@@ -53,12 +84,6 @@
std_libs = ["m"]
cython_extensions = [
- Extension("yt.analysis_modules.photon_simulator.utils",
- ["yt/analysis_modules/photon_simulator/utils.pyx"],
- include_dirs=["yt/utilities/lib"]),
- Extension("yt.analysis_modules.ppv_cube.ppv_utils",
- ["yt/analysis_modules/ppv_cube/ppv_utils.pyx"],
- libraries=std_libs),
Extension("yt.geometry.grid_visitors",
["yt/geometry/grid_visitors.pyx"],
include_dirs=["yt/utilities/lib"],
@@ -78,8 +103,11 @@
libraries=std_libs),
Extension("yt.geometry.particle_oct_container",
["yt/geometry/particle_oct_container.pyx"],
- include_dirs=["yt/utilities/lib/"],
- libraries=std_libs),
+ include_dirs=["yt/utilities/lib/",
+ "yt/utilities/lib/ewahboolarray"],
+ language="c++",
+ libraries=std_libs,
+ extra_compile_args=["-std=c++11"]),
Extension("yt.geometry.selection_routines",
["yt/geometry/selection_routines.pyx"],
include_dirs=["yt/utilities/lib/"],
@@ -114,6 +142,28 @@
include_dirs=["yt/utilities/lib/",
"yt/geometry/"],
libraries=std_libs),
+ Extension("yt.utilities.lib.cykdtree.kdtree",
+ [
+ "yt/utilities/lib/cykdtree/kdtree.pyx",
+ "yt/utilities/lib/cykdtree/c_kdtree.cpp",
+ "yt/utilities/lib/cykdtree/c_utils.cpp",
+ ],
+ depends=[
+ "yt/utilities/lib/cykdtree/c_kdtree.hpp",
+ "yt/utilities/lib/cykdtree/c_utils.hpp",
+ ],
+ libraries=std_libs,
+ language="c++",
+ extra_compile_args=["-std=c++03"]),
+ Extension("yt.utilities.lib.cykdtree.utils",
+ [
+ "yt/utilities/lib/cykdtree/utils.pyx",
+ "yt/utilities/lib/cykdtree/c_utils.cpp",
+ ],
+ depends=["yt/utilities/lib/cykdtree/c_utils.hpp"],
+ libraries=std_libs,
+ language="c++",
+ extra_compile_args=["-std=c++03"]),
Extension("yt.utilities.lib.fnv_hash",
["yt/utilities/lib/fnv_hash.pyx"],
include_dirs=["yt/utilities/lib/"],
@@ -132,12 +182,26 @@
Extension("yt.utilities.lib.mesh_triangulation",
["yt/utilities/lib/mesh_triangulation.pyx"],
depends=["yt/utilities/lib/mesh_triangulation.h"]),
+ Extension("yt.utilities.lib.particle_kdtree_tools",
+ ["yt/utilities/lib/particle_kdtree_tools.pyx"],
+ language="c++"),
+ Extension("yt.utilities.lib.bounded_priority_queue",
+ ["yt/utilities/lib/bounded_priority_queue.pyx"]),
Extension("yt.utilities.lib.pixelization_routines",
["yt/utilities/lib/pixelization_routines.pyx",
"yt/utilities/lib/pixelization_constants.c"],
include_dirs=["yt/utilities/lib/"],
+ extra_compile_args=omp_args,
+ extra_link_args=omp_args,
+ language='c++',
libraries=std_libs,
depends=["yt/utilities/lib/pixelization_constants.h"]),
+ Extension("yt.utilities.lib.cyoctree",
+ ["yt/utilities/lib/cyoctree.pyx"],
+ extra_compile_args=omp_args,
+ extra_link_args=omp_args,
+ libraries=std_libs,
+ language='c++'),
Extension("yt.utilities.lib.primitives",
["yt/utilities/lib/primitives.pyx"],
libraries=std_libs),
@@ -154,6 +218,11 @@
include_dirs=["yt/utilities/lib/"],
libraries=std_libs,
depends=["yt/utilities/lib/fixed_interpolator.h"]),
+ Extension("yt.utilities.lib.ewah_bool_wrap",
+ ["yt/utilities/lib/ewah_bool_wrap.pyx"],
+ include_dirs=["yt/utilities/lib/",
+ "yt/utilities/lib/ewahboolarray"],
+ language="c++"),
Extension("yt.utilities.lib.image_samplers",
["yt/utilities/lib/image_samplers.pyx",
"yt/utilities/lib/fixed_interpolator.c"],
@@ -192,7 +261,7 @@
"particle_mesh_operations", "depth_first_octree", "fortran_reader",
"interpolators", "basic_octree", "image_utilities",
"points_in_volume", "quad_tree", "mesh_utilities",
- "amr_kdtools", "lenses", "distance_queue", "allocation_container"
+ "amr_kdtools", "lenses", "distance_queue", "allocation_container",
]
for ext_name in lib_exts:
cython_extensions.append(
@@ -207,12 +276,6 @@
["yt/utilities/lib/{}.pyx".format(ext_name)]))
extensions = [
- Extension("yt.analysis_modules.halo_finding.fof.EnzoFOF",
- ["yt/analysis_modules/halo_finding/fof/EnzoFOF.c",
- "yt/analysis_modules/halo_finding/fof/kd.c"],
- libraries=std_libs),
- Extension("yt.analysis_modules.halo_finding.hop.EnzoHop",
- sorted(glob.glob("yt/analysis_modules/halo_finding/hop/*.c"))),
Extension("yt.frontends.artio._artio_caller",
["yt/frontends/artio/_artio_caller.pyx"] +
sorted(glob.glob("yt/frontends/artio/artio_headers/*.c")),
@@ -258,44 +321,6 @@
cython_extensions += embree_extensions
-# ROCKSTAR
-if os.path.exists("rockstar.cfg"):
- try:
- rd = open("rockstar.cfg").read().strip()
- except IOError:
- print("Reading Rockstar location from rockstar.cfg failed.")
- print("Please place the base directory of your")
- print("Rockstar install in rockstar.cfg and restart.")
- print("(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )")
- sys.exit(1)
-
- rockstar_extdir = "yt/analysis_modules/halo_finding/rockstar"
- rockstar_extensions = [
- Extension("yt.analysis_modules.halo_finding.rockstar.rockstar_interface",
- sources=[os.path.join(rockstar_extdir, "rockstar_interface.pyx")]),
- Extension("yt.analysis_modules.halo_finding.rockstar.rockstar_groupies",
- sources=[os.path.join(rockstar_extdir, "rockstar_groupies.pyx")])
- ]
- for ext in rockstar_extensions:
- ext.library_dirs.append(rd)
- ext.libraries.append("rockstar")
- ext.define_macros.append(("THREADSAFE", ""))
- ext.include_dirs += [rd,
- os.path.join(rd, "io"), os.path.join(rd, "util")]
- extensions += rockstar_extensions
-
-if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
- gpd = os.environ["GPERFTOOLS"]
- idir = os.path.join(gpd, "include")
- ldir = os.path.join(gpd, "lib")
- print(("INCLUDE AND LIB DIRS", idir, ldir))
- cython_extensions.append(
- Extension("yt.utilities.lib.perftools_wrap",
- ["yt/utilities/lib/perftools_wrap.pyx"],
- libraries=["profiler"],
- library_dirs=[ldir],
- include_dirs=[idir]))
-
class build_ext(_build_ext):
# subclass setuptools extension builder to avoid importing cython and numpy
# at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869
@@ -308,22 +333,24 @@ def finalize_options(self):
"""Could not import cython or numpy. Building yt from source requires
cython and numpy to be installed. Please install these packages using
the appropriate package manager for your python environment.""")
- if LooseVersion(cython.__version__) < LooseVersion('0.24'):
+ if LooseVersion(cython.__version__) < LooseVersion('0.26.1'):
raise RuntimeError(
-"""Building yt from source requires Cython 0.24 or newer but
+"""Building yt from source requires Cython 0.26.1 or newer but
Cython %s is installed. Please update Cython using the appropriate
package manager for your python environment.""" %
cython.__version__)
- if LooseVersion(numpy.__version__) < LooseVersion('1.10.4'):
+ if LooseVersion(numpy.__version__) < LooseVersion('1.13.3'):
raise RuntimeError(
-"""Building yt from source requires NumPy 1.10.4 or newer but
+"""Building yt from source requires NumPy 1.13.3 or newer but
NumPy %s is installed. Please update NumPy using the appropriate
package manager for your python environment.""" %
numpy.__version__)
from Cython.Build import cythonize
self.distribution.ext_modules[:] = cythonize(
self.distribution.ext_modules,
- compiler_directives={'language_level': 2})
+ compiler_directives={'language_level': 2},
+ nthreads=_get_cpu_count(),
+ )
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process
# see http://stackoverflow.com/a/21621493/1382869
@@ -333,8 +360,20 @@ def finalize_options(self):
__builtins__["__NUMPY_SETUP__"] = False
else:
__builtins__.__NUMPY_SETUP__ = False
+ import numpy
self.include_dirs.append(numpy.get_include())
+ def build_extensions(self):
+ self.check_extensions_list(self.extensions)
+
+ ncpus = _get_cpu_count()
+ if ncpus > 0:
+ with Pool(ncpus) as pool:
+ pool.map(self.build_extension, self.extensions)
+ else:
+ super().build_extensions()
+
+
class sdist(_sdist):
# subclass setuptools source distribution builder to ensure cython
# generated C files are included in source distribution.
@@ -345,69 +384,70 @@ def run(self):
cythonize(
cython_extensions,
compiler_directives={'language_level': 2},
+ nthreads=_get_cpu_count(),
)
_sdist.run(self)
-setup(
- name="yt",
- version=VERSION,
- description="An analysis and visualization toolkit for volumetric data",
- long_description = long_description,
- long_description_content_type='text/markdown',
- classifiers=["Development Status :: 5 - Production/Stable",
- "Environment :: Console",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: BSD License",
- "Operating System :: MacOS :: MacOS X",
- "Operating System :: POSIX :: AIX",
- "Operating System :: POSIX :: Linux",
- "Programming Language :: C",
- "Programming Language :: Python :: 2",
- "Programming Language :: Python :: 2.7",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "Topic :: Scientific/Engineering :: Astronomy",
- "Topic :: Scientific/Engineering :: Physics",
- "Topic :: Scientific/Engineering :: Visualization",
- "Framework :: Matplotlib"],
- keywords='astronomy astrophysics visualization ' +
- 'amr adaptivemeshrefinement',
- entry_points={'console_scripts': [
- 'yt = yt.utilities.command_line:run_main',
- ],
- 'nose.plugins.0.10': [
- 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
- ]
- },
- packages=find_packages(),
- include_package_data = True,
- install_requires=[
- 'matplotlib>=1.5.3',
- 'setuptools>=19.6',
- 'sympy>=1.0',
- 'numpy>=1.10.4',
- 'IPython>=1.0',
- ],
- extras_require = {
- 'hub': ["girder_client"],
- 'mapserver': ["bottle"]
- },
- cmdclass={'sdist': sdist, 'build_ext': build_ext},
- author="The yt project",
- author_email="yt-dev@python.org",
- url="https://github.com/yt-project/yt",
- project_urls={
- 'Homepage': 'https://yt-project.org/',
- 'Documentation': 'https://yt-project.org/doc/',
- 'Source': 'https://github.com/yt-project/yt/',
- 'Tracker': 'https://github.com/yt-project/yt/issues'
- },
- license="BSD 3-Clause",
- zip_safe=False,
- scripts=["scripts/iyt"],
- ext_modules=cython_extensions + extensions,
- python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*'
-)
+if __name__ == "__main__":
+ setup(
+ name="yt",
+ version=VERSION,
+ description="An analysis and visualization toolkit for volumetric data",
+ long_description = long_description,
+ long_description_content_type='text/markdown',
+ classifiers=["Development Status :: 5 - Production/Stable",
+ "Environment :: Console",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: BSD License",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: POSIX :: AIX",
+ "Operating System :: POSIX :: Linux",
+ "Programming Language :: C",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Topic :: Scientific/Engineering :: Astronomy",
+ "Topic :: Scientific/Engineering :: Physics",
+ "Topic :: Scientific/Engineering :: Visualization",
+ "Framework :: Matplotlib"],
+ keywords='astronomy astrophysics visualization ' +
+ 'amr adaptivemeshrefinement',
+ entry_points={'console_scripts': [
+ 'yt = yt.utilities.command_line:run_main',
+ ],
+ 'nose.plugins.0.10': [
+ 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+ ]
+ },
+ packages=find_packages(),
+ include_package_data = True,
+ install_requires=[
+ 'matplotlib>=1.5.3',
+ 'setuptools>=19.6',
+ 'sympy>=1.2',
+ 'numpy>=1.10.4',
+ 'IPython>=1.0',
+ 'unyt>=2.2.2',
+ ],
+ extras_require = {
+ 'hub': ["girder_client"],
+ 'mapserver': ["bottle"]
+ },
+ cmdclass={'sdist': sdist, 'build_ext': build_ext},
+ author="The yt project",
+ author_email="yt-dev@python.org",
+ url="https://github.com/yt-project/yt",
+ project_urls={
+ 'Homepage': 'https://yt-project.org/',
+ 'Documentation': 'https://yt-project.org/doc/',
+ 'Source': 'https://github.com/yt-project/yt/',
+ 'Tracker': 'https://github.com/yt-project/yt/issues'
+ },
+ license="BSD 3-Clause",
+ zip_safe=False,
+ scripts=["scripts/iyt"],
+ ext_modules=cython_extensions + extensions,
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*'
+ )
diff --git a/tests/nose_runner.py b/tests/nose_runner.py
index 54501aa667d..0ec1e0d52f2 100644
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -9,7 +9,7 @@
concurrency="multiprocessing")
cov.start()
-from yt.extern.six import StringIO
+from io import StringIO
from yt.config import ytcfg
from yt.utilities.answer_testing.framework import AnswerTesting
import numpy
diff --git a/tests/report_failed_answers.py b/tests/report_failed_answers.py
index 280fcb89b08..59ceab60bb3 100644
--- a/tests/report_failed_answers.py
+++ b/tests/report_failed_answers.py
@@ -4,13 +4,6 @@
"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2018, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
from __future__ import print_function
diff --git a/tests/test_install_script.py b/tests/test_install_script.py
index 3eb04bc176d..a1cb4cc77c3 100644
--- a/tests/test_install_script.py
+++ b/tests/test_install_script.py
@@ -24,22 +24,19 @@
OPTIONAL_DEPS = [
'embree',
'pyx',
- 'rockstar',
'scipy',
'astropy',
'cartopy',
+ 'pooch',
]
# dependencies that are only installable when yt is built from source
YT_SOURCE_ONLY_DEPS = [
'embree',
- 'rockstar'
]
DEPENDENCY_IMPORT_TESTS = {
'embree': "from yt.utilities.lib import mesh_traversal",
- 'rockstar': ("from yt.analysis_modules.halo_finding.rockstar "
- "import rockstar_interface")
}
diff --git a/tests/test_minimal_requirements.txt b/tests/test_minimal_requirements.txt
index 72481ed2cd7..a8243558a1b 100644
--- a/tests/test_minimal_requirements.txt
+++ b/tests/test_minimal_requirements.txt
@@ -1,6 +1,6 @@
ipython==1.0.0
matplotlib==1.5.3
-sympy==1.0
+sympy==1.2
nose==1.3.7
nose-timer==0.7.3
pytest~=5.2; python_version >= '3.0'
@@ -8,4 +8,4 @@ pytest~=4.6; python_version < '3.0'
pyyaml>=4.2b1
coverage==4.5.1
codecov==2.0.15
-mock==2.0.0; python_version < '3.0'
+git+https://github.com/yt-project/unyt@de443dff7671f1e68557306d77582cd117cc94f8#egg=unyt
diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt
index d1e8499cb81..3e4c21dccba 100644
--- a/tests/test_requirements.txt
+++ b/tests/test_requirements.txt
@@ -1,34 +1,30 @@
-astropy==3.0.5; python_version >= '3.0'
-astropy==2.0.9; python_version < '3.0'
+astropy==3.0.5
codecov==2.0.15
coverage==4.5.1
fastcache==1.0.2
-glueviz==0.13.3; python_version >= '3.0'
-h5py==2.8.0
-ipython==7.1.1; python_version >= '3.0'
-ipython==5.8.0; python_version < '3.0'
-matplotlib==3.1.3; python_version >= '3.0'
-matplotlib==2.2.3; python_version < '3.0'
+glueviz==0.13.3
+h5py==2.10.0
+ipython==7.1.1
+matplotlib==3.1.3
mock==2.0.0; python_version < '3.0'
nose-timer==0.7.3
nose==1.3.7
pandas==0.23.4
-pytest~=5.2; python_version >= '3.0'
-pytest~=4.6; python_version < '3.0'
+pytest~=5.2
requests==2.20.0
-scipy==1.1.0; python_version < '3.0'
-scipy==1.3.3; python_version >= '3.0'
+scipy==1.3.3
sympy==1.5
-pyqt5==5.11.3; python_version >= '3.0'
-thingking==1.0.2; python_version < '3.0'
+pyqt5==5.11.3
pint==0.8.1
-netCDF4==1.4.2; python_version < '3.0'
-netCDF4==1.5.3; python_version >= '3.0'
+netCDF4==1.5.3
libconf==1.0.1
cartopy==0.17.0
pyaml==17.10.0
mpi4py==3.0.3
+git+https://github.com/yt-project/unyt@de443dff7671f1e68557306d77582cd117cc94f8#egg=unyt
pyyaml>=4.2b1
-xarray==0.12.3 ; python_version >= '3.0'
+xarray==0.12.3
firefly_api>=0.0.2
f90nml>=1.1.2
+MiniballCpp>=0.2.1
+pooch>=0.7.0
diff --git a/tests/tests.yaml b/tests/tests.yaml
index cd60e3f7b13..736e63d6176 100644
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -3,55 +3,55 @@ answer_tests:
local_amrvac_004:
- yt/frontends/amrvac/tests/test_outputs.py
- local_artio_002:
+ local_arepo_005:
+ - yt/frontends/arepo/tests/test_outputs.py
+
+ local_artio_003:
- yt/frontends/artio/tests/test_outputs.py
- local_athena_006:
+ local_athena_007:
- yt/frontends/athena
- local_athena_pp_002:
+ local_athena_pp_003:
- yt/frontends/athena_pp
- local_chombo_003:
+ local_chombo_004:
- yt/frontends/chombo/tests/test_outputs.py
local_enzo_006:
- yt/frontends/enzo
- local_enzo_p_006:
+ local_enzo_p_007:
- yt/frontends/enzo_p/tests/test_outputs.py
local_fits_003:
- yt/frontends/fits/tests/test_outputs.py
- local_flash_010:
+ local_flash_012:
- yt/frontends/flash/tests/test_outputs.py
- local_gadget_002:
+ local_gadget_003:
- yt/frontends/gadget/tests/test_outputs.py
- local_gamer_006:
+ local_gamer_007:
- yt/frontends/gamer/tests/test_outputs.py
local_gdf_001:
- yt/frontends/gdf/tests/test_outputs.py
- local_gizmo_003:
+ local_gizmo_004:
- yt/frontends/gizmo/tests/test_outputs.py
local_halos_009:
- - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
- - yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py
- - yt/analysis_modules/halo_finding/tests/test_rockstar.py
- yt/frontends/ahf/tests/test_outputs.py
- - yt/frontends/owls_subfind/tests/test_outputs.py
+ # - yt/frontends/owls_subfind/tests/test_outputs.py
- yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
- yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
- local_owls_003:
+ local_owls_004:
- yt/frontends/owls/tests/test_outputs.py
- local_pw_028:
+ local_pw_029:
- yt/visualization/tests/test_plotwindow.py:test_attributes
- yt/visualization/tests/test_plotwindow.py:test_attributes_wt
- yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
@@ -59,18 +59,13 @@ answer_tests:
- yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
- yt/visualization/tests/test_raw_field_slices.py:test_raw_field_slices
- local_tipsy_003:
+ local_tipsy_005:
- yt/frontends/tipsy/tests/test_outputs.py
- local_varia_011:
- - yt/analysis_modules/radmc3d_export
+ local_varia_015:
- yt/frontends/moab/tests/test_c5.py
- yt/fields/tests/test_xray_fields.py
- local_photon_002:
- - yt/analysis_modules/photon_simulator/tests/test_spectra.py
- - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-
local_unstructured_011:
- yt/visualization/volume_rendering/tests/test_mesh_render.py:test_composite_mesh_render
- yt/visualization/volume_rendering/tests/test_mesh_render.py:test_composite_mesh_render_pyembree
@@ -89,7 +84,7 @@ answer_tests:
- yt/visualization/volume_rendering/tests/test_mesh_render.py:test_wedge6_render
- yt/visualization/volume_rendering/tests/test_mesh_render.py:test_wedge6_render_pyembree
- local_boxlib_009:
+ local_boxlib_010:
- yt/frontends/boxlib/tests/test_outputs.py:test_radadvect
- yt/frontends/boxlib/tests/test_outputs.py:test_radtube
- yt/frontends/boxlib/tests/test_outputs.py:test_star
@@ -113,29 +108,18 @@ answer_tests:
local_ramses_002:
- yt/frontends/ramses/tests/test_outputs.py
- local_ytdata_006:
+ local_ytdata_007:
- yt/frontends/ytdata/tests/test_outputs.py
- yt/frontends/ytdata/tests/test_old_outputs.py
- local_absorption_spectrum_007:
- - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
- - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
- - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
- - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
- - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
- - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_with_continuum
-
local_axialpix_006:
- yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
local_cylindrical_background_002:
- yt/geometry/coordinates/tests/test_cylindrical_coordinates.py:test_noise_plots
- local_particle_trajectory_001:
- - yt/data_objects/tests/test_particle_trajectories.py
-
- local_light_cone_002:
- - yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py
+ #local_particle_trajectory_001:
+ # - yt/data_objects/tests/test_particle_trajectories.py
other_tests:
unittests:
diff --git a/yt/__init__.py b/yt/__init__.py
index 97da62d74fe..da7526706af 100644
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -7,16 +7,11 @@
* Contribute: https://github.com/yt-project/yt
"""
+import sys
+if sys.version_info[0] < 3:
+ raise Exception("Python 2 no longer supported. Please install Python 3 for use with yt.")
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-__version__ = "3.7.dev0"
+__version__ = "4.0.dev0"
# First module imports
import numpy as np # For modern purposes
@@ -44,7 +39,7 @@
import yt.utilities.physical_constants as physical_constants
import yt.units as units
from yt.units.unit_object import define_unit
-from yt.units.yt_array import \
+from yt.units import \
YTArray, \
YTQuantity, \
uconcatenate, \
@@ -126,16 +121,15 @@
from yt.convenience import \
load, simulation
+from yt.utilities.load_sample import load_sample
+
from yt.testing import run_nose
# Import some helpful math utilities
from yt.utilities.math_utils import \
ortho_find, quartiles, periodic_position
-from yt.units.unit_systems import UnitSystem
-from yt.units.unit_object import unit_system_registry
-
-from yt.analysis_modules.list_modules import \
- amods
+from yt.units.unit_systems import \
+ UnitSystem, unit_system_registry
_called_from_pytest = False
diff --git a/yt/analysis_modules/absorption_spectrum/__init__.py b/yt/analysis_modules/absorption_spectrum/__init__.py
index 18ea5c8cef3..e69de29bb2d 100644
--- a/yt/analysis_modules/absorption_spectrum/__init__.py
+++ b/yt/analysis_modules/absorption_spectrum/__init__.py
@@ -1,14 +0,0 @@
-"""
-Import stuff for light cone generator.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
diff --git a/yt/analysis_modules/absorption_spectrum/absorption_line.py b/yt/analysis_modules/absorption_spectrum/absorption_line.py
deleted file mode 100644
index aad9ae8d9b4..00000000000
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""
-Absorption line generating functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.utilities.physical_constants import \
- charge_proton_cgs, \
- mass_electron_cgs, \
- speed_of_light_cgs
-from yt.utilities.on_demand_imports import _scipy, NotAModule
-
-special = _scipy.special
-tau_factor = None
-_cs = None
-
-
-def voigt_scipy(a, u):
- x = np.asarray(u).astype(np.float64)
- y = np.asarray(a).astype(np.float64)
- return special.wofz(x + 1j * y).real
-
-
-def voigt_old(a, u):
- """
- NAME:
- VOIGT
- PURPOSE:
- Implementation of Voigt function
- CATEGORY:
- Math
- CALLING SEQUENCE:
- voigt=Voigt(a,u)
- INPUTS:
- A = Voigt "A" parameter.
- U = Frequency in units of the Doppler frequency.
-
- The line profile "Phi(v)", the doppler width
- "Delv", the voigt parameter "a", and the frequency "u"
- are given by:
-
- Phi(v) = Voigt(a,u)/[ Delv * sqrt(pi) ]
- Delv = Vo/c * sqrt[ 2kT/m ]
- u = V - Vo / Delv
- a = GAMMA / [ Delv * 4pi ]
- Gamma = Gu + Gl + 2*Vcol
- "Gu" and "Gl" are the widths of the upper and lower states
- "Vcol" is the collisions per unit time
- "Vo" is the line center frequency
-
- OUTPUTS:
- An array of the same type as u
- RESTRICTIONS:
- U must be an array, a should not be. Also this procedure is only
- valid for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should
- be most astrophysical conditions (see the article below for further
- comments
- PROCEDURE:
- Follows procedure in Armstrong JQSRT 7, 85 (1967)
- also the same as the intrinsic in the previous version of IDL
- MODIFICATION HISTORY:
- J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
- Sep 1990 (better overflow checking)
- """
- x = np.asarray(u).astype(np.float64)
- y = np.asarray(a).astype(np.float64)
-
- # Hummer's Chebyshev Coefficients
- c = (0.1999999999972224, -0.1840000000029998, 0.1558399999965025,
- -0.1216640000043988, 0.0877081599940391, -0.0585141248086907,
- 0.0362157301623914, -0.0208497654398036, 0.0111960116346270,
- -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2,
- 0.4899519978088e-3, -0.1933630801528e-3, 0.722877446788e-4,
- -0.256555124979e-4, 0.86620736841e-5, -0.27876379719e-5,
- 0.8566873627e-6, -0.2518433784e-6, 0.709360221e-7,
- -0.191732257e-7, 0.49801256e-8, -0.12447734e-8,
- 0.2997777e-9, -0.696450e-10, 0.156262e-10,
- -0.33897e-11, 0.7116e-12, -0.1447e-12,
- 0.285e-13, -0.55e-14, 0.10e-14,
- -0.2e-15)
-
- y2 = y * y
-
- # limits are y<1., x<4 or y<1.8(x+1), x>4 (no checking performed)
- u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
-
- # Clenshaw's Algorithm
- bno1 = np.zeros(x.shape)
- bno2 = np.zeros(x.shape)
- x1 = np.clip((x / 5.), -np.inf, 1.)
- coef = 4. * x1 * x1 - 2.
- for i in range(33, -1, -1):
- bn = coef * bno1 - bno2 + c[i]
- bno2 = np.copy(bno1)
- bno1 = np.copy(bn)
-
- f = x1 * (bn - bno2)
- dno1 = 1. - 2. * x * f
- dno2 = f
-
- q = np.abs(x) > 5
- if q.any():
- x14 = np.power(np.clip(x[q], -np.inf, 500.), 14)
- x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
- x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
- x8 = np.power(np.clip(x[q], -np.inf, 50000.), 8)
- x6 = np.power(np.clip(x[q], -np.inf, 1.e6), 6)
- x4 = np.power(np.clip(x[q], -np.inf, 1.e9), 4)
- x2 = np.power(np.clip(x[q], -np.inf, 1.e18), 2)
- dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 +
- 6.5625 / x8 + 29.53125 / x10 +
- 162.4218 / x12 + 1055.7421 / x14)
- dno2[q] = (1. - dno1[q]) / (2. * x[q])
-
- funct = y * dno1
- if (y > 1.e-8).any():
- q = 1.0
- yn = y
- for i in range(2, 51):
- dn = (x * dno1 + dno2) * (-2. / i)
- dno2 = dno1
- dno1 = dn
- if (i % 2) == 1:
- q = -q
- yn = yn * y2
- g = dn.astype(np.float64) * yn
- funct = funct + q * g
- if np.max(np.abs(g / funct)) <= 1.e-8:
- break
-
- k1 = u1 - 1.12837917 * funct
- k1 = k1.astype(np.float64).clip(0)
- return k1
-
-
-def tau_profile(lambda_0, f_value, gamma, v_doppler, column_density,
- delta_v=None, delta_lambda=None,
- lambda_bins=None, n_lambda=12000, dlambda=0.01):
- r"""
- Create an optical depth vs. wavelength profile for an
- absorption line using a voigt profile.
-
- Parameters
- ----------
-
- lambda_0 : float in angstroms
- central wavelength.
- f_value : float
- absorption line f-value.
- gamma : float
- absorption line gamma value.
- v_doppler : float in cm/s
- doppler b-parameter.
- column_density : float in cm^-2
- column density.
- delta_v : float in cm/s
- velocity offset from lambda_0.
- Default: None (no shift).
- delta_lambda : float in angstroms
- wavelength offset.
- Default: None (no shift).
- lambda_bins : array in angstroms
- wavelength array for line deposition. If None, one will be
- created using n_lambda and dlambda.
- Default: None.
- n_lambda : int
- size of lambda bins to create if lambda_bins is None.
- Default: 12000.
- dlambda : float in angstroms
- lambda bin width in angstroms if lambda_bins is None.
- Default: 0.01.
-
- """
- global tau_factor
- if tau_factor is None:
- tau_factor = (
- np.sqrt(np.pi) * charge_proton_cgs ** 2 /
- (mass_electron_cgs * speed_of_light_cgs)
- ).in_cgs().d
-
- global _cs
- if _cs is None:
- _cs = speed_of_light_cgs.d[()]
-
- # shift lambda_0 by delta_v
- if delta_v is not None:
- lam1 = lambda_0 * (1 + delta_v / _cs)
- elif delta_lambda is not None:
- lam1 = lambda_0 + delta_lambda
- else:
- lam1 = lambda_0
-
- # conversions
- nudop = 1e8 * v_doppler / lam1 # doppler width in Hz
-
- # create wavelength
- if lambda_bins is None:
- lambda_bins = lam1 + \
- np.arange(n_lambda, dtype=np.float) * dlambda - \
- n_lambda * dlambda / 2 # wavelength vector (angstroms)
-
- # tau_0
- tau_X = tau_factor * column_density * f_value / v_doppler
- tau0 = tau_X * lambda_0 * 1e-8
-
- # dimensionless frequency offset in units of doppler freq
- x = _cs / v_doppler * (lam1 / lambda_bins - 1.0)
- a = gamma / (4.0 * np.pi * nudop) # damping parameter
- phi = voigt(a, x) # line profile
- tauphi = tau0 * phi # profile scaled with tau0
-
- return (lambda_bins, tauphi)
-
-if isinstance(special, NotAModule):
- voigt = voigt_old
-else:
- voigt = voigt_scipy
diff --git a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
deleted file mode 100644
index e9b28d85893..00000000000
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ /dev/null
@@ -1,642 +0,0 @@
-"""
-AbsorptionSpectrum class and member functions.
-
-
-
-"""
-
-from __future__ import absolute_import
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-
-from .absorption_line import tau_profile
-
-from yt.extern.six import string_types
-from yt.convenience import load
-from yt.funcs import get_pbar, mylog
-from yt.units.yt_array import YTArray, YTQuantity
-from yt.utilities.physical_constants import \
- boltzmann_constant_cgs, \
- speed_of_light_cgs
-from yt.utilities.on_demand_imports import _astropy
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- _get_comm, \
- parallel_objects, \
- parallel_root_only
-
-pyfits = _astropy.pyfits
-
-class AbsorptionSpectrum(object):
- r"""Create an absorption spectrum object.
-
- Parameters
- ----------
-
- lambda_min : float
- lower wavelength bound in angstroms.
- lambda_max : float
- upper wavelength bound in angstroms.
- n_lambda : int
- number of wavelength bins.
- """
-
- def __init__(self, lambda_min, lambda_max, n_lambda):
- self.n_lambda = int(n_lambda)
- # lambda, flux, and tau are wavelength, flux, and optical depth
- self.lambda_min = lambda_min
- self.lambda_max = lambda_max
- self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max,
- n_lambda), "angstrom")
- self.tau_field = None
- self.flux_field = None
- self.absorbers_list = None
- self.bin_width = YTQuantity((lambda_max - lambda_min) /
- float(n_lambda - 1), "angstrom")
- self.line_list = []
- self.continuum_list = []
-
- def add_line(self, label, field_name, wavelength,
- f_value, gamma, atomic_mass,
- label_threshold=None):
- r"""Add an absorption line to the list of lines included in the spectrum.
-
- Parameters
- ----------
-
- label : string
- label for the line.
- field_name : string
- field name from ray data for column densities.
- wavelength : float
- line rest wavelength in angstroms.
- f_value : float
- line f-value.
- gamma : float
- line gamma value.
- atomic_mass : float
- mass of atom in amu.
- """
- self.line_list.append({'label': label, 'field_name': field_name,
- 'wavelength': YTQuantity(wavelength, "angstrom"),
- 'f_value': f_value,
- 'gamma': gamma,
- 'atomic_mass': YTQuantity(atomic_mass, "amu"),
- 'label_threshold': label_threshold})
-
- def add_continuum(self, label, field_name, wavelength,
- normalization, index):
- """
- Add a continuum feature that follows a power-law.
-
- Parameters
- ----------
-
- label : string
- label for the feature.
- field_name : string
- field name from ray data for column densities.
- wavelength : float
- line rest wavelength in angstroms.
- normalization : float
- the column density normalization.
- index : float
- the power-law index for the wavelength dependence.
- """
-
- self.continuum_list.append({'label': label, 'field_name': field_name,
- 'wavelength': wavelength,
- 'normalization': normalization,
- 'index': index})
-
- def make_spectrum(self, input_file, output_file=None,
- line_list_file=None, output_absorbers_file=None,
- use_peculiar_velocity=True,
- subgrid_resolution=10, observing_redshift=0.,
- njobs="auto"):
- """
- Make spectrum from ray data using the line list.
-
- Parameters
- ----------
-
- input_file : string or dataset
- path to input ray data or a loaded ray dataset
- output_file : optional, string
- Option to save a file containing the wavelength, flux, and optical
- depth fields. File formats are chosen based on the filename
- extension. ``.h5`` for hdf5, ``.fits`` for fits, and everything
- else is ASCII.
- Default: None
- output_absorbers_file : optional, string
- Option to save a text file containing all of the absorbers and
- corresponding wavelength and redshift information.
- For parallel jobs, combining the lines lists can be slow so it
- is recommended to set to None in such circumstances.
- Default: None
- use_peculiar_velocity : optional, bool
- if True, include peculiar velocity for calculating doppler redshift
- to shift lines. Requires similar flag to be set in LightRay
- generation.
- Default: True
- subgrid_resolution : optional, int
- When a line is being added that is unresolved (ie its thermal
- width is less than the spectral bin width), the voigt profile of
- the line is deposited into an array of virtual wavelength bins at
- higher resolution. The optical depth from these virtual bins is
- integrated and then added to the coarser spectral wavelength bin.
- The subgrid_resolution value determines the ratio between the
- thermal width and the bin width of the virtual bins. Increasing
- this value yields smaller virtual bins, which increases accuracy,
- but is more expensive. A value of 10 yields accuracy to the 4th
- significant digit in tau.
- Default: 10
- observing_redshift : optional, float
- This is the redshift at which the observer is observing
- the absorption spectrum.
- Default: 0
- njobs : optional, int or "auto"
- the number of process groups into which the loop over
- absorption lines will be divided. If set to -1, each
- absorption line will be deposited by exactly one processor.
- If njobs is set to a value less than the total number of
- available processors (N), then the deposition of an
- individual line will be parallelized over (N / njobs)
- processors. If set to "auto", it will first try to
- parallelize over the list of lines and only parallelize
- the line deposition if there are more processors than
- lines. This is the optimal strategy for parallelizing
- spectrum generation.
- Default: "auto"
- """
- if line_list_file is not None:
- mylog.info("'line_list_file' keyword is deprecated. Please use " \
- "'output_absorbers_file'.")
- output_absorbers_file = line_list_file
-
- input_fields = ['dl', 'redshift', 'temperature']
- field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
- if use_peculiar_velocity:
- input_fields.append('velocity_los')
- input_fields.append('redshift_eff')
- field_units["velocity_los"] = "cm/s"
- field_units["redshift_eff"] = ""
- if observing_redshift != 0.:
- input_fields.append('redshift_dopp')
- field_units["redshift_dopp"] = ""
- for feature in self.line_list + self.continuum_list:
- if not feature['field_name'] in input_fields:
- input_fields.append(feature['field_name'])
- field_units[feature["field_name"]] = "cm**-3"
-
- if isinstance(input_file, string_types):
- input_ds = load(input_file)
- else:
- input_ds = input_file
- field_data = input_ds.all_data()
-
- # temperature field required to calculate voigt profile widths
- if ('temperature' not in input_ds.derived_field_list) and \
- (('gas', 'temperature') not in input_ds.derived_field_list):
- raise RuntimeError(
- "('gas', 'temperature') field required to be present in %s "
- "for AbsorptionSpectrum to function." % input_file)
-
- self.tau_field = np.zeros(self.lambda_field.size)
- self.absorbers_list = []
-
- if njobs == "auto":
- comm = _get_comm(())
- njobs = min(comm.size, len(self.line_list))
-
- mylog.info("Creating spectrum")
- self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
- output_absorbers_file,
- subgrid_resolution=subgrid_resolution,
- observing_redshift=observing_redshift,
- njobs=njobs)
- self._add_continua_to_spectrum(field_data, use_peculiar_velocity,
- observing_redshift=observing_redshift)
-
- self.flux_field = np.exp(-self.tau_field)
-
- if output_file is None:
- pass
- elif output_file.endswith('.h5'):
- self._write_spectrum_hdf5(output_file)
- elif output_file.endswith('.fits'):
- self._write_spectrum_fits(output_file)
- else:
- self._write_spectrum_ascii(output_file)
- if output_absorbers_file is not None:
- self._write_absorbers_file(output_absorbers_file)
-
- del field_data
- return (self.lambda_field, self.flux_field)
-
- def _apply_observing_redshift(self, field_data, use_peculiar_velocity,
- observing_redshift):
- """
- Change the redshifts of individual absorbers to account for the
- redshift at which the observer sits.
-
- The intermediate redshift that is seen by an observer
- at a redshift other than z=0 is z12, where z1 is the
- observing redshift and z2 is the emitted photon's redshift
- Hogg (2000) eq. 13:
-
- 1 + z12 = (1 + z2) / (1 + z1)
- """
- if observing_redshift == 0.:
- # This is already assumed in the generation of the LightRay
- redshift = field_data['redshift']
- if use_peculiar_velocity:
- redshift_eff = field_data['redshift_eff']
- else:
- # The intermediate redshift that is seen by an observer
- # at a redshift other than z=0 is z12, where z1 is the
- # observing redshift and z2 is the emitted photon's redshift
- # Hogg (2000) eq. 13:
- # 1 + z12 = (1 + z2) / (1 + z1)
- redshift = ((1 + field_data['redshift']) / \
- (1 + observing_redshift)) - 1.
- # Combining cosmological redshift and doppler redshift
- # into an effective redshift is found in Peacock's
- # Cosmological Physics eqn 3.75:
- # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
- if use_peculiar_velocity:
- redshift_eff = ((1 + redshift) * \
- (1 + field_data['redshift_dopp'])) - 1.
-
- if not use_peculiar_velocity:
- redshift_eff = redshift
-
- return redshift, redshift_eff
-
- def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
- observing_redshift=0.):
- """
- Add continuum features to the spectrum. Continuua are recorded as
- a name, associated field, wavelength, normalization value, and index.
- Continuua are applied at and below the denoted wavelength, where the
- optical depth decreases as a power law of desired index. For positive
- index values, this means optical depth is highest at the denoted
- wavelength, and it drops with shorter and shorter wavelengths.
- Consequently, transmitted flux undergoes a discontinuous cutoff at the
- denoted wavelength, and then slowly increases with decreasing wavelength
- according to the power law.
- """
- # Change the redshifts of continuum sources to account for the
- # redshift at which the observer sits
- redshift, redshift_eff = self._apply_observing_redshift(field_data,
- use_peculiar_velocity, observing_redshift)
-
- # min_tau is the minimum optical depth value that warrants
- # accounting for an absorber. for a single absorber, noticeable
- # continuum effects begin for tau = 1e-3 (leading to transmitted
- # flux of e^-tau ~ 0.999). but we apply a cutoff to remove
- # absorbers with insufficient column_density to contribute
- # significantly to a continuum (see below). because lots of
- # low column density absorbers can add up to a significant
- # continuum effect, we normalize min_tau by the n_absorbers.
- n_absorbers = field_data['dl'].size
- min_tau = 1.e-3/n_absorbers
-
- for continuum in self.continuum_list:
-
- # Normalization is in cm**-2, so column density must be as well
- column_density = (field_data[continuum['field_name']] *
- field_data['dl']).in_units('cm**-2')
- if (column_density == 0).all():
- mylog.info("Not adding continuum %s: insufficient column density" % continuum['label'])
- continue
-
- # redshift_eff field combines cosmological and velocity redshifts
- if use_peculiar_velocity:
- delta_lambda = continuum['wavelength'] * redshift_eff
- else:
- delta_lambda = continuum['wavelength'] * redshift
-
- # right index of continuum affected area is wavelength itself
- this_wavelength = delta_lambda + continuum['wavelength']
- right_index = np.digitize(this_wavelength,
- self.lambda_field).clip(0, self.n_lambda)
- # left index of continuum affected area wavelength at which
- # optical depth reaches tau_min
- left_index = np.digitize((this_wavelength *
- np.power((min_tau * continuum['normalization'] /
- column_density),
- (1. / continuum['index']))),
- self.lambda_field).clip(0, self.n_lambda)
-
- # Only calculate the effects of continuua where normalized
- # column_density is greater than min_tau
- # because lower column will not have significant contribution
- valid_continuua = np.where(((column_density /
- continuum['normalization']) > min_tau) &
- (right_index - left_index > 1))[0]
- if valid_continuua.size == 0:
- mylog.info("Not adding continuum %s: insufficient column density or out of range" %
- continuum['label'])
- continue
-
- pbar = get_pbar("Adding continuum - %s [%f A]: " % \
- (continuum['label'], continuum['wavelength']),
- valid_continuua.size)
-
- # Tau value is (wavelength / continuum_wavelength)**index /
- # (column_dens / norm)
- # i.e. a power law decreasing as wavelength decreases
-
- # Step through the absorber list and add continuum tau for each to
- # the total optical depth for all wavelengths
- for i, lixel in enumerate(valid_continuua):
- cont_tau = \
- np.power((self.lambda_field[left_index[lixel] :
- right_index[lixel]] /
- this_wavelength[lixel]), \
- continuum['index']) * \
- (column_density[lixel] / continuum['normalization'])
- self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau.d
- pbar.update(i)
- pbar.finish()
-
- def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
- output_absorbers_file, subgrid_resolution=10,
- observing_redshift=0., njobs=-1):
- """
- Add the absorption lines to the spectrum.
- """
-
- # Change the redshifts of individual absorbers to account for the
- # redshift at which the observer sits
- redshift, redshift_eff = self._apply_observing_redshift(field_data,
- use_peculiar_velocity, observing_redshift)
-
- # Widen wavelength window until optical depth falls below this tau
- # value at the ends to assure that the wings of a line have been
- # fully resolved.
- min_tau = 1e-3
-
- # step through each ionic transition (e.g. HI, HII, MgII) specified
- # and deposit the lines into the spectrum
- for line in parallel_objects(self.line_list, njobs=njobs):
- column_density = field_data[line['field_name']] * field_data['dl']
- if (column_density < 0).any():
- mylog.warn("Setting negative densities for field %s to 0! Bad!" % line['field_name'])
- np.clip(column_density, 0, np.inf, out=column_density)
- if (column_density == 0).all():
- mylog.info("Not adding line %s: insufficient column density" % line['label'])
- continue
-
- # redshift_eff field combines cosmological and velocity redshifts
- # so delta_lambda gives the offset in angstroms from the rest frame
- # wavelength to the observed wavelength of the transition
- if use_peculiar_velocity:
- delta_lambda = line['wavelength'] * redshift_eff
- else:
- delta_lambda = line['wavelength'] * redshift
- # lambda_obs is central wavelength of line after redshift
- lambda_obs = line['wavelength'] + delta_lambda
- # the total number of absorbers per transition
- n_absorbers = len(lambda_obs)
-
- # we want to know the bin index in the lambda_field array
- # where each line has its central wavelength after being
- # redshifted. however, because we don't know a priori how wide
- # a line will be (ie DLAs), we have to include bin indices
- # *outside* the spectral range of the AbsorptionSpectrum
- # object. Thus, we find the "equivalent" bin index, which
- # may be <0 or >the size of the array. In the end, we deposit
- # the bins that actually overlap with the AbsorptionSpectrum's
- # range in lambda.
-
- # this equation gives us the "equivalent" bin index for each line
- # if it were placed into the self.lambda_field array
- center_index = (lambda_obs.in_units('Angstrom').d - self.lambda_min) \
- / self.bin_width.d
- center_index = np.ceil(center_index).astype('int')
-
- # thermal broadening b parameter
- thermal_b = np.sqrt((2 * boltzmann_constant_cgs *
- field_data['temperature']) /
- line['atomic_mass'])
-
- # the actual thermal width of the lines
- thermal_width = (lambda_obs * thermal_b /
- speed_of_light_cgs).convert_to_units("angstrom")
-
- # Sanitize units for faster runtime of the tau_profile machinery.
- lambda_0 = line['wavelength'].d # line's rest frame; angstroms
- cdens = column_density.in_units("cm**-2").d # cm**-2
- thermb = thermal_b.in_cgs().d # thermal b coefficient; cm / s
- dlambda = delta_lambda.d # lambda offset; angstroms
- if use_peculiar_velocity:
- vlos = field_data['velocity_los'].in_units("km/s").d # km/s
- else:
- vlos = np.zeros(field_data['temperature'].size)
-
- # When we actually deposit the voigt profile, sometimes we will
- # have underresolved lines (ie lines with smaller widths than
- # the spectral bin size). Here, we create virtual wavelength bins
- # small enough in width to well resolve each line, deposit the
- # voigt profile into them, then numerically integrate their tau
- # values and sum them to redeposit them into the actual spectral
- # bins.
-
- # virtual bins (vbins) will be:
- # 1) <= the bin_width; assures at least as good as spectral bins
- # 2) <= 1/10th the thermal width; assures resolving voigt profiles
- # (actually 1/subgrid_resolution value, default is 1/10)
- # 3) a bin width will be divisible by vbin_width times a power of
- # 10; this will assure we don't get spikes in the deposited
- # spectra from uneven numbers of vbins per bin
- resolution = thermal_width / self.bin_width
- n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution /
- resolution) ).clip(0, np.inf) ) ).astype('int')
- vbin_width = self.bin_width.d / n_vbins_per_bin
-
- # a note to the user about which lines components are unresolved
- if (thermal_width < self.bin_width).any():
- mylog.info("%d out of %d line components will be " +
- "deposited as unresolved lines.",
- (thermal_width < self.bin_width).sum(),
- n_absorbers)
-
- # provide a progress bar with information about lines processed
- pbar = get_pbar("Adding line - %s [%f A]: " % \
- (line['label'], line['wavelength']), n_absorbers)
-
- # for a given transition, step through each location in the
- # observed spectrum where it occurs and deposit a voigt profile
- for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
-
- # if there is a ray element with temperature = 0 or column
- # density = 0, skip it
- if (thermal_b[i] == 0.) or (cdens[i] == 0.):
- pbar.update(i)
- continue
-
- # the virtual window into which the line is deposited initially
- # spans a region of 2 coarse spectral bins
- # (one on each side of the center_index) but the window
- # can expand as necessary.
- # it will continue to expand until the tau value in the far
- # edge of the wings is less than the min_tau value or it
- # reaches the edge of the spectrum
- window_width_in_bins = 2
-
- while True:
- left_index = (center_index[i] - window_width_in_bins//2)
- right_index = (center_index[i] + window_width_in_bins//2)
- n_vbins = (right_index - left_index) * n_vbins_per_bin[i]
-
- # the array of virtual bins in lambda space
- vbins = \
- np.linspace(self.lambda_min + self.bin_width.d * left_index,
- self.lambda_min + self.bin_width.d * right_index,
- n_vbins, endpoint=False)
-
- # the virtual bins and their corresponding opacities
- vbins, vtau = \
- tau_profile(
- lambda_0, line['f_value'], line['gamma'],
- thermb[i], cdens[i],
- delta_lambda=dlambda[i], lambda_bins=vbins)
-
- # If tau has not dropped below min tau threshold by the
- # edges (ie the wings), then widen the wavelength
- # window and repeat process.
- if (vtau[0] < min_tau and vtau[-1] < min_tau):
- break
- window_width_in_bins *= 2
-
- # numerically integrate the virtual bins to calculate a
- # virtual equivalent width; then sum the virtual equivalent
- # widths and deposit into each spectral bin
- vEW = vtau * vbin_width[i]
- EW = np.zeros(right_index - left_index)
- EW_indices = np.arange(left_index, right_index)
- for k, val in enumerate(EW_indices):
- EW[k] = vEW[n_vbins_per_bin[i] * k: \
- n_vbins_per_bin[i] * (k + 1)].sum()
- EW = EW/self.bin_width.d
-
- # only deposit EW bins that actually intersect the original
- # spectral wavelength range (i.e. lambda_field)
-
- # if EW bins don't intersect the original spectral range at all
- # then skip the deposition
- if ((left_index >= self.n_lambda) or \
- (right_index < 0)):
- pbar.update(i)
- continue
-
- # otherwise, determine how much of the original spectrum
- # is intersected by the expanded line window to be deposited,
- # and deposit the Equivalent Width data into that intersecting
- # window in the original spectrum's tau
- else:
- intersect_left_index = max(left_index, 0)
- intersect_right_index = min(right_index, self.n_lambda-1)
- self.tau_field[intersect_left_index:intersect_right_index] \
- += EW[(intersect_left_index - left_index): \
- (intersect_right_index - left_index)]
-
-
- # write out absorbers to file if the column density of
- # an absorber is greater than the specified "label_threshold"
- # of that absorption line
- if output_absorbers_file and \
- line['label_threshold'] is not None and \
- cdens[i] >= line['label_threshold']:
-
- if use_peculiar_velocity:
- peculiar_velocity = vlos[i]
- else:
- peculiar_velocity = 0.0
- self.absorbers_list.append({'label': line['label'],
- 'wavelength': (lambda_0 + dlambda[i]),
- 'column_density': column_density[i],
- 'b_thermal': thermal_b[i],
- 'redshift': redshift[i],
- 'redshift_eff': redshift_eff[i],
- 'v_pec': peculiar_velocity})
- pbar.update(i)
- pbar.finish()
-
- del column_density, delta_lambda, lambda_obs, center_index, \
- thermal_b, thermal_width, cdens, thermb, dlambda, \
- vlos, resolution, vbin_width, n_vbins, n_vbins_per_bin
-
- comm = _get_comm(())
- self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
- if output_absorbers_file:
- self.absorbers_list = comm.par_combine_object(
- self.absorbers_list, "cat", datatype="list")
-
- @parallel_root_only
- def _write_absorbers_file(self, filename):
- """
- Write out ASCII list of all substantial absorbers found in spectrum
- """
- if filename is None:
- return
- mylog.info("Writing absorber list: %s.", filename)
- self.absorbers_list.sort(key=lambda obj: obj['wavelength'])
- f = open(filename, 'w')
- f.write('#%-14s %-14s %-12s %-14s %-15s %-9s %-10s\n' %
- ('Wavelength', 'Line', 'N [cm^-2]', 'b [km/s]', 'z_cosmo', \
- 'z_eff', 'v_pec [km/s]'))
- for line in self.absorbers_list:
- f.write('%-14.6f %-14ls %e %e % e % e % e\n' % (line['wavelength'], \
- line['label'], line['column_density'], line['b_thermal'], \
- line['redshift'], line['redshift_eff'], line['v_pec']))
- f.close()
-
- @parallel_root_only
- def _write_spectrum_ascii(self, filename):
- """
- Write spectrum to an ascii file.
- """
- mylog.info("Writing spectrum to ascii file: %s.", filename)
- f = open(filename, 'w')
- f.write("# wavelength[A] tau flux\n")
- for i in range(self.lambda_field.size):
- f.write("%e %e %e\n" % (self.lambda_field[i],
- self.tau_field[i], self.flux_field[i]))
- f.close()
-
- @parallel_root_only
- def _write_spectrum_fits(self, filename):
- """
- Write spectrum to a fits file.
- """
- mylog.info("Writing spectrum to fits file: %s.", filename)
- col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_field)
- col2 = pyfits.Column(name='tau', format='E', array=self.tau_field)
- col3 = pyfits.Column(name='flux', format='E', array=self.flux_field)
- cols = pyfits.ColDefs([col1, col2, col3])
- tbhdu = pyfits.BinTableHDU.from_columns(cols)
- tbhdu.writeto(filename, overwrite=True)
-
- @parallel_root_only
- def _write_spectrum_hdf5(self, filename):
- """
- Write spectrum to an hdf5 file.
-
- """
- mylog.info("Writing spectrum to hdf5 file: %s.", filename)
- output = h5py.File(filename, mode='w')
- output.create_dataset('wavelength', data=self.lambda_field)
- output.create_dataset('tau', data=self.tau_field)
- output.create_dataset('flux', data=self.flux_field)
- output.close()
diff --git a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
deleted file mode 100644
index ca4aa3c033c..00000000000
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ /dev/null
@@ -1,1018 +0,0 @@
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-
-from yt.analysis_modules.absorption_spectrum.absorption_line import \
- voigt
-from yt.funcs import \
- mylog
-from yt.units.yt_array import \
- YTArray
-from yt.utilities.on_demand_imports import \
- _scipy
-
-optimize = _scipy.optimize
-
-def generate_total_fit(x, fluxData, orderFits, speciesDicts,
- minError=1E-4, complexLim=.995,
- fitLim=.97, minLength=3,
- maxLength=1000, splitLim=.99,
- output_file=None):
-
- """
- This function is designed to fit an absorption spectrum by breaking
- the spectrum up into absorption complexes, and iteratively adding
- and optimizing voigt profiles to each complex.
-
- Parameters
- ----------
- x : (N) ndarray
- 1d array of wavelengths
- fluxData : (N) ndarray
- array of flux corresponding to the wavelengths given
- in x. (needs to be the same size as x)
- orderFits : list
- list of the names of the species in the order that they
- should be fit. Names should correspond to the names of the species
- given in speciesDicts. (ex: ['lya','OVI'])
- speciesDicts : dictionary
-        Dictionary of dictionaries. Top level keys should be the names of all
-        the species given in orderFits. The entries should be dictionaries
-        containing all relevant parameters needed to create an absorption
-        line of a given species (f, Gamma, lambda0) as well as max and min
-        values for the parameters to be fit
- complexLim : float, optional
- Maximum flux to start the edge of an absorption complex. Different
- from fitLim because it decides extent of a complex rather than
- whether or not a complex is accepted.
-    fitLim : float, optional
-        Maximum flux at which the level of absorption will trigger
-        identification of the region as an absorption complex. Default = .97.
-        (ex: for fitLim=.98, a region where all the flux is between 1.0 and
-        .99 will not be separated out to be fit as an absorbing complex, but
-        a region that contains a point where the flux is .97 will be fit
-        as an absorbing complex.)
- minLength : int, optional
- number of cells required for a complex to be included.
- default is 3 cells.
- maxLength : int, optional
- number of cells required for a complex to be split up. Default
- is 1000 cells.
- splitLim : float, optional
-        if attempting to split a region for being larger than maxLength
-        the point of the split must have a flux greater than splitLim
-        (ie: very little absorption at the split point). Default = .99.
- output_file : string, optional
- location to save the results of the fit.
-
- Returns
- -------
- allSpeciesLines : dictionary
- Dictionary of dictionaries representing the fit lines.
- Top level keys are the species given in orderFits and the corresponding
- entries are dictionaries with the keys 'N','b','z', and 'group#'.
- Each of these corresponds to a list of the parameters for every
- accepted fitted line. (ie: N[0],b[0],z[0] will create a line that
- fits some part of the absorption spectrum). 'group#' is a similar list
- but identifies which absorbing complex each line belongs to. Lines
- with the same group# were fit at the same time. group#'s do not
- correlate between species (ie: an lya line with group number 1 and
- an OVI line with group number 1 were not fit together and do
- not necessarily correspond to the same region)
- yFit : (N) ndarray
- array of flux corresponding to the combination of all fitted
- absorption profiles. Same size as x.
- """
-
- # convert to NumPy array if we have a YTArray
- if isinstance(x, YTArray):
- x = x.d
-
- #Empty dictionary for fitted lines
- allSpeciesLines = {}
-
- #Wavelength of beginning of array, wavelength resolution
- x0,xRes=x[0],x[1]-x[0]
-
- #Empty fit without any lines
- yFit = np.ones(len(fluxData))
-
- #Force the first and last flux pixel to be 1 to prevent OOB
- fluxData[0]=1
- fluxData[-1]=1
-
-
- #Find all regions where lines/groups of lines are present
- cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
- complexLim=complexLim, minLength=minLength,
- maxLength=maxLength, splitLim=splitLim)
-
- #Fit all species one at a time in given order from low to high wavelength
- for species in orderFits:
- speciesDict = speciesDicts[species]
- speciesLines = {'N':np.array([]),
- 'b':np.array([]),
- 'z':np.array([]),
- 'group#':np.array([])}
-
- #Set up wavelengths for species
- initWl = speciesDict['wavelength'][0]
-
- for b_i,b in enumerate(cBounds):
- xBounded=x[b[1]:b[2]]
- yDatBounded=fluxData[b[1]:b[2]]
- yFitBounded=yFit[b[1]:b[2]]
-
-
- #Find init redshift
- z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
-
- #Check if any flux at partner sites
- if not _line_exists(speciesDict['wavelength'],
- fluxData,z,x0,xRes,fitLim):
- continue
-
- #Fit Using complex tools
- newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
- z,fitLim,minError,speciesDict)
-
- #If flagged as a bad fit, species is lyman alpha,
- # and it may be a saturated line, use special tools
- if flag and species=='lya' and min(yDatBounded)<.1:
- newLinesP=_large_flag_fit(xBounded,yDatBounded,
- yFitBounded,z,speciesDict,
- fitLim,minError)
-
- if np.size(newLinesP)> 0:
-
-                #Check for numerical instabilities in the new parameters
- newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
-
-
- #Check existence of partner lines if applicable
- if len(speciesDict['wavelength']) != 1:
- newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
- b, minError, x0, xRes, speciesDict)
-
-
-
-
- #Adjust total current fit
- yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
-
-
- #Add new group to all fitted lines
- if np.size(newLinesP)>0:
- speciesLines['N']=np.append(speciesLines['N'],newLinesP[:,0])
- speciesLines['b']=np.append(speciesLines['b'],newLinesP[:,1])
- speciesLines['z']=np.append(speciesLines['z'],newLinesP[:,2])
- groupNums = b_i*np.ones(np.size(newLinesP[:,0]))
- speciesLines['group#']=np.append(speciesLines['group#'],groupNums)
-
- allSpeciesLines[species]=speciesLines
-
-
- if output_file:
- _output_fit(allSpeciesLines, output_file)
-
- return (allSpeciesLines,yFit)
-
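A minimal calling sketch for generate_total_fit (not part of the original file; all parameter values are illustrative and mirror the HI dictionary used in the tests below, and scipy must be importable for the module-level optimize binding):

    import numpy as np

    # Hypothetical species dictionary with the keys the fitter reads:
    # f, Gamma, wavelength, numLines, plus init/min/max bounds for N, b, z.
    HI_parameters = {
        'name': 'HI lya', 'field': 'H_number_density',
        'f': [0.4164], 'Gamma': [6.265e8], 'wavelength': [1215.67],
        'numLines': 1, 'maxN': 1e22, 'minN': 1e11,
        'maxb': 300, 'minb': 1, 'maxz': 6, 'minz': 0,
        'init_b': 30, 'init_N': 1e14}

    # x and flux would normally come from AbsorptionSpectrum.make_spectrum;
    # a featureless continuum simply yields no fitted lines.
    x = np.linspace(1200.0, 1300.0, 10001)
    flux = np.ones_like(x)

    fitted_lines, y_fit = generate_total_fit(
        x, flux, ['lya'], {'lya': HI_parameters}, output_file=None)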
-def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict,
- initP=None):
- """ Fit an absorption complex by iteratively adding and optimizing
- voigt profiles.
-
-    A complex is defined as a region where some number of lines may be
-    present, i.e. a region of non-zero absorption. Lines are iteratively
-    added and optimized until the least squares difference between the flux
-    generated from the optimized parameters and the desired flux profile
-    falls below the error bound.
-
- Parameters
- ----------
- x : (N) ndarray
- array of wavelength
- ydat : (N) ndarray
- array of desired flux profile to be fitted for the wavelength
- space given by x. Same size as x.
- yFit : (N) ndarray
- array of flux profile fitted for the wavelength
- space given by x already. Same size as x.
- initz : float
- redshift to try putting first line at
- (maximum absorption for region)
- minsize : float
- minimum absorption allowed for a line to still count as a line
- given in normalized flux (ie: for minSize=.9, only lines with minimum
- flux less than .9 will be fitted)
- errbound : float
- maximum total error allowed for an acceptable fit
- speciesDict : dictionary
- dictionary containing all relevant parameters needed
- to create an absorption line of a given species (f,Gamma,lambda0)
- as well as max and min values for parameters to be fit
- initP : (,3,) ndarray
- initial guess to try for line parameters to fit the region. Used
- by large_flag_fit. Default = None, and initial guess generated
- automatically.
-
- Returns
- -------
- linesP : (3,) ndarray
- Array of best parameters if a good enough fit is found in
- the form [[N1,b1,z1], [N2,b2,z2],...]
- flag : bool
-        boolean flag indicating whether the fit failed (True if unsuccessful)
- """
-
- #Setup initial line guesses
- if initP is None: #Regular fit
- initP = [0,0,0]
- if min(yDat)<.01: #Large lines get larger initial guess
- initP[0] = speciesDict['init_N']*10**2
- elif min(yDat)<.5:
- initP[0] = speciesDict['init_N']*10**1
- elif min(yDat)>.9: #Small lines get smaller initial guess
- initP[0] = speciesDict['init_N']*10**-1
- else:
- initP[0] = speciesDict['init_N']
- initP[1] = speciesDict['init_b']
- initP[2]=initz
- initP=np.array([initP])
-
- linesP = initP
-
- #For generating new z guesses
- wl0 = speciesDict['wavelength'][0]
-
- #Check if first line exists still
- if min(yDat-yFit+1)>minSize:
- return [],False
-
- #Values to proceed through first run
- errSq,prevErrSq,prevLinesP=1,10*len(x),[]
-
- if errBound is None:
- errBound = len(yDat)*(max(1-yDat)*1E-2)**2
- else:
- errBound = errBound*len(yDat)
-
- flag = False
- while True:
-
- #Initial parameter guess from joining parameters from all lines
- # in lines into a single array
- initP = linesP.flatten()
-
- #Optimize line
- fitP,success=optimize.leastsq(_voigt_error,initP,
- args=(x,yDat,yFit,speciesDict),
- epsfcn=1E-10,maxfev=1000)
-
-
- #Set results of optimization
- linesP = np.reshape(fitP,(-1,3))
-
- #Generate difference between current best fit and data
- yNewFit=_gen_flux_lines(x,linesP,speciesDict)
- dif = yFit*yNewFit-yDat
-
- #Sum to get idea of goodness of fit
- errSq=sum(dif**2)
-
- if any(linesP[:,1]==speciesDict['init_b']):
- flag = True
- break
-
- #If good enough, break
- if errSq < errBound:
- break
-
- #If last fit was worse, reject the last line and revert to last fit
- if errSq > prevErrSq*10 :
-            #If it's still well above the error bound, give up and try the flag fit tools
- if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
- return [],True
- else:
- linesP = prevLinesP
- break
-
- #If too many lines
- if np.shape(linesP)[0]>8 or np.size(linesP)+3>=len(x):
-            #If it's fittable by the flag fit tools and still bad, use them
- if errSq >1E2*errBound and speciesDict['name']=='HI lya':
- return [],True
- else:
- flag = True
- break
-
- #Store previous data in case reject next fit
- prevErrSq = errSq
- prevLinesP = linesP
-
- #Set up initial condition for new line
- newP = [0,0,0]
-
- yAdjusted = 1+yFit*yNewFit-yDat
-
- if min(yAdjusted)<.01: #Large lines get larger initial guess
- newP[0] = speciesDict['init_N']*10**2
- elif min(yAdjusted)<.5:
- newP[0] = speciesDict['init_N']*10**1
- elif min(yAdjusted)>.9: #Small lines get smaller initial guess
- newP[0] = speciesDict['init_N']*10**-1
- else:
- newP[0] = speciesDict['init_N']
- newP[1] = speciesDict['init_b']
- newP[2]=(x[dif.argmax()]-wl0)/wl0
- linesP=np.append(linesP,[newP],axis=0)
-
-
- #Check the parameters of all lines to see if they fall in an
- # acceptable range, as given in dict ref
- remove=[]
- for i,p in enumerate(linesP):
- check=_check_params(np.array([p]),speciesDict,x)
- if check:
- remove.append(i)
- linesP = np.delete(linesP,remove,axis=0)
-
- return linesP,flag
-
-def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
- """
- Attempts to more robustly fit saturated lyman alpha regions that have
- not converged to satisfactory fits using the standard tools.
-
- Uses a preselected sample of a wide range of initial parameter guesses
- designed to fit saturated lines (see get_test_lines).
-
- Parameters
- ----------
- x : (N) ndarray
- array of wavelength
- ydat : (N) ndarray
- array of desired flux profile to be fitted for the wavelength
- space given by x. Same size as x.
- yFit : (N) ndarray
- array of flux profile fitted for the wavelength
- space given by x already. Same size as x.
- initz : float
- redshift to try putting first line at
- (maximum absorption for region)
- speciesDict : dictionary
- dictionary containing all relevant parameters needed
- to create an absorption line of a given species (f,Gamma,lambda0)
- as well as max and min values for parameters to be fit
- minsize : float
- minimum absorption allowed for a line to still count as a line
- given in normalized flux (ie: for minSize=.9, only lines with minimum
- flux less than .9 will be fitted)
- errbound : float
- maximum total error allowed for an acceptable fit
-
- Returns
- -------
- bestP : (3,) ndarray
- array of best parameters if a good enough fit is found in
- the form [[N1,b1,z1], [N2,b2,z2],...]
- """
-
- #Set up some initial line guesses
- lineTests = _get_test_lines(initz)
-
- #Keep track of the lowest achieved error
- bestError = 1000
-
- #Iterate through test line guesses
- for initLines in lineTests:
- if initLines[1,0]==0:
- initLines = np.delete(initLines,1,axis=0)
-
- #Do fitting with initLines as first guess
- linesP,flag=_complex_fit(x,yDat,yFit,initz,
- minSize,errBound,speciesDict,initP=initLines)
-
- #Find error of last fit
- yNewFit=_gen_flux_lines(x,linesP,speciesDict)
- dif = yFit*yNewFit-yDat
- errSq=sum(dif**2)
-
- #If error lower, keep track of the lines used to make that fit
- if errSq < bestError:
- bestError = errSq
- bestP = linesP
-
- if bestError>10*errBound*len(x):
- return []
- else:
- return bestP
-
-def _get_test_lines(initz):
- """
- Returns a 3d numpy array of lines to test as initial guesses for difficult
- to fit lyman alpha absorbers that are saturated.
-
-    The array is 3d because the first dimension gives separate initial
-    guesses, the second dimension has multiple lines for the same guess
-    (trying a broad line plus a saturated line), and the third dimension
-    contains the 3 fit parameters (N, b, z).
-
- Parameters
- ----------
- initz : float
- redshift to give all the test lines
-
- Returns
- -------
- testP : (,3,) ndarray
- numpy array of the form
- [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...]
- """
-
- #Set up a bunch of empty lines
- testP = np.zeros((10,2,3))
-
- testP[0,0,:]=[1E18,20,initz]
- testP[1,0,:]=[1E18,40,initz]
- testP[2,0,:]=[1E16,5, initz]
- testP[3,0,:]=[1E16,20,initz]
- testP[4,0,:]=[1E16,80,initz]
-
- testP[5,0,:]=[1E18,20,initz]
- testP[6,0,:]=[1E18,40,initz]
- testP[7,0,:]=[1E16,5, initz]
- testP[8,0,:]=[1E16,20,initz]
- testP[9,0,:]=[1E16,80,initz]
-
- testP[5,1,:]=[1E13,100,initz]
- testP[6,1,:]=[1E13,100,initz]
- testP[7,1,:]=[1E13,100,initz]
- testP[8,1,:]=[1E13,100,initz]
- testP[9,1,:]=[1E13,100,initz]
-
- return testP
-
-def _get_bounds(z, b, wl, x0, xRes):
- """
-    Gets the indices of a wavelength window around the redshifted wavelength
-    wl, with roughly the same extent as the original bounded region.
-
- Used for checking if species with multiple lines (as in the OVI doublet)
- fit all lines appropriately.
-
- Parameters
- ----------
- z : float
- redshift
- b : (3) ndarray/list
- initial bounds in form [i0,i1,i2] where i0 is the index of the
- minimum flux for the complex, i1 is index of the lower wavelength
- edge of the complex, and i2 is the index of the higher wavelength
- edge of the complex.
- wl : float
- unredshifted wavelength of the peak of the new region
- x0 : float
- wavelength of the index 0
- xRes : float
- difference in wavelength for two consecutive indices
-
- Returns
- -------
- indices : (2) tuple
- Tuple (i1,i2) where i1 is the index of the lower wavelength bound of
- the new region and i2 is the index of the higher wavelength bound of
- the new region
- """
-
- r=[-b[1]+100+b[0],b[2]+100-b[0]]
- redWl = (z+1)*wl
- iRedWl=int((redWl-x0)/xRes)
- indices = (iRedWl-r[0],iRedWl+r[1])
-
- return indices
-
-def _remove_unaccepted_partners(linesP, x, y, b, errBound,
- x0, xRes, speciesDict):
- """
- Given a set of parameters [N,b,z] that form multiple lines for a given
- species (as in the OVI doublet), remove any set of parameters where
- not all transition wavelengths have a line that matches the fit.
-
- (ex: if a fit is determined based on the first line of the OVI doublet,
- but the given parameters give a bad fit of the wavelength space of
- the second line then that set of parameters is removed from the array
- of line parameters.)
-
- Parameters
- ----------
- linesP : (3,) ndarray
- array giving sets of line parameters in
- form [[N1, b1, z1], ...]
- x : (N) ndarray
- wavelength array [nm]
- y : (N) ndarray
- normalized flux array of original data
- b : (3) tuple/list/ndarray
- indices that give the bounds of the original region so that another
- region of similar size can be used to determine the goodness
- of fit of the other wavelengths
- errBound : float
- size of the error that is appropriate for a given region,
- adjusted to account for the size of the region.
-
- Returns
- -------
- linesP : (3,) ndarray
- array similar to linesP that only contains lines with
- appropriate fits of all transition wavelengths.
- """
-
- #List of lines to remove
- removeLines=[]
-
-    #The error bound is scaled per transition region inside the loop below
-
-
- #Iterate through all sets of line parameters
- for i,p in enumerate(linesP):
-
- #iterate over all transition wavelengths
- for wl in speciesDict['wavelength']:
-
- #Get the bounds of a similar sized region around the
- # appropriate wavelength, and then get the appropriate
- # region of wavelength and flux
- lb = _get_bounds(p[2],b,wl,x0,xRes)
- xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
-
- if errBound is None:
- errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
- else:
- errBound = 10*errBound*len(yb)
-
- #Generate a fit and find the difference to data
- yFitb=_gen_flux_lines(xb,np.array([p]),speciesDict)
- dif =yb-yFitb
-
-
-
-            #Only count it as an error where the fitted line absorbs more than the data
- dif = [k for k in dif if k>0]
- err = sum(dif)
-
- #If the fit is too bad then add the line to list of removed lines
- if err > errBound:
- removeLines.append(i)
- break
-
- #Remove all bad line fits
- linesP = np.delete(linesP,removeLines,axis=0)
-
- return linesP
-
-
-
-def _line_exists(wavelengths, y, z, x0, xRes,fluxMin):
- """For a group of lines finds if the there is some change in flux greater
- than some minimum at the same redshift with different initial wavelengths
-
- Parameters
- ----------
- wavelengths : (N) ndarray
- array of initial wavelengths to check
- y : (N) ndarray
- flux array to check
- x0 : float
- wavelength of the first value in y
- xRes : float
- difference in wavelength between consecutive cells in flux array
- fluxMin : float
- maximum flux to count as a line existing.
-
- Returns
- -------
-
- flag : boolean
- value indicating whether all lines exist. True if all lines exist
- """
-
- #Iterate through initial wavelengths
- for wl in wavelengths:
- #Redshifted wavelength
- redWl = (z+1)*wl
-
- #Index of the redshifted wavelength
- indexRedWl = (redWl-x0)/xRes
-
- #Check to see if even in flux range
- if indexRedWl > len(y):
- return False
-
- #Check if surpasses minimum absorption bound
- if y[int(indexRedWl)]>fluxMin:
- return False
-
- return True
-
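The index arithmetic used here (and again in _get_bounds) maps a rest wavelength and redshift onto the flux array as (wl*(1+z) - x0)/xRes; a quick numeric sketch with hypothetical grid values:

    # Hypothetical wavelength grid: 1200-1300 Angstroms sampled every 0.01 A.
    x0, xRes = 1200.0, 0.01
    wl, z = 1215.67, 0.05            # rest-frame HI Lya at redshift 0.05

    redWl = (z + 1) * wl             # observed wavelength, ~1276.45 A
    indexRedWl = int((redWl - x0) / xRes)   # index 7645 in the flux array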
-def _find_complexes(x, yDat, complexLim=.999, fitLim=.99,
- minLength =3, maxLength=1000, splitLim=.99):
- """Breaks up the wavelength space into groups
- where there is some absorption.
-
- Parameters
- ----------
- x : (N) ndarray
- array of wavelengths
- yDat : (N) ndarray
- array of flux corresponding to the wavelengths given
- in x. (needs to be the same size as x)
- complexLim : float, optional
- Maximum flux to start the edge of an absorption complex. Different
- from fitLim because it decides extent of a complex rather than
- whether or not a complex is accepted.
-    fitLim : float, optional
-        Maximum flux at which the level of absorption will trigger
-        identification of the region as an absorption complex. Default = .99.
-        (ex: for fitLim=.98, a region where all the flux is between 1.0 and
-        .99 will not be separated out to be fit as an absorbing complex, but
-        a region that contains a point where the flux is .97 will be fit
-        as an absorbing complex.)
- minLength : int, optional
- number of cells required for a complex to be included.
- default is 3 cells.
- maxLength : int, optional
- number of cells required for a complex to be split up. Default
- is 1000 cells.
- splitLim : float, optional
-        if attempting to split a region for being larger than maxLength
-        the point of the split must have a flux greater than splitLim
-        (ie: very little absorption at the split point). Default = .99.
-
- Returns
- -------
- cBounds : (3,)
- list of bounds in the form [[i0,i1,i2],...] where i0 is the
- index of the maximum flux for a complex, i1 is the index of the
- beginning of the complex, and i2 is the index of the end of the
- complex. Indexes refer to the indices of x and yDat.
- """
-
- #Initialize empty list of bounds
- cBounds=[]
-
- #Iterate through cells of flux
- i=0
-    while (i<len(x)):
-
-        #Start tracking at a region that surpasses flux of edge
-        if yDat[i]<complexLim:
-
-            #Iterate through until we reach the next edge
-            j=0
-            while yDat[i+j]<complexLim:
-                j=j+1
-
-            #Check if the complex is big enough
-            if j>minLength:
-
-                #Check if there is enough absorption for the complex to
-                #   be included
-                cPeak = yDat[i:i+j].argmin()
-                if yDat[cPeak+i]<fitLim:
-                    cBounds.append([cPeak+i,i,i+j])
-
-            i=i+j
-        i=i+1
-
-    i=0
-    #Iterate through the bounds
-    for b in cBounds:
-
-        #Check if the region needs to be divided
-        if b[2]-b[1]>maxLength:
-
- split = _split_region(yDat,b,splitLim)
-
- if split:
-
- #add the two regions separately
- cBounds.insert(i+1,split[0])
- cBounds.insert(i+2,split[1])
-
- #Remove the original region
- cBounds.pop(i)
- i=i+1
- i=i+1
-
- return cBounds
-
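A short sketch (not in the original module) of what _find_complexes returns for a single synthetic absorption dip; the values are illustrative only:

    import numpy as np

    # Flat continuum with one Gaussian dip centered at 1250 A (sigma = 0.5 A).
    x = np.linspace(1200.0, 1300.0, 1001)
    flux = 1.0 - 0.5 * np.exp(-0.5 * ((x - 1250.0) / 0.5) ** 2)

    bounds = _find_complexes(x, flux)
    # Each entry is [i_peak, i_start, i_end]; here a single complex is found
    # with its peak (minimum flux) at index 500, i.e. x ~ 1250 A.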
-
-def _split_region(yDat,b,splitLim):
- #Find the minimum absorption in the middle two quartiles of
- # the large complex
-
-    q=(b[2]-b[1])//4  # integer division so the slice indices stay ints
- cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
-
- #Only break it up if the minimum absorption is actually low enough
- if yDat[cut]>splitLim:
-
- #Get the new two peaks
- b1Peak = yDat[b[1]:cut].argmin()+b[1]
- b2Peak = yDat[cut:b[2]].argmin()+cut
-
- region_1 = [b1Peak,b[1],cut]
- region_2 = [b2Peak,cut,b[2]]
-
- return [region_1,region_2]
-
- else:
-
- return []
-
-
-
-def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
- """
- Calculates the normalized flux for a region of wavelength space
- generated by a set of absorption lines.
-
- Parameters
- ----------
- x : (N) ndarray
- Array of wavelength
- linesP: (3,) ndarray
- Array giving sets of line parameters in
- form [[N1, b1, z1], ...]
- speciesDict : dictionary
- Dictionary containing all relevant parameters needed
- to create an absorption line of a given species (f,Gamma,lambda0)
-
- Returns
- -------
- flux : (N) ndarray
- Array of normalized flux generated by the line parameters
- given in linesP over the wavelength space given in x. Same size as x.
- """
- y=0
- for p in linesP:
- for i in range(speciesDict['numLines']):
- f=speciesDict['f'][i]
- g=speciesDict['Gamma'][i]
- wl=speciesDict['wavelength'][i]
- y = y+ _gen_tau(x,p,f,g,wl)
- if firstLine:
- break
-
- flux = np.exp(-y)
- return flux
-
-def _gen_tau(t, p, f, Gamma, lambda_unshifted):
- """This calculates a flux distribution for given parameters using the yt
- voigt profile generator"""
- N,b,z= p
-
- #Calculating quantities
- tau_o = 1.4973614E-15*N*f*lambda_unshifted/b
- a=7.95774715459E-15*Gamma*lambda_unshifted/b
- x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
-
- H = np.zeros(len(x))
- H = voigt(a,x)
-
- tau = tau_o*H
-
- return tau
-
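For orientation, a worked example of the line-center optical depth computed by _gen_tau for an HI Lyman-alpha absorber (illustrative column density and b value; N in cm^-2, b in km/s, wavelength in Angstroms):

    import numpy as np

    N, b = 1.0e14, 30.0                       # illustrative absorber
    f, Gamma, wl0 = 0.4164, 6.265e8, 1215.67  # HI Lya atomic data

    tau_0 = 1.4973614e-15 * N * f * wl0 / b   # ~2.5 at line center
    a = 7.95774715459e-15 * Gamma * wl0 / b   # damping parameter, ~2e-4

    # H(a, 0) is close to 1 for such a small damping parameter, so the
    # transmitted flux at line center is roughly exp(-tau_0) ~ 0.08.
    print(np.exp(-tau_0))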
-def _voigt_error(pTotal, x, yDat, yFit, speciesDict):
- """
- Gives the error of each point used to optimize the fit of a group
- of absorption lines to a given flux profile.
-
- If the parameters are not in the acceptable range as defined
- in speciesDict, the first value of the error array will
- contain a large value (999), to prevent the optimizer from running
- into negative number problems.
-
- Parameters
- ----------
- pTotal : (3,) ndarray
- Array with form [[N1, b1, z1], ...]
- x : (N) ndarray
- array of wavelengths [nm]
- yDat : (N) ndarray
- desired normalized flux from fits of lines in wavelength
- space given by x
- yFit : (N) ndarray
- previous fit over the wavelength space given by x.
- speciesDict : dictionary
- dictionary containing all relevant parameters needed
- to create an absorption line of a given species (f,Gamma,lambda0)
- as well as max and min values for parameters to be fit
-
- Returns
- -------
- error : (N) ndarray
- the difference between the fit generated by the parameters
- given in pTotal multiplied by the previous fit and the desired
- flux profile, w/ first index modified appropriately for bad
- parameter choices and additional penalty for fitting with a lower
- flux than observed.
- """
-
- pTotal.shape = (-1,3)
- yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
-
- error = yDat-yFit*yNewFit
- error_plus = (yDat-yFit*yNewFit).clip(min=0)
-
- error = error+error_plus
- error[0] = _check_params(pTotal,speciesDict,x)
-
- return error
-
-def _check_params(p, speciesDict,xb):
- """
- Check to see if any of the parameters in p fall outside the range
- given in speciesDict or on the boundaries
-
- Parameters
- ----------
- p : (3,) ndarray
- array with form [[N1, b1, z1], ...]
- speciesDict : dictionary
- dictionary with properties giving the max and min
- values appropriate for each parameter N,b, and z.
- xb : (N) ndarray
- wavelength array [nm]
-
- Returns
- -------
- check : int
- 0 if all values are fine
- 999 if any values fall outside acceptable range
- """
-
- minz = (xb[0])/speciesDict['wavelength'][0]-1
- maxz = (xb[-1])/speciesDict['wavelength'][0]-1
-
- check = 0
- if any(p[:,0] >= speciesDict['maxN']) or\
- any(p[:,0] <= speciesDict['minN']) or\
- any(p[:,1] >= speciesDict['maxb']) or\
- any(p[:,1] <= speciesDict['minb']) or\
- any(p[:,2] >= maxz) or\
- any(p[:,2] <= minz):
- check = 999
-
- return check
-
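A small sketch (values hypothetical) of how _check_params poisons a bad parameter set: parameters inside the bounds return 0, anything outside returns 999, which _voigt_error then writes into error[0]:

    import numpy as np

    # Only the keys read by _check_params are needed here.
    sd = {'wavelength': [1215.67], 'maxN': 1e22, 'minN': 1e11,
          'maxb': 300, 'minb': 1}
    xb = np.linspace(1216.0, 1217.0, 10)       # z must lie within this window

    p_ok = np.array([[1e14, 30.0, 3.0e-4]])
    p_bad = np.array([[1e14, 500.0, 3.0e-4]])  # b above maxb

    print(_check_params(p_ok, sd, xb))    # -> 0
    print(_check_params(p_bad, sd, xb))   # -> 999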
-def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
-
- """
- Check to see if any of the parameters in p are the
- same as initial parameters and if so, attempt to
- split the region and refit it.
-
- Parameters
- ----------
- p : (3,) ndarray
- array with form [[N1, b1, z1], ...]
- speciesDict : dictionary
- dictionary with properties giving the max and min
- values appropriate for each parameter N,b, and z.
- x : (N) ndarray
- wavelength array [nm]
- """
-
- # Check if anything is a default parameter
- if any(p[:,0] == speciesDict['init_N']) or\
- any(p[:,0] == speciesDict['init_N']*10) or\
- any(p[:,0] == speciesDict['init_N']*100) or\
- any(p[:,0] == speciesDict['init_N']*.1) or\
- any(p[:,1] == speciesDict['init_b']) or\
- any(p[:,1] == speciesDict['maxb']):
-
- # These are the initial bounds
- init_bounds = [yDat.argmin(),0,len(xb)-1]
-
-        # Generous limit for splitting the region (halfway between the continuum and the minimum flux)
- newSplitLim = 1 - (1-min(yDat))*.5
-
- # Attempt to split region
- split = _split_region(yDat,init_bounds,newSplitLim)
-
-        # If we can't split it, just reject it. It's unphysical
- # to just keep the default parameters and we're out of
- # options at this point
- if not split:
- return []
-
- # Else set up the bounds for each region and fit separately
- b1,b2 = split[0][2], split[1][1]
-
- p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
- initz, minSize, errorBound, speciesDict)
-
- p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
- initz, minSize, errorBound, speciesDict)
-
-        # Assemble the final line parameters, keeping in mind that
-        # one or both regions may have fit to nothing
- if np.size(p1)> 0 and np.size(p2)>0:
- p = np.r_[p1,p2]
- elif np.size(p1) > 0:
- p = p1
- else:
- p = p2
-
- return p
-
-
-def _check_numerical_instability(x, p, speciesDict,b):
-
- """
- Check to see if any of the parameters in p are causing
- unstable numerical effects outside the region of fit
-
- Parameters
- ----------
- p : (3,) ndarray
- array with form [[N1, b1, z1], ...]
- speciesDict : dictionary
- dictionary with properties giving the max and min
- values appropriate for each parameter N,b, and z.
- x : (N) ndarray
- wavelength array [nm]
- b : (3) list
- list of integers indicating bounds of region fit in x
- """
-
- remove_lines = []
-
-
- for i,line in enumerate(p):
-
- # First to check if the line is at risk for instability
- if line[1]<5 or line[0] < 1E12:
-
-
- # get all flux that isn't part of fit plus a little wiggle room
- # max and min to prevent boundary errors
-
- flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
- flux = np.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
-
- #Find regions that are absorbing outside the region we fit
- flux_dif = 1 - flux
- absorbing_coefficient = max(abs(flux_dif))
-
-
-            #There shouldn't really be any absorption outside the region
-            #we fit, but we allow some leeway: for high resolution spectra
-            #the tails at the edges can contribute a non-negligible amount
-            #of absorption, and the errors we are looking for here are
-            #very large.
- if absorbing_coefficient > .1:
-
-                # Reject the line: we've tried everything else at this
-                # point and this region cannot be fit reliably.
- remove_lines.append(i)
-
- if remove_lines:
- p = np.delete(p, remove_lines, axis=0)
-
- return p
-
-def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
- """
- This function is designed to output the parameters of the series
- of lines used to fit an absorption spectrum.
-
- The dataset contains entries in the form species/N, species/b
- species/z, and species/complex. The ith entry in each of the datasets
- is the fitted parameter for the ith line fitted to the spectrum for
- the given species. The species names come from the fitted line
- dictionary.
-
- Parameters
- ----------
- lineDic : dictionary
- Dictionary of dictionaries representing the fit lines.
- Top level keys are the species given in orderFits and the corresponding
- entries are dictionaries with the keys 'N','b','z', and 'group#'.
- Each of these corresponds to a list of the parameters for every
- accepted fitted line.
- fileName : string, optional
- Name of the file to output fit to. Default = 'spectrum_fit.h5'
-
- """
- f = h5py.File(file_name, mode='w')
- for ion, params in lineDic.items():
- f.create_dataset("{0}/N".format(ion),data=params['N'])
- f.create_dataset("{0}/b".format(ion),data=params['b'])
- f.create_dataset("{0}/z".format(ion),data=params['z'])
- f.create_dataset("{0}/complex".format(ion),data=params['group#'])
- mylog.info('Writing spectrum fit to {0}'.format(file_name))
- f.close()
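As a companion sketch, the fit file written by _output_fit can be read back per species (assuming h5py and the default, hypothetical file name):

    import h5py

    # _output_fit stores <species>/N, <species>/b, <species>/z, <species>/complex.
    with h5py.File("spectrum_fit.h5", "r") as f:
        for species in f:
            N = f[species]["N"][:]          # column densities of fitted lines
            b = f[species]["b"][:]          # Doppler b parameters
            z = f[species]["z"][:]          # line redshifts
            grp = f[species]["complex"][:]  # absorption complex of each line
            print(species, len(N), "lines")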
diff --git a/yt/analysis_modules/absorption_spectrum/api.py b/yt/analysis_modules/absorption_spectrum/api.py
index 40281fde65f..bb66b6f4b45 100644
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -1,28 +1,7 @@
-"""
-API for absorption_spectrum
+from yt.utilities.exceptions import \
+ YTModuleRemoved
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning(
- "Development of the AbsorptionSpectrum module has been moved to the "
- "Trident package. This version is deprecated and will be removed from yt "
- "in a future release. See https://github.com/trident-project/trident "
- "for further information.")
-
-from .absorption_spectrum import \
- AbsorptionSpectrum
-
-from .absorption_spectrum_fit import \
- generate_total_fit
+raise YTModuleRemoved(
+ "AbsorptionSpectrum",
+ "https://github.com/trident-project/trident",
+ "https://trident.readthedocs.io/")
diff --git a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
deleted file mode 100644
index ece514a1a97..00000000000
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ /dev/null
@@ -1,558 +0,0 @@
-"""
-Unit test for the AbsorptionSpectrum analysis module
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from yt.testing import \
- assert_allclose_units, requires_file, requires_module, \
- assert_almost_equal
-from yt.analysis_modules.absorption_spectrum.absorption_line import \
- voigt_old, voigt_scipy
-from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
-from yt.analysis_modules.cosmological_observation.api import LightRay
-from yt.utilities.answer_testing.framework import \
- GenericArrayTest, \
- requires_answer_testing
-import tempfile
-import os
-import shutil
-from yt.utilities.on_demand_imports import \
- _h5py as h5
-from yt.convenience import load
-
-
-COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
-COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
-GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
-GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
-ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"
-FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5"
-
-@requires_file(COSMO_PLUS)
-@requires_answer_testing()
-def test_absorption_spectrum_cosmo():
- """
- This test generates an absorption spectrum from a compound light ray on a
- grid dataset
- """
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
-
- lr.make_light_ray(seed=1234567,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value,
- gamma, mass, label_threshold=1.e10)
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 912.323660 # Angstroms
- normalization = 1.6e17
- index = 3.0
-
- sp.add_continuum(my_label, field, wavelength, normalization, index)
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
-
- # load just-generated hdf5 file of spectral data (for consistency)
- data = h5.File('spectrum.h5', 'r')
-
- for key in data.keys():
- func = lambda x=key: data[x][:]
- func.__name__ = "{}_cosmo".format(key)
- test = GenericArrayTest(None, func)
- test_absorption_spectrum_cosmo.__name__ = test.description
- yield test
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS_SINGLE)
-@requires_answer_testing()
-def test_absorption_spectrum_non_cosmo():
- """
- This test generates an absorption spectrum from a simple light ray on a
- grid dataset
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS_SINGLE)
-
- ray_start = [0,0,0]
- ray_end = [1,1,1]
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value,
- gamma, mass, label_threshold=1.e10)
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
-
- # load just-generated hdf5 file of spectral data (for consistency)
- data = h5.File('spectrum.h5', 'r')
-
- for key in data.keys():
- func = lambda x=key: data[x][:]
- func.__name__ = "{}_non_cosmo".format(key)
- test = GenericArrayTest(None, func)
- test_absorption_spectrum_non_cosmo.__name__ = test.description
- yield test
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS_SINGLE)
-@requires_answer_testing()
-def test_absorption_spectrum_non_cosmo_novpec():
- """
- This test generates an absorption spectrum from a simple light ray on a
- grid dataset
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS_SINGLE)
-
- ray_start = [0,0,0]
- ray_end = [1,1,1]
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5', use_peculiar_velocity=False)
-
- sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value,
- gamma, mass, label_threshold=1.e10)
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=False)
-
- # load just-generated hdf5 file of spectral data (for consistency)
- data = h5.File('spectrum.h5', 'r')
-
- for key in data.keys():
- func = lambda x=key: data[x][:]
- func.__name__ = "{}_non_cosmo_novpec".format(key)
- test = GenericArrayTest(None, func)
- test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description
- yield test
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS_SINGLE)
-def test_equivalent_width_conserved():
- """
- This tests that the equivalent width of the optical depth is conserved
- regardless of the bin width employed in wavelength space.
- Unresolved lines should still deposit optical depth into the spectrum.
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS_SINGLE)
-
- ray_start = [0,0,0]
- ray_end = [1,1,1]
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wave = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- lambda_min= 1200
- lambda_max= 1300
- lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
- total_tau = []
-
- for lambda_bin_width in lambda_bin_widths:
- n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1
- sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max,
- n_lambda=n_lambda)
- sp.add_line(my_label, field, wave, f_value, gamma, mass)
- wavelength, flux = sp.make_spectrum('lightray.h5')
- total_tau.append((lambda_bin_width * sp.tau_field).sum())
-
- # assure that the total tau values are all within 1e-3 of each other
- for tau in total_tau:
- assert_almost_equal(tau, total_tau[0], 3)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-
-@requires_file(COSMO_PLUS_SINGLE)
-@requires_module("astropy")
-def test_absorption_spectrum_fits():
- """
- This test generates an absorption spectrum and saves it as a fits file.
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS_SINGLE)
-
- ray_start = [0,0,0]
- ray_end = [1,1,1]
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value,
- gamma, mass, label_threshold=1.e10)
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 912.323660 # Angstroms
- normalization = 1.6e17
- index = 3.0
-
- sp.add_continuum(my_label, field, wavelength, normalization, index)
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.fits',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-
-@requires_module("scipy")
-def test_voigt_profiles():
- a = 1.7e-4
- x = np.linspace(5.0, -3.6, 60)
- assert_allclose_units(voigt_old(a, x), voigt_scipy(a, x), 1e-8)
-
-@requires_file(GIZMO_PLUS)
-@requires_answer_testing()
-def test_absorption_spectrum_cosmo_sph():
- """
- This test generates an absorption spectrum from a compound light ray on a
- particle dataset
- """
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01)
-
- lr.make_light_ray(seed=1234567,
- fields=[('gas', 'temperature'),
- ('gas', 'H_number_density')],
- data_filename='lightray.h5')
-
- sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
-
- my_label = 'HI Lya'
- field = ('gas', 'H_number_density')
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value,
- gamma, mass, label_threshold=1.e10)
-
- my_label = 'HI Lya'
- field = ('gas', 'H_number_density')
- wavelength = 912.323660 # Angstroms
- normalization = 1.6e17
- index = 3.0
-
- sp.add_continuum(my_label, field, wavelength, normalization, index)
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
-
- # load just-generated hdf5 file of spectral data (for consistency)
- data = h5.File('spectrum.h5', 'r')
-
- for key in data.keys():
- func = lambda x=key: data[x][:]
- func.__name__ = "{}_cosmo_sph".format(key)
- test = GenericArrayTest(None, func)
- test_absorption_spectrum_cosmo_sph.__name__ = test.description
- yield test
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(GIZMO_PLUS_SINGLE)
-@requires_answer_testing()
-def test_absorption_spectrum_non_cosmo_sph():
- """
- This test generates an absorption spectrum from a simple light ray on a
- particle dataset
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- ds = load(GIZMO_PLUS_SINGLE)
- lr = LightRay(ds)
- ray_start = ds.domain_left_edge
- ray_end = ds.domain_right_edge
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=[('gas', 'temperature'),
- ('gas', 'H_number_density')],
- data_filename='lightray.h5')
-
- sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
-
- my_label = 'HI Lya'
- field = ('gas', 'H_number_density')
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value,
- gamma, mass, label_threshold=1.e10)
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
-
- # load just-generated hdf5 file of spectral data (for consistency)
- data = h5.File('spectrum.h5', 'r')
-
- for key in data.keys():
- func = lambda x=key: data[x][:]
- func.__name__ = "{}_non_cosmo_sph".format(key)
- test = GenericArrayTest(None, func)
- test_absorption_spectrum_non_cosmo_sph.__name__ = test.description
- yield test
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(ISO_GALAXY)
-@requires_answer_testing()
-def test_absorption_spectrum_with_continuum():
- """
- This test generates an absorption spectrum from a simple light ray on a
- grid dataset and adds Lyman alpha and Lyman continuum to it
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- ds = load(ISO_GALAXY)
- lr = LightRay(ds)
-
- ray_start = ds.domain_left_edge
- ray_end = ds.domain_right_edge
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
-
- my_label = 'HI Lya'
- field = 'H_number_density'
- wavelength = 1215.6700 # Angstroms
- f_value = 4.164E-01
- gamma = 6.265e+08
- mass = 1.00794
-
- sp.add_line(my_label, field, wavelength, f_value,
- gamma, mass, label_threshold=1.e10)
-
- my_label = 'Ly C'
- field = 'H_number_density'
- wavelength = 912.323660 # Angstroms
- normalization = 1.6e17
- index = 3.0
-
- sp.add_continuum(my_label, field, wavelength, normalization, index)
-
- wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.h5',
- line_list_file='lines.txt',
- use_peculiar_velocity=True)
-
- # load just-generated hdf5 file of spectral data (for consistency)
- data = h5.File('spectrum.h5', 'r')
-
- for key in data.keys():
- func = lambda x=key: data[x][:]
- func.__name__ = "{}_continuum".format(key)
- test = GenericArrayTest(None, func)
- test_absorption_spectrum_with_continuum.__name__ = test.description
- yield test
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(FIRE)
-def test_absorption_spectrum_with_zero_field():
- """
-    This test generates an absorption spectrum from a particle dataset
-    in which some of the fields deposited along the ray are zero
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- ds = load(FIRE)
- lr = LightRay(ds)
-
- # Define species and associated parameters to add to continuum
- # Parameters used for both adding the transition to the spectrum
- # and for fitting
- # Note that for single species that produce multiple lines
- # (as in the OVI doublet), 'numLines' will be equal to the number
- # of lines, and f,gamma, and wavelength will have multiple values.
-
- HI_parameters = {
- 'name': 'HI',
- 'field': 'H_number_density',
- 'f': [.4164],
- 'Gamma': [6.265E8],
- 'wavelength': [1215.67],
- 'mass': 1.00794,
- 'numLines': 1,
- 'maxN': 1E22, 'minN': 1E11,
- 'maxb': 300, 'minb': 1,
- 'maxz': 6, 'minz': 0,
- 'init_b': 30,
- 'init_N': 1E14
- }
-
- species_dicts = {'HI': HI_parameters}
-
-
- # Get all fields that need to be added to the light ray
- fields = [('gas','temperature')]
- for s, params in species_dicts.items():
- fields.append(params['field'])
-
- # With a single dataset, a start_position and
- # end_position or trajectory must be given.
- # Trajectory should be given as (r, theta, phi)
- lr.make_light_ray(
- start_position=ds.arr([0., 0., 0.], 'unitary'),
- end_position=ds.arr([1., 1., 1.], 'unitary'),
- solution_filename='test_lightraysolution.txt',
- data_filename='test_lightray.h5',
- fields=fields)
-
-    # Create an AbsorptionSpectrum object extending from
-    # lambda = 900 to lambda = 1400, with 50000 pixels
- sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
-
- # Iterate over species
- for s, params in species_dicts.items():
- # Iterate over transitions for a single species
- for i in range(params['numLines']):
- # Add the lines to the spectrum
- sp.add_line(
- s, params['field'],
- params['wavelength'][i], params['f'][i],
- params['Gamma'][i], params['mass'],
- label_threshold=1.e10)
-
-
- # Make and save spectrum
- wavelength, flux = sp.make_spectrum(
- 'test_lightray.h5',
- output_file='test_spectrum.h5',
- line_list_file='test_lines.txt',
- use_peculiar_velocity=True)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
diff --git a/yt/analysis_modules/cosmological_observation/api.py b/yt/analysis_modules/cosmological_observation/api.py
index 2e0dd00d4c0..da1df1ec4b7 100644
--- a/yt/analysis_modules/cosmological_observation/api.py
+++ b/yt/analysis_modules/cosmological_observation/api.py
@@ -1,32 +1,7 @@
-"""
-API for cosmology analysis.
+from yt.utilities.exceptions import \
+ YTModuleRemoved
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning(
- "Development of the CosmologySplice module has been moved to "
- "the yt_astro_analysis package. This version is deprecated "
- "and will be removed from yt in a future release. See "
- "https://github.com/yt-project/yt_astro_analysis for further "
- "information.")
-
-from .cosmology_splice import \
- CosmologySplice
-
-from .light_cone.api import \
- LightCone
-
-from .light_ray.api import \
- LightRay
+raise YTModuleRemoved(
+ "CosmologySplice and LightCone",
+ "https://github.com/yt-project/yt_astro_analysis",
+ "https://yt-astro-analysis.readthedocs.io/")
diff --git a/yt/analysis_modules/cosmological_observation/cosmology_splice.py b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
deleted file mode 100644
index b3dbd6ad764..00000000000
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ /dev/null
@@ -1,344 +0,0 @@
-"""
-CosmologyTimeSeries class and member functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-import os
-
-from yt.convenience import \
- simulation
-from yt.funcs import mylog
-from yt.utilities.cosmology import \
- Cosmology
-from yt.utilities.physical_constants import \
- c
-
-class CosmologySplice(object):
- """
- Class for splicing together datasets to extend over a
- cosmological distance.
- """
-
- def __init__(self, parameter_filename, simulation_type, find_outputs=False):
- self.parameter_filename = parameter_filename
- self.simulation_type = simulation_type
- self.simulation = simulation(parameter_filename, simulation_type,
- find_outputs=find_outputs)
-
- self.cosmology = Cosmology(
- hubble_constant=(self.simulation.hubble_constant),
- omega_matter=self.simulation.omega_matter,
- omega_lambda=self.simulation.omega_lambda)
-
- def create_cosmology_splice(self, near_redshift, far_redshift,
- minimal=True, max_box_fraction=1.0,
- deltaz_min=0.0,
- time_data=True, redshift_data=True):
- r"""Create list of datasets capable of spanning a redshift
- interval.
-
- For cosmological simulations, the physical width of the simulation
- box corresponds to some \Delta z, which varies with redshift.
- Using this logic, one can stitch together a series of datasets to
- create a continuous volume or length element from one redshift to
-        another. This method will return such a list.
-
- Parameters
- ----------
- near_redshift : float
- The nearest (lowest) redshift in the cosmology splice list.
- far_redshift : float
- The furthest (highest) redshift in the cosmology splice list.
- minimal : bool
- If True, the minimum number of datasets is used to connect the
-            initial and final redshift. If False, the list will contain as
-            many entries as possible within the redshift interval.
- Default: True.
- max_box_fraction : float
- In terms of the size of the domain, the maximum length a light
- ray segment can be in order to span the redshift interval from
- one dataset to another. If using a zoom-in simulation, this
- parameter can be set to the length of the high resolution
- region so as to limit ray segments to that size. If the
- high resolution region is not cubical, the smallest side
- should be used.
- Default: 1.0 (the size of the box)
- deltaz_min : float
- Specifies the minimum delta z between consecutive datasets
-            in the returned list.
- Default: 0.0.
- time_data : bool
- Whether or not to include time outputs when gathering
- datasets for time series.
- Default: True.
- redshift_data : bool
- Whether or not to include redshift outputs when gathering
- datasets for time series.
- Default: True.
-
- Examples
- --------
-
- >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
- >>> cosmo = co.create_cosmology_splice(1.0, 0.0)
-
- """
-
- if time_data and redshift_data:
- self.splice_outputs = self.simulation.all_outputs
- elif time_data:
- self.splice_outputs = self.simulation.all_time_outputs
- elif redshift_data:
- self.splice_outputs = self.simulation.all_redshift_outputs
- else:
- mylog.error('Both time_data and redshift_data are False.')
- return
-
- # Link datasets in list with pointers.
- # This is used for connecting datasets together.
- for i, output in enumerate(self.splice_outputs):
- if i == 0:
- output['previous'] = None
- output['next'] = self.splice_outputs[i + 1]
- elif i == len(self.splice_outputs) - 1:
- output['previous'] = self.splice_outputs[i - 1]
- output['next'] = None
- else:
- output['previous'] = self.splice_outputs[i - 1]
- output['next'] = self.splice_outputs[i + 1]
-
- # Calculate maximum delta z for each data dump.
- self.max_box_fraction = max_box_fraction
- self._calculate_deltaz_max()
-
- # Calculate minimum delta z for each data dump.
- self._calculate_deltaz_min(deltaz_min=deltaz_min)
-
- cosmology_splice = []
-
- if near_redshift == far_redshift:
- self.simulation.get_time_series(redshifts=[near_redshift])
- cosmology_splice.append(
- {'time': self.simulation[0].current_time,
- 'redshift': self.simulation[0].current_redshift,
- 'filename': os.path.join(self.simulation[0].fullpath,
- self.simulation[0].basename),
- 'next': None})
- mylog.info("create_cosmology_splice: Using %s for z = %f ." %
- (cosmology_splice[0]['filename'], near_redshift))
- return cosmology_splice
-
- # Use minimum number of datasets to go from z_i to z_f.
- if minimal:
-
- z_Tolerance = 1e-3
- z = far_redshift
-
- # Sort data outputs by proximity to current redshift.
- self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift']))
- cosmology_splice.append(self.splice_outputs[0])
- z = cosmology_splice[-1]["redshift"]
- z_target = z - cosmology_splice[-1]["dz_max"]
-
- # fill redshift space with datasets
- while ((z_target > near_redshift) and
- (np.abs(z_target - near_redshift) > z_Tolerance)):
-
- # Move forward from last slice in stack until z > z_max.
- current_slice = cosmology_splice[-1]
-
- while current_slice["next"] is not None:
- current_slice = current_slice['next']
- if current_slice["next"] is None:
- break
- if current_slice["next"]["redshift"] < z_target:
- break
-
- if current_slice["redshift"] < z_target:
- need_fraction = self.cosmology.comoving_radial_distance(
- current_slice["redshift"], z) / \
- self.simulation.box_size
- raise RuntimeError(
- ("Cannot create cosmology splice: " +
- "Getting from z = %f to %f requires " +
- "max_box_fraction = %f, but max_box_fraction "
- "is set to %f") %
- (z, current_slice["redshift"],
- need_fraction, max_box_fraction))
-
- cosmology_splice.append(current_slice)
- z = current_slice["redshift"]
- z_target = z - current_slice["dz_max"]
-
- # Make light ray using maximum number of datasets (minimum spacing).
- else:
- # Sort data outputs by proximity to current redshift.
- self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
- obj['redshift']))
- # For first data dump, choose closest to desired redshift.
- cosmology_splice.append(self.splice_outputs[0])
-
- nextOutput = cosmology_splice[-1]['next']
- while (nextOutput is not None):
- if (nextOutput['redshift'] <= near_redshift):
- break
- if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift']) >
- cosmology_splice[-1]['dz_min']):
- cosmology_splice.append(nextOutput)
- nextOutput = nextOutput['next']
- if (cosmology_splice[-1]['redshift'] -
- cosmology_splice[-1]['dz_max']) > near_redshift:
- mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
- near_redshift = cosmology_splice[-1]['redshift'] - \
- cosmology_splice[-1]['dz_max']
-
- mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
- (len(cosmology_splice), far_redshift, near_redshift))
-
- # change the 'next' and 'previous' pointers to point to the correct outputs
- # for the created splice
- for i, output in enumerate(cosmology_splice):
- if len(cosmology_splice) == 1:
- output['previous'] = None
- output['next'] = None
- elif i == 0:
- output['previous'] = None
- output['next'] = cosmology_splice[i + 1]
- elif i == len(cosmology_splice) - 1:
- output['previous'] = cosmology_splice[i - 1]
- output['next'] = None
- else:
- output['previous'] = cosmology_splice[i - 1]
- output['next'] = cosmology_splice[i + 1]
-
- self.splice_outputs.sort(key=lambda obj: obj['time'])
- return cosmology_splice
-
- def plan_cosmology_splice(self, near_redshift, far_redshift,
- decimals=3, filename=None,
- start_index=0):
- r"""Create imaginary list of redshift outputs to maximally
- span a redshift interval.
-
- If you want to run a cosmological simulation that will have just
- enough data outputs to create a cosmology splice,
- this method will calculate a list of redshifts outputs that will
- minimally connect a redshift interval.
-
- Parameters
- ----------
- near_redshift : float
- The nearest (lowest) redshift in the cosmology splice list.
- far_redshift : float
- The furthest (highest) redshift in the cosmology splice list.
- decimals : int
- The decimal place to which the output redshift will be rounded.
- If the decimal place in question is nonzero, the redshift will
-            be rounded up to ensure continuity of the splice. Default: 3.
- filename : string
- If provided, a file will be written with the redshift outputs in
- the form in which they should be given in the enzo dataset.
- Default: None.
- start_index : int
- The index of the first redshift output. Default: 0.
-
- Examples
- --------
- >>> from yt.analysis_modules.api import CosmologySplice
- >>> my_splice = CosmologySplice('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
- >>> my_splice.plan_cosmology_splice(0.0, 0.1, filename='redshifts.out')
-
- """
-
- z = far_redshift
- outputs = []
-
- while z > near_redshift:
- rounded = np.round(z, decimals=decimals)
- if rounded - z < 0:
- rounded += np.power(10.0, (-1.0*decimals))
- z = rounded
-
- deltaz_max = self._deltaz_forward(z, self.simulation.box_size *
- self.max_box_fraction)
- outputs.append({'redshift': z, 'dz_max': deltaz_max})
- z -= deltaz_max
-
- mylog.info("%d data dumps will be needed to get from z = %f to %f." %
- (len(outputs), near_redshift, far_redshift))
-
- if filename is not None:
- self.simulation._write_cosmology_outputs(filename, outputs,
- start_index,
- decimals=decimals)
- return outputs
-
- def _calculate_deltaz_max(self):
- r"""Calculate delta z that corresponds to full box length going
- from z to (z - delta z).
- """
-
- target_distance = self.simulation.box_size * \
- self.max_box_fraction
- for output in self.splice_outputs:
- output['dz_max'] = self._deltaz_forward(output['redshift'],
- target_distance)
-
- def _calculate_deltaz_min(self, deltaz_min=0.0):
- r"""Calculate delta z that corresponds to a single top grid pixel
- going from z to (z - delta z).
- """
-
- target_distance = self.simulation.box_size / \
- self.simulation.domain_dimensions[0]
- for output in self.splice_outputs:
- zf = self._deltaz_forward(output['redshift'],
- target_distance)
- output['dz_min'] = max(zf, deltaz_min)
-
- def _deltaz_forward(self, z, target_distance):
- r"""Calculate deltaz corresponding to moving a comoving distance
- starting from some redshift.
- """
-
- d_Tolerance = 1e-4
- max_Iterations = 100
-
- z1 = z
- # Use Hubble's law for initial guess
- target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
- v = self.cosmology.hubble_parameter(z) * target_distance
- v = min(v, 0.9 * c)
- dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
- z2 = z1 - dz
- distance1 = self.cosmology.quan(0.0, "Mpccm / h")
- distance2 = self.cosmology.comoving_radial_distance(z2, z)
- iteration = 1
-
- while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance):
- m = (distance2 - distance1) / (z2 - z1)
- z1 = z2
- distance1 = distance2
- z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
- distance2 = self.cosmology.comoving_radial_distance(z2, z)
- iteration += 1
- if (iteration > max_Iterations):
- mylog.error("deltaz_forward: Warning - max iterations " +
- "exceeded for z = %f (delta z = %f)." %
- (z, np.abs(z2 - z)))
- break
- return np.abs(z2 - z)
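The `_deltaz_forward` helper above solves for the redshift interval that covers a fixed comoving distance: it seeds dz with a relativistic Doppler guess from Hubble's law and then refines it with a secant iteration on the comoving radial distance. Below is a minimal standalone sketch of the same idea using yt's public `Cosmology` object; the cosmological parameters, tolerance, and 64 Mpccm/h target are illustrative choices, not values taken from this module.

import numpy as np
from yt.utilities.cosmology import Cosmology
from yt.utilities.physical_constants import speed_of_light_cgs as c

def deltaz_forward(co, z, target_distance, tol=1e-4, max_iter=100):
    # Find dz such that the comoving radial distance from (z - dz) to z
    # matches target_distance, using a secant iteration.
    v = min(co.hubble_parameter(z) * target_distance, 0.9 * c)
    beta = float((v / c).in_units(""))
    z1, z2 = z, z - (np.sqrt((1.0 + beta) / (1.0 - beta)) - 1.0)
    d1 = co.quan(0.0, "Mpccm/h")
    d2 = co.comoving_radial_distance(z2, z)
    for _ in range(max_iter):
        if float(abs((d2 - target_distance) / d2)) < tol:
            break
        slope = (d2 - d1) / (z2 - z1)  # comoving distance per unit redshift
        z1, d1 = z2, d2
        z2 = z2 + float(((target_distance - d2) / slope).in_units(""))
        d2 = co.comoving_radial_distance(z2, z)
    return abs(z2 - z)

co = Cosmology(hubble_constant=0.7, omega_matter=0.3, omega_lambda=0.7)
print(deltaz_forward(co, 1.0, co.quan(64.0, "Mpccm/h")))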
diff --git a/yt/analysis_modules/cosmological_observation/light_cone/__init__.py b/yt/analysis_modules/cosmological_observation/light_cone/__init__.py
deleted file mode 100644
index 18ea5c8cef3..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_cone/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-Import stuff for light cone generator.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
diff --git a/yt/analysis_modules/cosmological_observation/light_cone/api.py b/yt/analysis_modules/cosmological_observation/light_cone/api.py
deleted file mode 100644
index a9a74b2119b..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_cone/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for light_cone
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning(
- "Development of the LightCone module has been moved to "
- "the yt_astro_analysis package. This version is deprecated "
- "and will be removed from yt in a future release. See "
- "https://github.com/yt-project/yt_astro_analysis for further "
- "information.")
-
-from .light_cone import \
- LightCone
diff --git a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
deleted file mode 100644
index c7aa2168da0..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ /dev/null
@@ -1,468 +0,0 @@
-"""
-LightCone class and member functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-import os
-
-from yt.config import \
- ytcfg
-from yt.funcs import \
- ensure_dir, \
- mylog, \
- only_on_root
-from yt.analysis_modules.cosmological_observation.cosmology_splice import \
- CosmologySplice
-from yt.convenience import \
- load
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- parallel_objects, \
- parallel_root_only
-from yt.visualization.image_writer import \
- write_image
-from yt.units.yt_array import \
- YTArray
-from .light_cone_projection import \
- _light_cone_projection
-
-class LightCone(CosmologySplice):
- """
- Initialize a LightCone object.
-
- Parameters
- ----------
- near_redshift : float
- The near (lowest) redshift for the light cone.
- far_redshift : float
- The far (highest) redshift for the light cone.
- observer_redshift : float
- The redshift of the observer.
- Default: 0.0.
- use_minimum_datasets : bool
- If True, the minimum number of datasets is used to connect the initial
- and final redshift. If false, the light cone solution will contain
- as many entries as possible within the redshift interval.
- Default: True.
- deltaz_min : float
- Specifies the minimum :math:`\Delta z` between consecutive datasets in
- the returned list.
- Default: 0.0.
- minimum_coherent_box_fraction : float
- Used with use_minimum_datasets set to False, this parameter specifies
- the fraction of the total box size to be traversed before rerandomizing
- the projection axis and center. This was invented to allow light cones
- with thin slices to sample coherent large scale structure, but in
- practice does not work so well. Try setting this parameter to 1 and
- see what happens.
- Default: 0.0.
- time_data : bool
- Whether or not to include time outputs when gathering
- datasets for time series.
- Default: True.
- redshift_data : bool
- Whether or not to include redshift outputs when gathering
- datasets for time series.
- Default: True.
- find_outputs : bool
- Whether or not to search for datasets in the current
- directory.
- Default: False.
- set_parameters : dict
- Dictionary of parameters to attach to ds.parameters.
- Default: None.
- output_dir : string
- The directory in which images and data files will be written.
- Default: "LC".
- output_prefix : string
- The prefix of all images and data files.
- Default: "LightCone".
-
- """
- def __init__(self, parameter_filename, simulation_type,
- near_redshift, far_redshift,
- observer_redshift=0.0,
- use_minimum_datasets=True, deltaz_min=0.0,
- minimum_coherent_box_fraction=0.0,
- time_data=True, redshift_data=True,
- find_outputs=False, set_parameters=None,
- output_dir="LC", output_prefix="LightCone"):
-
- self.near_redshift = near_redshift
- self.far_redshift = far_redshift
- self.observer_redshift = observer_redshift
- self.use_minimum_datasets = use_minimum_datasets
- self.deltaz_min = deltaz_min
- self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
- if set_parameters is None:
- self.set_parameters = {}
- else:
- self.set_parameters = set_parameters
- self.output_dir = output_dir
- self.output_prefix = output_prefix
-
- # Create output directory.
- ensure_dir(self.output_dir)
-
- # Calculate light cone solution.
- CosmologySplice.__init__(self, parameter_filename, simulation_type,
- find_outputs=find_outputs)
- self.light_cone_solution = \
- self.create_cosmology_splice(self.near_redshift, self.far_redshift,
- minimal=self.use_minimum_datasets,
- deltaz_min=self.deltaz_min,
- time_data=time_data,
- redshift_data=redshift_data)
-
- def calculate_light_cone_solution(self, seed=None, filename=None):
- r"""Create list of projections to be added together to make the light cone.
-
-
- Parameters
- ----------
- seed : int
- The seed for the random number generator. Any light cone solution
- can be reproduced by giving the same random seed. Default: None
- (each solution will be distinct).
- filename : string
- If given, a text file detailing the solution will be written out.
- Default: None.
-
- """
-
- # Don"t use box coherence with maximum projection depths.
- if self.use_minimum_datasets and \
- self.minimum_coherent_box_fraction > 0:
- mylog.info("Setting minimum_coherent_box_fraction to 0 with " +
- "minimal light cone.")
- self.minimum_coherent_box_fraction = 0
-
- # Calculate projection sizes, and get
- # random projection axes and centers.
- seed = int(seed)
- np.random.seed(seed)
-
- # For box coherence, keep track of effective depth travelled.
- box_fraction_used = 0.0
-
- for q in range(len(self.light_cone_solution)):
- if "previous" in self.light_cone_solution[q]:
- del self.light_cone_solution[q]["previous"]
- if "next" in self.light_cone_solution[q]:
- del self.light_cone_solution[q]["next"]
- if q == len(self.light_cone_solution) - 1:
- z_next = self.near_redshift
- else:
- z_next = self.light_cone_solution[q+1]["redshift"]
-
- # Calculate fraction of box required for a depth of delta z
- self.light_cone_solution[q]["box_depth_fraction"] = \
- (self.cosmology.comoving_radial_distance(z_next, \
- self.light_cone_solution[q]["redshift"]) / \
- self.simulation.box_size).in_units("")
-
- # Calculate fraction of box required for width corresponding to
- # requested image size.
- proper_box_size = self.simulation.box_size / \
- (1.0 + self.light_cone_solution[q]["redshift"])
- self.light_cone_solution[q]["box_width_per_angle"] = \
- (self.cosmology.angular_scale(self.observer_redshift,
- self.light_cone_solution[q]["redshift"]) /
- proper_box_size).in_units("1 / degree")
-
- # Simple error check to make sure more than 100% of box depth
- # is never required.
- if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
- mylog.error(("Warning: box fraction required to go from " +
- "z = %f to %f is %f") %
- (self.light_cone_solution[q]["redshift"], z_next,
- self.light_cone_solution[q]["box_depth_fraction"]))
- mylog.error(("Full box delta z is %f, but it is %f to the " +
- "next data dump.") %
- (self.light_cone_solution[q]["dz_max"],
- self.light_cone_solution[q]["redshift"]-z_next))
-
- # Get projection axis and center.
- # If using box coherence, only get random axis and center if enough
- # of the box has been used, or if box_fraction_used will be greater
- # than 1 after this slice.
- if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
- (box_fraction_used > self.minimum_coherent_box_fraction) or \
- (box_fraction_used +
- self.light_cone_solution[q]["box_depth_fraction"] > 1.0):
- # Random axis and center.
- self.light_cone_solution[q]["projection_axis"] = \
- np.random.randint(0, 3)
- self.light_cone_solution[q]["projection_center"] = \
- np.random.random(3)
- box_fraction_used = 0.0
- else:
- # Same axis and center as previous slice,
- # but with depth center shifted.
- self.light_cone_solution[q]["projection_axis"] = \
- self.light_cone_solution[q-1]["projection_axis"]
- self.light_cone_solution[q]["projection_center"] = \
- self.light_cone_solution[q-1]["projection_center"].copy()
- self.light_cone_solution[q]["projection_center"]\
- [self.light_cone_solution[q]["projection_axis"]] += \
- 0.5 * (self.light_cone_solution[q]["box_depth_fraction"] +
- self.light_cone_solution[q-1]["box_depth_fraction"])
- if self.light_cone_solution[q]["projection_center"]\
- [self.light_cone_solution[q]["projection_axis"]] >= 1.0:
- self.light_cone_solution[q]["projection_center"]\
- [self.light_cone_solution[q]["projection_axis"]] -= 1.0
-
- box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"]
-
- # Write solution to a file.
- if filename is not None:
- self._save_light_cone_solution(filename=filename)
-
- def project_light_cone(self, field_of_view, image_resolution, field,
- weight_field=None, photon_field=False,
- save_stack=True, save_final_image=True,
- save_slice_images=False,
- cmap_name=None,
- njobs=1, dynamic=False):
- r"""Create projections for light cone, then add them together.
-
- Parameters
- ----------
- field_of_view : YTQuantity or tuple of (float, str)
- The field of view of the image and the units.
- image_resolution : YTQuantity or tuple of (float, str)
- The size of each image pixel and the units.
- field : string
- The projected field.
- weight_field : string
- the weight field of the projection. This has the same meaning as
- in standard projections.
- Default: None.
- photon_field : bool
-            if True, the projection data for each slice is divided by
-            4 pi R^2, where R is the luminosity distance between the observer
-            and the slice redshift.
- Default: False.
- save_stack : bool
- if True, the light cone data including each individual
- slice is written to an hdf5 file.
- Default: True.
- save_final_image : bool
- if True, save an image of the final light cone projection.
- Default: True.
- save_slice_images : bool
- save images for each individual projection slice.
- Default: False.
- cmap_name : string
- color map for images.
- Default: your default colormap.
- njobs : int
- The number of parallel jobs over which the light cone projection
- will be split. Choose -1 for one processor per individual
- projection and 1 to have all processors work together on each
- projection.
- Default: 1.
- dynamic : bool
- If True, use dynamic load balancing to create the projections.
- Default: False.
-
- """
-
- if cmap_name is None:
- cmap_name = ytcfg.get("yt", "default_colormap")
-
- if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
- field_of_view = self.simulation.quan(field_of_view[0],
- field_of_view[1])
- elif not isinstance(field_of_view, YTArray):
- raise RuntimeError("field_of_view argument must be either a YTQuantity " +
- "or a tuple of type (float, str).")
- if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
- image_resolution = self.simulation.quan(image_resolution[0],
- image_resolution[1])
- elif not isinstance(image_resolution, YTArray):
- raise RuntimeError("image_resolution argument must be either a YTQuantity " +
- "or a tuple of type (float, str).")
-
- # Calculate number of pixels on a side.
- pixels = int((field_of_view / image_resolution).in_units(""))
-
- # Clear projection stack.
- projection_stack = []
- projection_weight_stack = []
- if "object" in self.light_cone_solution[-1]:
- del self.light_cone_solution[-1]["object"]
-
- # for q, output in enumerate(self.light_cone_solution):
- all_storage = {}
- for my_storage, output in parallel_objects(self.light_cone_solution,
- storage=all_storage,
- dynamic=dynamic):
- output["object"] = load(output["filename"])
- output["object"].parameters.update(self.set_parameters)
-
- # Calculate fraction of box required for width corresponding to
- # requested image size.
- proper_box_size = self.simulation.box_size / \
- (1.0 + output["redshift"])
- output["box_width_fraction"] = (output["box_width_per_angle"] *
- field_of_view).in_units("")
-
- frb = _light_cone_projection(output, field, pixels,
- weight_field=weight_field)
-
- if photon_field:
- # Decrement the flux by the luminosity distance.
- # Assume field in frb is in erg/s/cm^2/Hz
- dL = self.cosmology.luminosity_distance(self.observer_redshift,
- output["redshift"])
- proper_box_size = self.simulation.box_size / \
- (1.0 + output["redshift"])
- pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
- factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
- mylog.info("Distance to slice = %s" % dL)
-                frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
-
- if weight_field is None:
- my_storage.result = {"field": frb[field]}
- else:
- my_storage.result = {"field": (frb[field] *
- frb["weight_field"]),
- "weight_field": frb["weight_field"]}
-
- del output["object"]
-
- # Combine results from each slice.
- all_slices = list(all_storage.keys())
- all_slices.sort()
- for my_slice in all_slices:
- if save_slice_images:
- name = os.path.join(self.output_dir,
- "%s_%04d_%04d" %
- (self.output_prefix,
- my_slice, len(self.light_cone_solution)))
- if weight_field is None:
- my_image = all_storage[my_slice]["field"]
- else:
- my_image = all_storage[my_slice]["field"] / \
- all_storage[my_slice]["weight_field"]
- only_on_root(write_image, np.log10(my_image),
- "%s_%s.png" % (name, field), cmap_name=cmap_name)
-
- projection_stack.append(all_storage[my_slice]["field"])
- if weight_field is not None:
-                projection_weight_stack.append(all_storage[my_slice]["weight_field"])
-
- projection_stack = self.simulation.arr(projection_stack)
- projection_weight_stack = self.simulation.arr(projection_weight_stack)
-
- # Add up slices to make light cone projection.
- if (weight_field is None):
- light_cone_projection = projection_stack.sum(axis=0)
- else:
- light_cone_projection = \
- projection_stack.sum(axis=0) / \
- self.simulation.arr(projection_weight_stack).sum(axis=0)
-
- filename = os.path.join(self.output_dir, self.output_prefix)
-
- # Write image.
- if save_final_image:
- only_on_root(write_image, np.log10(light_cone_projection),
- "%s_%s.png" % (filename, field), cmap_name=cmap_name)
-
- # Write stack to hdf5 file.
- if save_stack:
- self._save_light_cone_stack(field, weight_field,
- projection_stack, projection_weight_stack,
- filename=filename,
- attrs={"field_of_view": str(field_of_view),
- "image_resolution": str(image_resolution)})
-
- @parallel_root_only
- def _save_light_cone_solution(self, filename="light_cone.dat"):
- "Write out a text file with information on light cone solution."
-
- mylog.info("Saving light cone solution to %s." % filename)
-
- f = open(filename, "w")
- f.write("# parameter_filename = %s\n" % self.parameter_filename)
- f.write("\n")
- f.write("# Slice Dataset Redshift depth/box " + \
- "width/degree axis center\n")
- for q, output in enumerate(self.light_cone_solution):
- f.write(("%04d %s %f %f %f %d %f %f %f\n") %
- (q, output["filename"], output["redshift"],
- output["box_depth_fraction"], output["box_width_per_angle"],
- output["projection_axis"], output["projection_center"][0],
- output["projection_center"][1], output["projection_center"][2]))
- f.close()
-
- @parallel_root_only
- def _save_light_cone_stack(self, field, weight_field,
- pstack, wstack,
- filename=None, attrs=None):
- "Save the light cone projection stack as a 3d array in and hdf5 file."
-
- if attrs is None:
- attrs = {}
-
- # Make list of redshifts to include as a dataset attribute.
- redshift_list = np.array([my_slice["redshift"] \
- for my_slice in self.light_cone_solution])
-
- field_node = "%s_%s" % (field, weight_field)
- weight_field_node = "weight_field_%s" % weight_field
-
- if (filename is None):
- filename = os.path.join(self.output_dir, "%s_data" % self.output_prefix)
- if not(filename.endswith(".h5")):
- filename += ".h5"
-
- if pstack.size == 0:
- mylog.info("save_light_cone_stack: light cone projection is empty.")
- return
-
- mylog.info("Writing light cone data to %s." % filename)
-
- fh = h5py.File(filename, mode="a")
-
- if field_node in fh:
- del fh[field_node]
-
- mylog.info("Saving %s to %s." % (field_node, filename))
- dataset = fh.create_dataset(field_node,
- data=pstack)
- dataset.attrs["units"] = str(pstack.units)
- dataset.attrs["redshifts"] = redshift_list
- dataset.attrs["observer_redshift"] = np.float(self.observer_redshift)
- for key, value in attrs.items():
- dataset.attrs[key] = value
-
- if wstack.size > 0:
- if weight_field_node in fh:
- del fh[weight_field_node]
-
- mylog.info("Saving %s to %s." % (weight_field_node, filename))
- dataset = fh.create_dataset(weight_field_node,
- data=wstack)
- dataset.attrs["units"] = str(wstack.units)
- dataset.attrs["redshifts"] = redshift_list
- dataset.attrs["observer_redshift"] = np.float(self.observer_redshift)
- for key, value in attrs.items():
- dataset.attrs[key] = value
-
- fh.close()
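The reduction at the end of `project_light_cone` is a per-pixel sum over the slice axis, normalized by the summed weights when a weight field is used. A small standalone numpy illustration of that reduction follows; the shapes and random values are made up for the example.

import numpy as np

rng = np.random.default_rng(0)
# Stand-in stack of three 4x4 slice projections. With a weight field, each
# slice entry holds field * weight and the weights are stacked separately.
projection_stack = rng.random((3, 4, 4))
projection_weight_stack = rng.random((3, 4, 4))

# Unweighted light cone: straight sum over the slice axis.
unweighted = projection_stack.sum(axis=0)

# Weighted light cone: sum(field * weight) / sum(weight), per pixel.
weighted = projection_stack.sum(axis=0) / projection_weight_stack.sum(axis=0)
print(unweighted.shape, weighted.shape)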
diff --git a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
deleted file mode 100644
index 33c2b7856e6..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ /dev/null
@@ -1,265 +0,0 @@
-"""
-Create randomly centered, tiled projections to be used in light cones.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import \
- mylog
-from yt.units.yt_array import \
- uconcatenate
-from yt.visualization.fixed_resolution import \
- FixedResolutionBuffer
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- parallel_blocking_call
-
-@parallel_blocking_call
-def _light_cone_projection(my_slice, field, pixels, weight_field=None,
- save_image=False, field_cuts=None):
- "Create a single projection to be added into the light cone stack."
-
- # We are just saving the projection object, so only the projection axis
- # needs to be considered since the lateral shifting and tiling occurs after
- # the projection object is made.
- # Likewise, only the box_depth_fraction needs to be considered.
-
- mylog.info("Making projection at z = %f from %s." % \
- (my_slice["redshift"], my_slice["filename"]))
-
- region_center = [0.5 * (my_slice["object"].domain_right_edge[q] +
- my_slice["object"].domain_left_edge[q]) \
- for q in range(my_slice["object"].dimensionality)]
-
- # 1. The Depth Problem
- # Use coordinate field cut in line of sight to cut projection to proper depth.
- if field_cuts is None:
- these_field_cuts = []
- else:
- these_field_cuts = field_cuts.copy()
-
- if (my_slice["box_depth_fraction"] < 1):
- axis = ("x", "y", "z")[my_slice["projection_axis"]]
- depthLeft = \
- my_slice["projection_center"][my_slice["projection_axis"]] \
- - 0.5 * my_slice["box_depth_fraction"]
- depthRight = \
- my_slice["projection_center"][my_slice["projection_axis"]] \
- + 0.5 * my_slice["box_depth_fraction"]
- if (depthLeft < 0):
- cut_mask = (
- "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & "
- " (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | "
- "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
- " (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \
- (axis, axis, axis, axis, depthRight,
- axis, axis, (depthLeft+1), axis, axis)
- elif (depthRight > 1):
- cut_mask = (
- "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & "
- "(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | "
- "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
- "(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \
- (axis, axis, axis, axis, (depthRight-1),
- axis, axis, depthLeft, axis, axis)
- else:
- cut_mask = (
- "(obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
- "(obj['index', '%s'] - 0.5*obj['index', '%s'] <= %f)") % \
- (axis, axis, depthLeft, axis, axis, depthRight)
-
- these_field_cuts.append(cut_mask)
-
- data_source = my_slice["object"].all_data()
- cut_region = data_source.cut_region(these_field_cuts)
-
- # Make projection.
- proj = my_slice["object"].proj(field, my_slice["projection_axis"],
- weight_field, center=region_center,
- data_source=cut_region)
- proj_field = proj.field[0]
-
- del data_source, cut_region
-
- # 2. The Tile Problem
- # Tile projection to specified width.
-
- # Original projection data.
- original_px = proj.field_data["px"].in_units("code_length").copy()
- original_py = proj.field_data["py"].in_units("code_length").copy()
- original_pdx = proj.field_data["pdx"].in_units("code_length").copy()
- original_pdy = proj.field_data["pdy"].in_units("code_length").copy()
- original_field = proj.field_data[proj_field].copy()
- original_weight_field = proj.field_data["weight_field"].copy()
-
- for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]:
- proj.field_data[my_field] = [proj.field_data[my_field]]
-
- # Copy original into offset positions to make tiles.
- for x in range(int(np.ceil(my_slice["box_width_fraction"]))):
- x = my_slice["object"].quan(x, "code_length")
- for y in range(int(np.ceil(my_slice["box_width_fraction"]))):
- y = my_slice["object"].quan(y, "code_length")
- if ((x + y) > 0):
- proj.field_data["px"] += [original_px+x]
- proj.field_data["py"] += [original_py+y]
- proj.field_data["pdx"] += [original_pdx]
- proj.field_data["pdy"] += [original_pdy]
- proj.field_data["weight_field"] += [original_weight_field]
- proj.field_data[proj_field] += [original_field]
-
- for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]:
- proj.field_data[my_field] = \
- my_slice["object"].arr(proj.field_data[my_field]).flatten()
-
- # Delete originals.
- del original_px
- del original_py
- del original_pdx
- del original_pdy
- del original_field
- del original_weight_field
-
- # 3. The Shift Problem
- # Shift projection by random x and y offsets.
-
- image_axes = np.roll(np.arange(3), -my_slice["projection_axis"])[1:]
- di_left_x = my_slice["object"].domain_left_edge[image_axes[0]]
- di_right_x = my_slice["object"].domain_right_edge[image_axes[0]]
- di_left_y = my_slice["object"].domain_left_edge[image_axes[1]]
- di_right_y = my_slice["object"].domain_right_edge[image_axes[1]]
-
- offset = my_slice["projection_center"].copy() * \
- my_slice["object"].domain_width
- offset = np.roll(offset, -my_slice["projection_axis"])[1:]
-
- # Shift x and y positions.
- proj.field_data["px"] -= offset[0]
- proj.field_data["py"] -= offset[1]
-
- # Wrap off-edge cells back around to other side (periodic boundary conditions).
- proj.field_data["px"][proj.field_data["px"] < di_left_x] += \
- np.ceil(my_slice["box_width_fraction"]) * di_right_x
- proj.field_data["py"][proj.field_data["py"] < di_left_y] += \
- np.ceil(my_slice["box_width_fraction"]) * di_right_y
-
- # After shifting, some cells have fractional coverage on both sides of the box.
- # Find those cells and make copies to be placed on the other side.
-
- # Cells hanging off the right edge.
- add_x_right = proj.field_data["px"] + 0.5 * proj.field_data["pdx"] > \
- np.ceil(my_slice["box_width_fraction"]) * di_right_x
- add_x_px = proj.field_data["px"][add_x_right]
- add_x_px -= np.ceil(my_slice["box_width_fraction"]) * di_right_x
- add_x_py = proj.field_data["py"][add_x_right]
- add_x_pdx = proj.field_data["pdx"][add_x_right]
- add_x_pdy = proj.field_data["pdy"][add_x_right]
- add_x_field = proj.field_data[proj_field][add_x_right]
- add_x_weight_field = proj.field_data["weight_field"][add_x_right]
- del add_x_right
-
- # Cells hanging off the left edge.
- add_x_left = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < di_left_x
- add2_x_px = proj.field_data["px"][add_x_left]
- add2_x_px += np.ceil(my_slice["box_width_fraction"]) * di_right_x
- add2_x_py = proj.field_data["py"][add_x_left]
- add2_x_pdx = proj.field_data["pdx"][add_x_left]
- add2_x_pdy = proj.field_data["pdy"][add_x_left]
- add2_x_field = proj.field_data[proj_field][add_x_left]
- add2_x_weight_field = proj.field_data["weight_field"][add_x_left]
- del add_x_left
-
- # Cells hanging off the top edge.
- add_y_right = proj.field_data["py"] + 0.5 * proj.field_data["pdy"] > \
- np.ceil(my_slice["box_width_fraction"]) * di_right_y
- add_y_px = proj.field_data["px"][add_y_right]
- add_y_py = proj.field_data["py"][add_y_right]
- add_y_py -= np.ceil(my_slice["box_width_fraction"]) * di_right_y
- add_y_pdx = proj.field_data["pdx"][add_y_right]
- add_y_pdy = proj.field_data["pdy"][add_y_right]
- add_y_field = proj.field_data[proj_field][add_y_right]
- add_y_weight_field = proj.field_data["weight_field"][add_y_right]
- del add_y_right
-
- # Cells hanging off the bottom edge.
- add_y_left = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < di_left_y
- add2_y_px = proj.field_data["px"][add_y_left]
- add2_y_py = proj.field_data["py"][add_y_left]
- add2_y_py += np.ceil(my_slice["box_width_fraction"]) * di_right_y
- add2_y_pdx = proj.field_data["pdx"][add_y_left]
- add2_y_pdy = proj.field_data["pdy"][add_y_left]
- add2_y_field = proj.field_data[proj_field][add_y_left]
- add2_y_weight_field = proj.field_data["weight_field"][add_y_left]
- del add_y_left
-
- # Add the hanging cells back to the projection data.
- proj.field_data["px"] = uconcatenate(
- [proj.field_data["px"], add_x_px,
- add_y_px, add2_x_px, add2_y_px])
- proj.field_data["py"] = uconcatenate(
- [proj.field_data["py"], add_x_py,
- add_y_py, add2_x_py, add2_y_py])
- proj.field_data["pdx"] = uconcatenate(
- [proj.field_data["pdx"], add_x_pdx,
- add_y_pdx, add2_x_pdx, add2_y_pdx])
- proj.field_data["pdy"] = uconcatenate(
- [proj.field_data["pdy"], add_x_pdy,
- add_y_pdy, add2_x_pdy, add2_y_pdy])
- proj.field_data[proj_field] = uconcatenate(
- [proj.field_data[proj_field], add_x_field,
- add_y_field, add2_x_field, add2_y_field])
- proj.field_data["weight_field"] = uconcatenate(
- [proj.field_data["weight_field"],
- add_x_weight_field, add_y_weight_field,
- add2_x_weight_field, add2_y_weight_field])
-
- # Delete original copies of hanging cells.
- del add_x_px, add_y_px, add2_x_px, add2_y_px
- del add_x_py, add_y_py, add2_x_py, add2_y_py
- del add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx
- del add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy
- del add_x_field, add_y_field, add2_x_field, add2_y_field
- del add_x_weight_field, add_y_weight_field, add2_x_weight_field, add2_y_weight_field
-
- # Tiles were made rounding up the width to the nearest integer.
- # Cut off the edges to get the specified width.
- # Cut in the x direction.
- cut_x = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < \
- di_right_x * my_slice["box_width_fraction"]
- proj.field_data["px"] = proj.field_data["px"][cut_x]
- proj.field_data["py"] = proj.field_data["py"][cut_x]
- proj.field_data["pdx"] = proj.field_data["pdx"][cut_x]
- proj.field_data["pdy"] = proj.field_data["pdy"][cut_x]
- proj.field_data[proj_field] = proj.field_data[proj_field][cut_x]
- proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_x]
- del cut_x
-
- # Cut in the y direction.
- cut_y = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < \
- di_right_y * my_slice["box_width_fraction"]
- proj.field_data["px"] = proj.field_data["px"][cut_y]
- proj.field_data["py"] = proj.field_data["py"][cut_y]
- proj.field_data["pdx"] = proj.field_data["pdx"][cut_y]
- proj.field_data["pdy"] = proj.field_data["pdy"][cut_y]
- proj.field_data[proj_field] = proj.field_data[proj_field][cut_y]
- proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_y]
- del cut_y
-
- # Create fixed resolution buffer to return back to the light cone object.
- # These buffers will be stacked together to make the light cone.
- frb = FixedResolutionBuffer(proj,
- (di_left_x, di_right_x * my_slice["box_width_fraction"],
- di_left_y, di_right_y * my_slice["box_width_fraction"]),
- (pixels, pixels), antialias=False)
-
- return frb
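The shift-and-wrap step above is easier to follow with plain numbers. Here is a minimal numpy sketch of the periodic shift applied to projected cell positions, using unitless coordinates, a made-up offset, and a tiled width of 2; it omits the fractional-cell duplication and final trimming that the full routine performs.

import numpy as np

rng = np.random.default_rng(42)
width = 2.0                  # np.ceil(box_width_fraction) tiles along the image axis
px = width * rng.random(8)   # tiled projected x positions in [0, width)
offset = 0.37                # random projection center along the image x axis

px = px - offset             # shift by the random center
px[px < 0.0] += width        # wrap cells that fell off the left edge
assert (px >= 0.0).all() and (px < width).all()
print(px)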
diff --git a/yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py b/yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py
deleted file mode 100644
index 18cdaec3c4f..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-light cone generator test
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2017, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.on_demand_imports import \
- _h5py as h5py
-import numpy as np
-import os
-import shutil
-import tempfile
-
-from yt.analysis_modules.cosmological_observation.api import \
- LightCone
-from yt.testing import \
- assert_equal, \
- requires_module
-from yt.utilities.answer_testing.framework import \
- AnswerTestingTest, \
- requires_sim
-
-ETC = "enzo_tiny_cosmology/32Mpc_32.enzo"
-
-class LightConeProjectionTest(AnswerTestingTest):
- _type_name = "LightConeProjection"
- _attrs = ()
-
- def __init__(self, parameter_file, simulation_type):
- self.parameter_file = parameter_file
- self.simulation_type = simulation_type
- self.ds = os.path.basename(self.parameter_file)
-
- @property
- def storage_name(self):
- return os.path.basename(self.parameter_file)
-
- @requires_module("h5py")
- def run(self):
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lc = LightCone(
- self.parameter_file, self.simulation_type, 0., 0.1,
- observer_redshift=0.0, time_data=False)
- lc.calculate_light_cone_solution(
- seed=123456789, filename="LC/solution.txt")
- lc.project_light_cone(
- (600.0, "arcmin"), (60.0, "arcsec"), "density",
- weight_field=None, save_stack=True)
-
- fh = h5py.File("LC/LightCone.h5", mode="r")
- data = fh["density_None"].value
- units = fh["density_None"].attrs["units"]
- assert units == "g/cm**2"
- fh.close()
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
- mean = data.mean()
- mi = data[data.nonzero()].min()
- ma = data.max()
- return np.array([mean, mi, ma])
-
- def compare(self, new_result, old_result):
- assert_equal(new_result, old_result, verbose=True)
-
-@requires_sim(ETC, "Enzo")
-def test_light_cone_projection():
- yield LightConeProjectionTest(ETC, "Enzo")
diff --git a/yt/analysis_modules/cosmological_observation/light_ray/api.py b/yt/analysis_modules/cosmological_observation/light_ray/api.py
deleted file mode 100644
index 7f356fe3506..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_ray/api.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-API for light_ray
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning(
- "Development of the LightRay module has been moved to the Trident "
- "package. This version is deprecated and will be removed from yt "
- "in a future release. See https://github.com/trident-project/trident "
- "for further information.")
-
-from .light_ray import \
- LightRay
diff --git a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
deleted file mode 100644
index aadbf4a2e01..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ /dev/null
@@ -1,900 +0,0 @@
-"""
-LightRay class and member functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.analysis_modules.cosmological_observation.cosmology_splice import \
- CosmologySplice
-from yt.convenience import \
- load
-from yt.frontends.ytdata.utilities import \
- save_as_dataset
-from yt.units.yt_array import \
- YTArray
-from yt.utilities.cosmology import \
- Cosmology
-from yt.utilities.logger import \
- ytLogger as mylog
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- parallel_objects, \
- parallel_root_only
-from yt.utilities.physical_constants import speed_of_light_cgs
-from yt.data_objects.static_output import Dataset
-
-class LightRay(CosmologySplice):
- """
- A 1D object representing the path of a light ray passing through a
- simulation. LightRays can be either simple, where they pass through a
- single dataset, or compound, where they pass through consecutive
- datasets from the same cosmological simulation. One can sample any of
-    the fields intersected by the LightRay object as it passes through
- the dataset(s).
-
- For compound rays, the LightRay stacks together multiple datasets in a time
- series in order to approximate a LightRay's path through a volume
- and redshift interval larger than a single simulation data output.
- The outcome is something akin to a synthetic QSO line of sight.
-
- Once the LightRay object is set up, use LightRay.make_light_ray to
- begin making rays. Different randomizations can be created with a
- single object by providing different random seeds to make_light_ray.
-
- Parameters
- ----------
- parameter_filename : string or :class:`~yt.data_objects.static_output.Dataset`
- For simple rays, one may pass either a loaded dataset object or
- the filename of a dataset.
- For compound rays, one must pass the filename of the simulation
- parameter file.
- simulation_type : optional, string
- This refers to the simulation frontend type. Do not use for simple
- rays.
- Default: None
- near_redshift : optional, float
- The near (lowest) redshift for a light ray containing multiple
- datasets. Do not use for simple rays.
- Default: None
- far_redshift : optional, float
- The far (highest) redshift for a light ray containing multiple
- datasets. Do not use for simple rays.
- Default: None
- use_minimum_datasets : optional, bool
- If True, the minimum number of datasets is used to connect the
- initial and final redshift. If false, the light ray solution
- will contain as many entries as possible within the redshift
- interval. Do not use for simple rays.
- Default: True.
- max_box_fraction : optional, float
- In terms of the size of the domain, the maximum length a light
- ray segment can be in order to span the redshift interval from
- one dataset to another. If using a zoom-in simulation, this
- parameter can be set to the length of the high resolution
- region so as to limit ray segments to that size. If the
- high resolution region is not cubical, the smallest side
- should be used.
- Default: 1.0 (the size of the box)
- deltaz_min : optional, float
- Specifies the minimum :math:`\Delta z` between consecutive
- datasets in the returned list. Do not use for simple rays.
- Default: 0.0.
- minimum_coherent_box_fraction : optional, float
-        Used to specify the minimum length of a ray, in terms of the
- size of the domain, before the trajectory is re-randomized.
- Set to 0 to have ray trajectory randomized for every dataset.
- Set to np.inf (infinity) to use a single trajectory for the
- entire ray.
- Default: 0.
- time_data : optional, bool
- Whether or not to include time outputs when gathering
- datasets for time series. Do not use for simple rays.
- Default: True.
- redshift_data : optional, bool
- Whether or not to include redshift outputs when gathering
- datasets for time series. Do not use for simple rays.
- Default: True.
- find_outputs : optional, bool
- Whether or not to search for datasets in the current
- directory. Do not use for simple rays.
- Default: False.
- load_kwargs : optional, dict
- If you are passing a filename of a dataset to LightRay rather than an
- already loaded dataset, then you can optionally provide this dictionary
- as keywords when the dataset is loaded by yt with the "load" function.
-        Necessary for use with certain frontends, e.g.
-        Tipsy using "bounding_box" or
-        Gadget using "unit_base".
- Default : None
-
- """
- def __init__(self, parameter_filename, simulation_type=None,
- near_redshift=None, far_redshift=None,
- use_minimum_datasets=True, max_box_fraction=1.0,
- deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
- time_data=True, redshift_data=True,
- find_outputs=False, load_kwargs=None):
-
- if near_redshift is not None and far_redshift is not None and \
- near_redshift >= far_redshift:
- raise RuntimeError(
- "near_redshift must be less than far_redshift.")
-
- self.near_redshift = near_redshift
- self.far_redshift = far_redshift
- self.use_minimum_datasets = use_minimum_datasets
- self.deltaz_min = deltaz_min
- self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
- self.parameter_filename = parameter_filename
- if load_kwargs is None:
- self.load_kwargs = {}
- else:
- self.load_kwargs = load_kwargs
- self.light_ray_solution = []
- self._data = {}
-
- # The options here are:
- # 1) User passed us a dataset: use it to make a simple ray
- # 2) User passed us a dataset filename: use it to make a simple ray
- # 3) User passed us a simulation filename: use it to make a compound ray
-
- # Make a light ray from a single, given dataset: #1, #2
- if simulation_type is None:
- self.simulation_type = simulation_type
- if isinstance(self.parameter_filename, Dataset):
- self.ds = self.parameter_filename
- self.parameter_filename = self.ds.basename
- elif isinstance(self.parameter_filename, str):
- self.ds = load(self.parameter_filename, **self.load_kwargs)
- if self.ds.cosmological_simulation:
- redshift = self.ds.current_redshift
- self.cosmology = Cosmology(
- hubble_constant=self.ds.hubble_constant,
- omega_matter=self.ds.omega_matter,
- omega_lambda=self.ds.omega_lambda)
- else:
- redshift = 0.
- self.light_ray_solution.append({"filename": self.parameter_filename,
- "redshift": redshift})
-
- # Make a light ray from a simulation time-series. #3
- else:
- self.ds = None
- assert isinstance(self.parameter_filename, str)
- # Get list of datasets for light ray solution.
- CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
- find_outputs=find_outputs)
- self.light_ray_solution = \
- self.create_cosmology_splice(
- self.near_redshift, self.far_redshift,
- minimal=self.use_minimum_datasets,
- max_box_fraction=max_box_fraction,
- deltaz_min=self.deltaz_min,
- time_data=time_data,
- redshift_data=redshift_data)
-
- def _calculate_light_ray_solution(self, seed=None,
- left_edge=None, right_edge=None,
- min_level=None, periodic=True,
- start_position=None, end_position=None,
- trajectory=None, filename=None):
- "Create list of datasets to be added together to make the light ray."
-
- # Calculate dataset sizes, and get random dataset axes and centers.
- my_random = np.random.RandomState(seed)
-
- # If using only one dataset, set start and stop manually.
- if start_position is not None:
- if self.near_redshift is not None or self.far_redshift is not None:
- raise RuntimeError("LightRay Error: cannot specify both " + \
- "start_position and a redshift range.")
- if not ((end_position is None) ^ (trajectory is None)):
- raise RuntimeError("LightRay Error: must specify either end_position " + \
- "or trajectory, but not both.")
- self.light_ray_solution[0]['start'] = start_position
- if end_position is not None:
- self.light_ray_solution[0]['end'] = end_position
- else:
- # assume trajectory given as r, theta, phi
- if len(trajectory) != 3:
- raise RuntimeError("LightRay Error: trajectory must have length 3.")
- r, theta, phi = trajectory
- self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
- r * np.array([np.cos(phi) * np.sin(theta),
- np.sin(phi) * np.sin(theta),
- np.cos(theta)])
- self.light_ray_solution[0]['traversal_box_fraction'] = \
- vector_length(self.light_ray_solution[0]['start'],
- self.light_ray_solution[0]['end'])
-
- # the normal way (random start positions and trajectories for each dataset)
- else:
-
- # For box coherence, keep track of effective depth travelled.
- box_fraction_used = 0.0
-
- for q in range(len(self.light_ray_solution)):
- if (q == len(self.light_ray_solution) - 1):
- z_next = self.near_redshift
- else:
- z_next = self.light_ray_solution[q+1]['redshift']
-
- # Calculate fraction of box required for a depth of delta z
- self.light_ray_solution[q]['traversal_box_fraction'] = \
- self.cosmology.comoving_radial_distance(z_next, \
- self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \
- self.simulation.box_size
-
- # Get dataset axis and center.
- # If using box coherence, only get start point and vector if
- # enough of the box has been used.
- if (q == 0) or (box_fraction_used >=
- self.minimum_coherent_box_fraction):
- if periodic:
- self.light_ray_solution[q]['start'] = left_edge + \
- (right_edge - left_edge) * my_random.random_sample(3)
- theta = np.pi * my_random.random_sample()
- phi = 2 * np.pi * my_random.random_sample()
- box_fraction_used = 0.0
- else:
- ds = load(self.light_ray_solution[q]["filename"])
- ray_length = \
- ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
- "unitary")
- self.light_ray_solution[q]['start'], \
- self.light_ray_solution[q]['end'] = \
- non_periodic_ray(ds, left_edge, right_edge, ray_length,
- my_random=my_random, min_level=min_level)
- del ds
- else:
- # Use end point of previous segment, adjusted for periodicity,
- # and the same trajectory.
- self.light_ray_solution[q]['start'] = \
- periodic_adjust(self.light_ray_solution[q-1]['end'][:],
- left=left_edge, right=right_edge)
-
- if "end" not in self.light_ray_solution[q]:
- self.light_ray_solution[q]['end'] = \
- self.light_ray_solution[q]['start'] + \
- self.light_ray_solution[q]['traversal_box_fraction'] * \
- self.simulation.box_size * \
- np.array([np.cos(phi) * np.sin(theta),
- np.sin(phi) * np.sin(theta),
- np.cos(theta)])
- box_fraction_used += \
- self.light_ray_solution[q]['traversal_box_fraction']
-
- if filename is not None:
- self._write_light_ray_solution(filename,
- extra_info={'parameter_filename':self.parameter_filename,
- 'random_seed':seed,
- 'far_redshift':self.far_redshift,
- 'near_redshift':self.near_redshift})
-
- def make_light_ray(self, seed=None, periodic=True,
- left_edge=None, right_edge=None, min_level=None,
- start_position=None, end_position=None,
- trajectory=None,
- fields=None, setup_function=None,
- solution_filename=None, data_filename=None,
- get_los_velocity=None, use_peculiar_velocity=True,
- redshift=None, field_parameters=None, njobs=-1):
- """
- make_light_ray(seed=None, periodic=True,
- left_edge=None, right_edge=None, min_level=None,
- start_position=None, end_position=None,
- trajectory=None, fields=None, setup_function=None,
- solution_filename=None, data_filename=None,
- use_peculiar_velocity=True, redshift=None,
- njobs=-1)
-
- Create a light ray and get field values for each lixel. A light
- ray consists of a list of field values for cells intersected by
- the ray and the path length of the ray through those cells.
- Light ray data must be written out to an hdf5 file.
-
- Parameters
- ----------
- seed : optional, int
- Seed for the random number generator.
- Default: None.
- periodic : optional, bool
- If True, ray trajectories will make use of periodic
- boundaries. If False, ray trajectories will not be
- periodic.
- Default : True.
- left_edge : optional, iterable of floats or YTArray
- The left corner of the region in which rays are to be
- generated. If None, the left edge will be that of the
- domain. If specified without units, it is assumed to
- be in code units.
- Default: None.
- right_edge : optional, iterable of floats or YTArray
- The right corner of the region in which rays are to be
- generated. If None, the right edge will be that of the
- domain. If specified without units, it is assumed to
- be in code units.
- Default: None.
- min_level : optional, int
- The minimum refinement level of the spatial region in which
- the ray passes. This can be used with zoom-in simulations
- where the high resolution region does not keep a constant
- geometry.
- Default: None.
- start_position : optional, iterable of floats or YTArray.
- Used only if creating a light ray from a single dataset.
- The coordinates of the starting position of the ray.
- If specified without units, it is assumed to be in code units.
- Default: None.
- end_position : optional, iterable of floats or YTArray.
- Used only if creating a light ray from a single dataset.
- The coordinates of the ending position of the ray.
- If specified without units, it is assumed to be in code units.
- Default: None.
- trajectory : optional, list of floats
- Used only if creating a light ray from a single dataset.
- The (r, theta, phi) direction of the light ray. Use either
- end_position or trajectory, not both.
- Default: None.
- fields : optional, list
- A list of fields for which to get data.
- Default: None.
- setup_function : optional, callable, accepts a ds
- This function will be called on each dataset that is loaded
- to create the light ray. For, example, this can be used to
- add new derived fields.
- Default: None.
- solution_filename : optional, string
-            Path to a text file where the trajectory of each
-            subray is written out.
- Default: None.
- data_filename : optional, string
- Path to output file for ray data.
- Default: None.
- use_peculiar_velocity : optional, bool
- If True, the peculiar velocity along the ray will be sampled for
- calculating the effective redshift combining the cosmological
- redshift and the doppler redshift.
- Default: True.
- redshift : optional, float
- Used with light rays made from single datasets to specify a
- starting redshift for the ray. If not used, the starting
- redshift will be 0 for a non-cosmological dataset and
- the dataset redshift for a cosmological dataset.
- Default: None.
- njobs : optional, int
- The number of parallel jobs over which the segments will
- be split. Choose -1 for one processor per segment.
- Default: -1.
-
- Examples
- --------
-
- Make a light ray from multiple datasets:
-
- >>> import yt
- >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
- ... LightRay
- >>> my_ray = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
- ... 0., 0.1, time_data=False)
- ...
- >>> my_ray.make_light_ray(seed=12345,
- ... solution_filename="solution.txt",
- ... data_filename="my_ray.h5",
- ... fields=["temperature", "density"],
- ... use_peculiar_velocity=True)
-
- Make a light ray from a single dataset:
-
- >>> import yt
- >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
- ... LightRay
- >>> my_ray = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030")
- ...
- >>> my_ray.make_light_ray(start_position=[0., 0., 0.],
- ... end_position=[1., 1., 1.],
- ... solution_filename="solution.txt",
- ... data_filename="my_ray.h5",
- ... fields=["temperature", "density"],
- ... use_peculiar_velocity=True)
-
- """
- if self.simulation_type is None:
- domain = self.ds
- else:
- domain = self.simulation
-
- assumed_units = "code_length"
- if left_edge is None:
- left_edge = domain.domain_left_edge
- elif not hasattr(left_edge, 'units'):
- left_edge = domain.arr(left_edge, assumed_units)
- left_edge.convert_to_units('unitary')
-
- if right_edge is None:
- right_edge = domain.domain_right_edge
- elif not hasattr(right_edge, 'units'):
- right_edge = domain.arr(right_edge, assumed_units)
- right_edge.convert_to_units('unitary')
-
- if start_position is not None:
- if hasattr(start_position, 'units'):
- start_position = start_position
- else:
- start_position = self.ds.arr(start_position, assumed_units)
- start_position.convert_to_units('unitary')
-
- if end_position is not None:
- if hasattr(end_position, 'units'):
- end_position = end_position
- else:
- end_position = self.ds.arr(end_position, assumed_units)
- end_position.convert_to_units('unitary')
-
- if get_los_velocity is not None:
- use_peculiar_velocity = get_los_velocity
- mylog.warn("'get_los_velocity' kwarg is deprecated. " + \
- "Use 'use_peculiar_velocity' instead.")
-
- # Calculate solution.
- self._calculate_light_ray_solution(seed=seed,
- left_edge=left_edge,
- right_edge=right_edge,
- min_level=min_level, periodic=periodic,
- start_position=start_position,
- end_position=end_position,
- trajectory=trajectory,
- filename=solution_filename)
-
- if field_parameters is None:
- field_parameters = {}
-
- # Initialize data structures.
- self._data = {}
- # temperature field is automatically added to fields
- if fields is None: fields = []
- if (('gas', 'temperature') not in fields) and \
- ('temperature' not in fields):
- fields.append(('gas', 'temperature'))
- data_fields = fields[:]
- all_fields = fields[:]
- all_fields.extend(['dl', 'dredshift', 'redshift'])
- all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
- data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
- if use_peculiar_velocity:
- all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z',
- 'velocity_los', 'redshift_eff',
- 'redshift_dopp'])
- data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
-
- all_ray_storage = {}
- for my_storage, my_segment in parallel_objects(self.light_ray_solution,
- storage=all_ray_storage,
- njobs=njobs):
-
- # In case of simple rays, use the already loaded dataset: self.ds,
- # otherwise, load dataset for segment.
- if self.ds is None:
- ds = load(my_segment['filename'], **self.load_kwargs)
- else:
- ds = self.ds
-
- my_segment['unique_identifier'] = ds.unique_identifier
- if redshift is not None:
- if ds.cosmological_simulation and redshift != ds.current_redshift:
- mylog.warn("Generating light ray with different redshift than " +
- "the dataset itself.")
- my_segment["redshift"] = redshift
-
- if setup_function is not None:
- setup_function(ds)
-
- if not ds.cosmological_simulation:
- next_redshift = my_segment["redshift"]
- elif self.near_redshift == self.far_redshift:
- if isinstance(my_segment["traversal_box_fraction"], YTArray) and \
- not my_segment["traversal_box_fraction"].units.is_dimensionless:
- segment_length = \
- my_segment["traversal_box_fraction"].in_units("Mpccm / h")
- else:
- segment_length = my_segment["traversal_box_fraction"] * \
- ds.domain_width[0].in_units("Mpccm / h")
- next_redshift = my_segment["redshift"] - \
- self._deltaz_forward(my_segment["redshift"],
- segment_length)
- elif my_segment.get("next", None) is None:
- next_redshift = self.near_redshift
- else:
- next_redshift = my_segment['next']['redshift']
-
- # Make sure start, end, left, right
- # are using the dataset's unit system.
- my_start = ds.arr(my_segment['start'])
- my_end = ds.arr(my_segment['end'])
- my_left = ds.arr(left_edge)
- my_right = ds.arr(right_edge)
- mylog.info("Getting segment at z = %s: %s to %s." %
- (my_segment['redshift'], my_start, my_end))
-
- # Break periodic ray into non-periodic segments.
- sub_segments = periodic_ray(my_start, my_end,
- left=my_left, right=my_right)
-
- # Prepare data structure for subsegment.
- sub_data = {}
- sub_data['segment_redshift'] = my_segment['redshift']
- for field in all_fields:
- sub_data[field] = []
-
- # Get data for all subsegments in segment.
- for sub_segment in sub_segments:
- mylog.info("Getting subsegment: %s to %s." %
- (list(sub_segment[0]), list(sub_segment[1])))
- sub_ray = ds.ray(sub_segment[0], sub_segment[1])
- for key, val in field_parameters.items():
- sub_ray.set_field_parameter(key, val)
- asort = np.argsort(sub_ray["t"])
- sub_data['dl'].extend(sub_ray['dts'][asort] *
- vector_length(sub_ray.start_point,
- sub_ray.end_point))
-
- for field in data_fields:
- sub_data[field].extend(sub_ray[field][asort])
-
- if use_peculiar_velocity:
- line_of_sight = sub_segment[0] - sub_segment[1]
- line_of_sight /= ((line_of_sight**2).sum())**0.5
- sub_vel = ds.arr([sub_ray['velocity_x'],
- sub_ray['velocity_y'],
- sub_ray['velocity_z']])
- # Line of sight velocity = vel_los
- sub_vel_los = (np.rollaxis(sub_vel, 1) * \
- line_of_sight).sum(axis=1)
- sub_data['velocity_los'].extend(sub_vel_los[asort])
-
- # doppler redshift:
- # See https://en.wikipedia.org/wiki/Redshift and
- # Peebles eqns: 5.48, 5.49
-
- # 1 + redshift_dopp = (1 + v*cos(theta)/c) /
- # sqrt(1 - v**2/c**2)
-
- # where v is the peculiar velocity (ie physical velocity
- # without the hubble flow, but no hubble flow in sim, so
- # just the physical velocity).
-
- # the bulk of the doppler redshift is from line of sight
- # motion, but there is a small amount from time dilation
- # of transverse motion, hence the inclusion of theta (the
- # angle between line of sight and the velocity).
- # theta is the angle between the ray vector (i.e. line of
- # sight) and the velocity vectors: a dot b = ab cos(theta)
-
- sub_vel_mag = sub_ray['velocity_magnitude']
- cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
- # Protect against situations where velocity mag is exactly
- # zero, in which case zero / zero = NaN.
- cos_theta = np.nan_to_num(cos_theta)
- redshift_dopp = \
- (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
- np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1
- sub_data['redshift_dopp'].extend(redshift_dopp[asort])
- del sub_vel, sub_vel_los, sub_vel_mag, cos_theta, \
- redshift_dopp
-
- sub_ray.clear_data()
- del sub_ray, asort
-
- for key in sub_data:
- sub_data[key] = ds.arr(sub_data[key]).in_cgs()
-
- # Get redshift for each lixel. Assume linear relation between l
- # and z.
- sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
- (sub_data['dl'] / vector_length(my_start, my_end).in_cgs())
- sub_data['redshift'] = my_segment['redshift'] - \
- sub_data['dredshift'].cumsum() + sub_data['dredshift']
-
- # When using the peculiar velocity, create effective redshift
- # (redshift_eff) field combining cosmological redshift and
- # doppler redshift.
-
- # then to add cosmological redshift and doppler redshifts, follow
- # eqn 3.75 in Peacock's Cosmological Physics:
- # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
-
- if use_peculiar_velocity:
- sub_data['redshift_eff'] = ((1 + sub_data['redshift_dopp']) * \
- (1 + sub_data['redshift'])) - 1
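# Illustrative sketch (not part of the original module) of the two relations
# quoted in the comments above, written out for plain numpy arrays. The
# function and variable names are hypothetical; the speed of light must be
# supplied in the same units as the velocities.
import numpy as np

def doppler_redshift(v_los, v_mag, c):
    # 1 + z_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2),
    # with cos(theta) = v_los / |v|; 0/0 from zero-velocity cells -> 0.
    cos_theta = np.nan_to_num(v_los / v_mag)
    return (1 + v_mag * cos_theta / c) / np.sqrt(1 - (v_mag / c) ** 2) - 1

def effective_redshift(z_cosmo, z_dopp):
    # Peacock, Cosmological Physics, eqn 3.75:
    # 1 + z_eff = (1 + z_cosmo) * (1 + z_dopp)
    return (1 + z_cosmo) * (1 + z_dopp) - 1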
-
- # Remove empty lixels.
- sub_dl_nonzero = sub_data['dl'].nonzero()
- for field in all_fields:
- sub_data[field] = sub_data[field][sub_dl_nonzero]
- del sub_dl_nonzero
-
- # Add to storage.
- my_storage.result = sub_data
-
- del ds
-
- # Reconstruct ray data from parallel_objects storage.
- all_data = [my_data for my_data in all_ray_storage.values()]
- # This is now a list of segments where each one is a dictionary
- # with all the fields.
- all_data.sort(key=lambda a:a['segment_redshift'], reverse=True)
- # Flatten the list into a single dictionary containing fields
- # for the whole ray.
- all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])
- self._data = all_data
-
- if data_filename is not None:
- self._write_light_ray(data_filename, all_data)
- ray_ds = load(data_filename)
- return ray_ds
- else:
- return None
-
- def __getitem__(self, field):
- return self._data[field]
-
- @parallel_root_only
- def _write_light_ray(self, filename, data):
- """
- _write_light_ray(filename, data)
-
- Write light ray data to hdf5 file.
- """
-
- extra_attrs = {"data_type": "yt_light_ray"}
- if self.simulation_type is None:
- ds = self.ds
- else:
- ds = {}
- ds["periodicity"] = (True, True, True)
- ds["current_redshift"] = self.near_redshift
- for attr in ["dimensionality", "cosmological_simulation",
- "domain_left_edge", "domain_right_edge",
- "length_unit", "time_unit"]:
- ds[attr] = getattr(self.simulation, attr)
- if self.simulation.cosmological_simulation:
- for attr in ["omega_lambda", "omega_matter",
- "hubble_constant"]:
- ds[attr] = getattr(self.cosmology, attr)
- ds["current_time"] = \
- self.cosmology.t_from_z(ds["current_redshift"])
- if isinstance(ds["hubble_constant"], YTArray):
- ds["hubble_constant"] = \
- ds["hubble_constant"].to("100*km/(Mpc*s)").d
- extra_attrs["unit_registry_json"] = \
- self.simulation.unit_registry.to_json()
-
- # save the light ray solution
- if len(self.light_ray_solution) > 0:
- for key in self.light_ray_solution[0]:
- if key in ["next", "previous", "index"]:
- continue
- lrsa = [sol[key] for sol in self.light_ray_solution]
- if isinstance(lrsa[-1], YTArray):
- to_arr = YTArray
- else:
- to_arr = np.array
- arr = to_arr(lrsa)
- # If we somehow create an object array, convert it to a string
- # to avoid errors later
- if arr.dtype == 'O':
- arr = arr.astype(str)
- extra_attrs["light_ray_solution_%s" % key] = arr
-
- field_types = dict([(field, "grid") for field in data.keys()])
-
-        # Only return LightRay elements with non-zero temperature
- if 'temperature' in data: f = 'temperature'
- if ('gas', 'temperature') in data: f = ('gas', 'temperature')
- if 'temperature' in data or ('gas', 'temperature') in data:
- mask = data[f] > 0
- if not np.any(mask):
- raise RuntimeError(
- "No zones along light ray with nonzero %s. "
- "Please modify your light ray trajectory." % (f,))
- for key in data.keys():
- data[key] = data[key][mask]
- save_as_dataset(ds, filename, data, field_types=field_types,
- extra_attrs=extra_attrs)
-
- @parallel_root_only
- def _write_light_ray_solution(self, filename, extra_info=None):
- """
- _write_light_ray_solution(filename, extra_info=None)
-
- Write light ray solution to a file.
- """
-
- mylog.info("Writing light ray solution to %s." % filename)
- f = open(filename, 'w')
- if extra_info is not None:
- for par, val in extra_info.items():
- f.write("%s = %s\n" % (par, val))
- f.write("\nSegment Redshift dl/box Start x y " + \
- "z End x y z Dataset\n")
- for q, my_segment in enumerate(self.light_ray_solution):
- f.write("%04d %.6f %.6f % .10f % .10f % .10f % .10f % .10f % .10f %s\n" % \
- (q, my_segment['redshift'], my_segment['traversal_box_fraction'],
- my_segment['start'][0], my_segment['start'][1], my_segment['start'][2],
- my_segment['end'][0], my_segment['end'][1], my_segment['end'][2],
- my_segment['filename']))
- f.close()
-
-def _flatten_dict_list(data, exceptions=None):
- """
- _flatten_dict_list(data, exceptions=None)
-
- Flatten the list of dicts into one dict.
- """
-
- if exceptions is None: exceptions = []
- new_data = {}
- for datum in data:
- for field in [field for field in datum.keys()
- if field not in exceptions]:
- if field not in new_data:
- new_data[field] = []
- new_data[field].extend(datum[field])
- for field in new_data:
- new_data[field] = YTArray(new_data[field])
- return new_data
-
-def vector_length(start, end):
- """
- vector_length(start, end)
-
- Calculate vector length.
- """
-
- return np.sqrt(np.power((end - start), 2).sum())
-
-def periodic_adjust(p, left=None, right=None):
- """
- Return the point p adjusted for periodic boundaries.
-
- """
- if isinstance(p, YTArray):
- p.convert_to_units("unitary")
- if left is None:
- left = np.zeros_like(p)
- if right is None:
- right = np.ones_like(p)
-
- w = right - left
- p -= left
- return np.mod(p, w)
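# Quick usage sketch (hypothetical values) for periodic_adjust above: a point
# that has drifted outside the unit box is wrapped back into [0, 1) measured
# from the left edge. The input array may be modified in place, hence the copy.
import numpy as np
p = np.array([1.25, -0.1, 0.5])
wrapped = periodic_adjust(p.copy(), left=np.zeros(3), right=np.ones(3))
# wrapped == array([0.25, 0.9, 0.5])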
-
-def periodic_distance(coord1, coord2):
- """
- periodic_distance(coord1, coord2)
-
-    Calculate length of shortest vector between two points in a periodic domain.
- """
- dif = coord1 - coord2
-
- dim = np.ones(coord1.shape,dtype=int)
- def periodic_bind(num):
- pos = np.abs(num % dim)
- neg = np.abs(num % -dim)
- return np.min([pos,neg],axis=0)
-
- dif = periodic_bind(dif)
- return np.sqrt((dif * dif).sum(axis=-1))
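# Quick check (hypothetical values) for periodic_distance above: two points
# near opposite faces of the unit box are separated by the wrapped distance
# rather than the naive one.
import numpy as np
d = periodic_distance(np.array([0.9, 0.5, 0.1]), np.array([0.1, 0.5, 0.2]))
# d == sqrt(0.2**2 + 0.0**2 + 0.1**2) ~ 0.224, not the unwrapped ~0.806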
-
-def periodic_ray(start, end, left=None, right=None):
- """
- periodic_ray(start, end, left=None, right=None)
-
- Break up periodic ray into non-periodic segments.
- Accepts start and end points of periodic ray as YTArrays.
- Accepts optional left and right edges of periodic volume as YTArrays.
- Returns a list of lists of coordinates, where each element of the
- top-most list is a 2-list of start coords and end coords of the
- non-periodic ray:
-
- [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
- [[x1start,y1start,z1start], [x1end, y1end, z1end]],
- ...,]
-
- """
-
- if left is None:
- left = np.zeros(start.shape)
- if right is None:
- right = np.ones(start.shape)
- dim = right - left
-
- vector = end - start
- wall = np.zeros_like(start)
- close = np.zeros(start.shape, dtype=object)
-
- left_bound = vector < 0
- right_bound = vector > 0
- no_bound = vector == 0.0
- bound = vector != 0.0
-
- wall[left_bound] = left[left_bound]
- close[left_bound] = np.max
- wall[right_bound] = right[right_bound]
- close[right_bound] = np.min
- wall[no_bound] = np.inf
- close[no_bound] = np.min
-
- segments = []
- this_start = start.copy()
- this_end = end.copy()
- t = 0.0
- tolerance = 1e-6
- while t < 1.0 - tolerance:
- hit_left = (this_start <= left) & (vector < 0)
- if (hit_left).any():
- this_start[hit_left] += dim[hit_left]
- this_end[hit_left] += dim[hit_left]
- hit_right = (this_start >= right) & (vector > 0)
- if (hit_right).any():
- this_start[hit_right] -= dim[hit_right]
- this_end[hit_right] -= dim[hit_right]
-
- nearest = vector.unit_array * \
- np.array([close[q]([this_end[q], wall[q]]) \
- for q in range(start.size)])
- dt = ((nearest - this_start) / vector)[bound].min()
- now = this_start + vector * dt
- close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10
- now[close_enough] = nearest[close_enough]
- segments.append([this_start.copy(), now.copy()])
- this_start = now.copy()
- t += dt
-
- return segments
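# Usage sketch for periodic_ray above: a ray that exits the unit box once is
# split into two non-periodic pieces. Dimensionless YTArrays are used here
# because the routine accesses vector.unit_array internally.
from yt.units.yt_array import YTArray
pieces = periodic_ray(YTArray([0.8, 0.5, 0.5]), YTArray([1.2, 0.5, 0.5]))
# pieces ~ [[[0.8, 0.5, 0.5], [1.0, 0.5, 0.5]],
#           [[0.0, 0.5, 0.5], [0.2, 0.5, 0.5]]]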
-
-def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=5000,
- min_level=None, my_random=None):
-
- max_length = vector_length(left_edge, right_edge)
- if ray_length > max_length:
- raise RuntimeError(
- ("The maximum segment length in the region %s to %s is %s, " +
- "but the ray length requested is %s. Decrease ray length.") %
- (left_edge, right_edge, max_length, ray_length))
-
- if my_random is None:
- my_random = np.random.RandomState()
- i = 0
- while True:
- start = my_random.random_sample(3) * \
- (right_edge - left_edge) + left_edge
- theta = np.pi * my_random.random_sample()
- phi = 2 * np.pi * my_random.random_sample()
- end = start + ray_length * \
- np.array([np.cos(phi) * np.sin(theta),
- np.sin(phi) * np.sin(theta),
- np.cos(theta)])
- i += 1
- test_ray = ds.ray(start, end)
- if (end >= left_edge).all() and (end <= right_edge).all() and \
- (min_level is None or min_level <= 0 or
- (test_ray["grid_level"] >= min_level).all()):
- mylog.info("Found ray after %d attempts." % i)
- del test_ray
- return start, end
- del test_ray
- if i > max_iter:
- raise RuntimeError(
-                ("Failed to create segment in %d attempts. " +
-                 "Decreasing the ray length is recommended.") % i)
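# Sketch of the direction sampling used in non_periodic_ray above: spherical
# angles (theta, phi) are mapped to a Cartesian unit vector, so the end point
# lies on a sphere of radius ray_length around the start point.
import numpy as np
rng = np.random.RandomState(42)
theta = np.pi * rng.random_sample()
phi = 2 * np.pi * rng.random_sample()
direction = np.array([np.cos(phi) * np.sin(theta),
                      np.sin(phi) * np.sin(theta),
                      np.cos(theta)])
# np.linalg.norm(direction) == 1 to machine precision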
diff --git a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
deleted file mode 100644
index e09af2cae41..00000000000
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-Unit test for the light_ray analysis module
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2016, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.convenience import \
- load
-from yt.testing import \
- assert_array_equal, \
- requires_file
-from yt.analysis_modules.cosmological_observation.api import LightRay
-import os
-import shutil
-from yt.utilities.answer_testing.framework import data_dir_load
-import tempfile
-
-COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
-COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
-
-def compare_light_ray_solutions(lr1, lr2):
- assert len(lr1.light_ray_solution) == len(lr2.light_ray_solution)
- if len(lr1.light_ray_solution) == 0:
- return
- for s1, s2 in zip(lr1.light_ray_solution, lr2.light_ray_solution):
- for field in s1:
- if field in ["next", "previous"]:
- continue
- if isinstance(s1[field], np.ndarray):
- assert_array_equal(s1[field], s2[field])
- else:
- assert s1[field] == s2[field]
-
-@requires_file(COSMO_PLUS)
-def test_light_ray_cosmo():
- """
- This test generates a cosmological light ray
- """
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
-
- lr.make_light_ray(seed=1234567,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- ds = load('lightray.h5')
- compare_light_ray_solutions(lr, ds)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS)
-def test_light_ray_cosmo_nested():
- """
-    This test generates a cosmological light ray confining the ray to a subvolume
- """
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- left = np.ones(3) * 0.25
- right = np.ones(3) * 0.75
-
- lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
-
- lr.make_light_ray(seed=1234567, left_edge=left, right_edge=right,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- ds = load('lightray.h5')
- compare_light_ray_solutions(lr, ds)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS)
-def test_light_ray_cosmo_nonperiodic():
- """
- This test generates a cosmological light ray using non-periodic segments
- """
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
-
- lr.make_light_ray(seed=1234567, periodic=False,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- ds = load('lightray.h5')
- compare_light_ray_solutions(lr, ds)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS_SINGLE)
-def test_light_ray_non_cosmo():
- """
- This test generates a non-cosmological light ray
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- lr = LightRay(COSMO_PLUS_SINGLE)
-
- ray_start = [0,0,0]
- ray_end = [1,1,1]
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- ds = load('lightray.h5')
- compare_light_ray_solutions(lr, ds)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS_SINGLE)
-def test_light_ray_non_cosmo_from_dataset():
- """
- This test generates a non-cosmological light ray created from an already
- loaded dataset
- """
-
- # Set up in a temp dir
- tmpdir = tempfile.mkdtemp()
- curdir = os.getcwd()
- os.chdir(tmpdir)
-
- ds = data_dir_load(COSMO_PLUS_SINGLE)
- lr = LightRay(ds)
-
- ray_start = [0,0,0]
- ray_end = [1,1,1]
- lr.make_light_ray(start_position=ray_start, end_position=ray_end,
- fields=['temperature', 'density', 'H_number_density'],
- data_filename='lightray.h5')
-
- ds = load('lightray.h5')
- compare_light_ray_solutions(lr, ds)
-
- # clean up
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
diff --git a/yt/analysis_modules/halo_analysis/api.py b/yt/analysis_modules/halo_analysis/api.py
index 0f59bc9058e..e9eb1f054ad 100644
--- a/yt/analysis_modules/halo_analysis/api.py
+++ b/yt/analysis_modules/halo_analysis/api.py
@@ -1,41 +1,7 @@
-"""
-API for halo_analysis
+from yt.utilities.exceptions import \
+ YTModuleRemoved
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning(
- "Development of the HaloCatalog module has been moved to "
- "the yt_astro_analysis package. This version is deprecated "
- "and will be removed from yt in a future release. See "
- "https://github.com/yt-project/yt_astro_analysis for further "
- "information.")
-
-from .halo_catalog import \
- HaloCatalog
-
-from .halo_callbacks import \
- add_callback
-
-from .halo_finding_methods import \
- add_finding_method
-
-from .halo_filters import \
- add_filter
-
-from .halo_quantities import \
- add_quantity
-
-from .halo_recipes import \
- add_recipe
+raise YTModuleRemoved(
+ "halo_analysis",
+ "https://github.com/yt-project/yt_astro_analysis",
+ "https://yt-astro-analysis.readthedocs.io/")
diff --git a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
deleted file mode 100644
index c2aaa794f22..00000000000
--- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
+++ /dev/null
@@ -1,805 +0,0 @@
-"""
-A very simple, purely-serial, merger tree script that knows how to parse FOF
-catalogs, either output by Enzo or output by yt's FOF halo finder, and then
-compare parent/child relationships.
-
-
-
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-# plot_halo_evolution() gives a good full example of how to use the framework
-
-# First pass at a simplified merger tree
-#
-# Basic outline:
-#
-# 1. Halo find inline, obtaining particle catalogs
-# 2. Load dataset at time t
-# 3. Load dataset at time t+1
-# 4. Parse catalogs for t and t+1
-# 5. Place halos for t+1 in kD-tree
-# 6. For every halo in t, execute ball-query with some linking length
-# 7. For every halo in ball-query result, execute numpy's intersect1d on
-# particle IDs
-# 8. Parentage is described by a fraction of particles that pass from one to
-#    the other; we have both descendant fractions and ancestor fractions
-#    (a minimal sketch of steps 5-8 appears just below).
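# Minimal sketch of steps 5-8 above, using scipy's cKDTree in place of
# yt.extern.pykdtree and made-up positions and particle IDs. For each halo at
# time t, nearby halos at t+1 are found with a ball query and parentage is the
# fraction of shared particle IDs in each direction.
import numpy as np
from scipy.spatial import cKDTree

pos_t = np.array([[0.50, 0.50, 0.50]])                 # halo centers at time t
pos_t1 = np.array([[0.52, 0.50, 0.50],
                   [0.90, 0.10, 0.10]])                # halo centers at time t+1
ids_t = {0: np.arange(100)}                            # particle IDs per halo
ids_t1 = {0: np.arange(60), 1: np.arange(200, 260)}

tree_t1 = cKDTree(pos_t1)
fractions = {}
for hid, nearby in enumerate(cKDTree(pos_t).query_ball_tree(tree_t1, r=0.10)):
    fractions[hid] = {}
    for cid in nearby:
        overlap = np.intersect1d(ids_t[hid], ids_t1[cid]).size
        # (fraction of the child that came from this halo,
        #  fraction of this halo that went to the child)
        fractions[hid][cid] = (float(overlap) / ids_t1[cid].size,
                               float(overlap) / ids_t[hid].size)
# fractions == {0: {0: (1.0, 0.6)}}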
-
-
-import numpy as np
-from yt.utilities.on_demand_imports import _h5py as h5py
-import glob
-import os
-
-from yt.extern.six.moves import cPickle
-from yt.extern.pykdtree import KDTree
-from yt.funcs import mylog, get_pbar
-
-import yt.extern.pydot as pydot
-
-# We don't currently use this, but we may again find a use for it in the
-# future.
-class MaxLengthDict(dict):
- def __init__(self, *args, **kwargs):
- dict.__init__(self, *args, **kwargs)
- self.order = [None] * 50
-
- def __setitem__(self, key, val):
- if key not in self.order:
- to_remove = self.order.pop(0)
- self.pop(to_remove, None)
- self.order.append(key)
- dict.__setitem__(self, key, val)
-
- def __getitem__(self, key):
- if key in self.order:
- self.order.pop(self.order.index(key))
- self.order.append(key)
- return dict.__getitem__(self, key)
-
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- self.order.pop(self.order.index(key))
- self.order.insert(0, None)
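# Behavior sketch for MaxLengthDict above: it holds at most 50 entries and
# evicts the least recently used key once a 51st distinct key is inserted
# (lookups via __getitem__ also refresh a key's position in the order).
d = MaxLengthDict()
for i in range(51):
    d[i] = i * i
# len(d) == 50; key 0 has been evicted, keys 1..50 remain
assert 0 not in d and 50 in d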
-
-class HaloCatalog(object):
- r"""A catalog of halos, parsed from EnzoFOF outputs.
-
- This class will read in catalogs output by the Enzo FOF halo finder and
- make available their positions, radii, etc. Enzo FOF was provided
- starting with 2.0, and can be run either inline (with the correct
- options) or as a postprocessing step using the `-F` command line
- option. This class is mostly useful when calculating a merger tree,
- and when the particle IDs for members of a given halo are output as
- well.
-
- Parameters
- ----------
- output_id : int
- This is the integer output id of the halo catalog to parse and
- load.
- cache : bool
- Should we store, in between accesses, the particle IDs? If set to
- true, the correct particle files must exist.
- external_FOF : bool, optional
- Are we building a tree from outputs generated by an
- external FOF program, or an FOF internal to yt?
- FOF_directory : str, optional
- Directory where FOF files are located
- """
- cache = None
- def __init__(self, output_id, cache = True, external_FOF=True, FOF_directory="FOF"):
- self.output_id = output_id
- self.external_FOF = external_FOF
- self.redshift = 0.0
- self.FOF_directory = FOF_directory
- self.particle_file = h5py.File("%s/particles_%05i.h5" % \
- (FOF_directory, output_id), "r")
- if self.external_FOF:
- self.parse_halo_catalog_external()
- else:
- self.parse_halo_catalog_internal()
- if cache: self.cache = dict()#MaxLengthDict()
-
- def __del__(self):
- self.particle_file.close()
-
- def parse_halo_catalog_external(self):
- hp = []
- for line in open("%s/groups_%05i.dat" % \
- (self.FOF_directory, self.output_id)):
- if line.strip() == "": continue # empty
- if line.startswith("# Red"):
- self.redshift = float(line.split("=")[1])
- if line[0] == "#": continue # comment
- if line[0] == "d": continue # datavar
- x,y,z = [float(f) for f in line.split(None, 3)[:-1]]
- hp.append([x,y,z])
- if hp != []:
- self.halo_positions = np.array(hp)
- self.halo_kdtree = KDTree(self.halo_positions)
- else:
- self.halo_positions = None
- self.halo_kdtree = None
- return hp
-
- def parse_halo_catalog_internal(self):
- """
- This parser works on the files output directly out of yt's internal
- halo_finder. The parse_halo_catalog_external works with an
- external version of FOF.
-
- Examples
- --------
- >>> ds = load("DD0000/DD0000")
- >>> halo_list = FOFHaloFinder(ds)
- >>> halo_list.write_out("FOF/groups_00000.txt")
- >>> halos_COM = parse_halo_catalog_internal()
- """
- hp = []
- for line in open("%s/groups_%05i.txt" % \
- (self.FOF_directory, self.output_id)):
- if line.startswith("# RED"):
- self.redshift = float(line.split("=")[1])
- continue
- if line.strip() == "": continue # empty
- if line[0] == "#": continue # comment
- x,y,z = [float(f) for f in line.split()[7:10]] # COM x,y,z
- hp.append([x,y,z])
- if hp != []:
- self.halo_positions = np.array(hp)
- self.halo_kdtree = KDTree(self.halo_positions)
- else:
- self.halo_positions = None
- self.halo_kdtree = None
- return hp
-
- def read_particle_ids(self, halo_id):
- if self.cache is not None:
- if halo_id not in self.cache:
- if self.external_FOF:
- self.cache[halo_id] = \
- self.particle_file["/Halo%08i/Particle ID" % halo_id][:]
- else:
- self.cache[halo_id] = \
- self.particle_file["/Halo%08i/particle_index" % halo_id][:]
- ids = self.cache[halo_id]
- else:
- if self.external_FOF:
- ids = self.particle_file["/Halo%08i/Particle ID" % halo_id][:]
- else:
- ids = self.particle_file["/Halo%08i/particle_index" % halo_id][:]
- return HaloParticleList(halo_id, self.halo_positions[halo_id,:], ids)
-
- def calculate_parentage_fractions(self, other_catalog, radius = 0.10):
- parentage_fractions = {}
- if self.halo_positions is None or other_catalog.halo_positions is None:
- return parentage_fractions
- mylog.debug("Ball-tree query with radius %0.3e", radius)
- all_nearest = self.halo_kdtree.query_ball_tree(
- other_catalog.halo_kdtree, radius)
- pbar = get_pbar("Halo Mergers", self.halo_positions.shape[0])
- for hid1, nearest in enumerate(all_nearest):
- pbar.update(hid1)
- parentage_fractions[hid1] = {}
- HPL1 = self.read_particle_ids(hid1)
- for hid2 in sorted(nearest):
- HPL2 = other_catalog.read_particle_ids(hid2)
- p1, p2 = HPL1.find_relative_parentage(HPL2)
- parentage_fractions[hid1][hid2] = (p1, p2, HPL2.number_of_particles)
- parentage_fractions[hid1]["NumberOfParticles"] = HPL1.number_of_particles
- pbar.finish()
- return parentage_fractions
-
-class HaloParticleList(object):
- def __init__(self, halo_id, position, particle_ids):
- self.halo_id = halo_id
- self.position = np.array(position)
- self.particle_ids = particle_ids
- self.number_of_particles = particle_ids.size
-
- def find_nearest(self, other_tree, radius = 0.10):
- return other_tree.query_ball_point(self.position, radius)
-
- def find_relative_parentage(self, child):
- # Return two values: percent this halo gave to the other, and percent
- # of the other that comes from this halo
- overlap = np.intersect1d(self.particle_ids, child.particle_ids).size
- of_child_from_me = float(overlap)/child.particle_ids.size
- of_mine_from_me = float(overlap)/self.particle_ids.size
- return of_child_from_me, of_mine_from_me
-
-class EnzoFOFMergerBranch(object):
- def __init__(self, tree, output_num, halo_id, max_children,
- min_relation=0.25):
- self.output_num = output_num
- self.halo_id = halo_id
- self.npart = tree.relationships[output_num][halo_id]["NumberOfParticles"]
- self.children = []
- self.progenitor = -1
- max_relationship = 0.0
- halo_count = 0
- keys = list(tree.relationships[output_num][halo_id].keys())
- keys.remove('NumberOfParticles')
- for k in sorted(keys):
- v = tree.relationships[output_num][halo_id][k]
- if v[1] > min_relation and halo_count < max_children:
- halo_count += 1
- self.children.append((k,v[1],v[2]))
- if v[1] > max_relationship:
- self.progenitor = k
- max_relationship = v[1]
-
-class EnzoFOFMergerTree(object):
- r"""Calculates the parentage relationships for halos for a series of
- outputs, using the framework provided in enzofof_merger_tree.
-
- Parameters
- ----------
- zrange : tuple
- This is the redshift range (min, max) to calculate the
- merger tree. E.g. (0, 2) for z=2 to z=0
- cycle_range : tuple, optional
- This is the cycle number range (min, max) to calculate the
- merger tree. If both zrange and cycle_number given,
- ignore zrange.
- output : bool, optional
- If provided, both .cpkl and .txt files containing the parentage
- relationships will be output.
- load_saved : bool, optional
- Flag to load previously saved parental relationships
- save_filename : str, optional
- Filename to save parental relationships
- external_FOF : bool, optional
- Are we building a tree from outputs generated by an
- external FOF program, or an FOF internal to yt?
- FOF_directory : str, optional
-        Directory where FOF files are located. Note that the files
-        must be named according to the syntax groups_DDDDD.txt for
-        internal yt outputs and groups_DDDDD.dat for external FOF outputs,
-        where DDDDD are digits representing the equivalent cycle number,
-        e.g. groups_00000.txt.
-
- Examples
- --------
- >>> mt = EnzoFOFMergerTree() # by default it grabs every DD in FOF dir
- >>> mt.build_tree(0) # Create tree for halo 0
- >>> mt.print_tree()
- >>> mt.write_dot()
-
- See Also
- --------
- plot_halo_evolution()
- """
- def __init__(self, zrange=None, cycle_range=None, output=False,
- load_saved=False, save_filename="merger_tree.cpkl",
- external_FOF=True, FOF_directory="FOF"):
-
- self.relationships = {}
- self.redshifts = {}
- self.external_FOF = external_FOF
- self.FOF_directory = FOF_directory
- if load_saved:
- self.load_tree("%s/%s" % (self.FOF_directory, save_filename))
- # make merger tree work within specified cycle/z limits
- # on preloaded halos
- if zrange is not None:
- self.select_redshifts(zrange)
- if cycle_range is not None:
- self.select_cycles(cycle_range)
- else:
- self.find_outputs(zrange, cycle_range, output)
- self.run_merger_tree(output)
- self.save_tree("%s/%s" % (self.FOF_directory, save_filename))
-
- def select_cycles(self, cycle_range):
- """
- Takes an existing tree and pares it to only include a subset of
- cycles. Useful in paring a loaded tree.
- """
-        # N.B. Does not delete info from self.relationships to save space;
-        # it just removes entries from the redshift dict used for indexing.
-        for cycle in list(self.redshifts.keys()):
-            if cycle < cycle_range[0] or cycle > cycle_range[1]:
-                del self.redshifts[cycle]
-
- def select_redshifts(self, zrange):
- """
- Takes an existing tree and pares it to only include a subset of
- redshifts. Useful in paring a loaded tree.
- """
-        # N.B. Does not delete info from self.relationships to save space;
-        # it just removes entries from the redshift dict used for indexing.
-        for redshift in list(self.redshifts.values()):
-            if redshift < zrange[0] or redshift > zrange[1]:
-                # some reverse lookup magic -- assumes unique cycle/z pairs
-                cycle = [key for key, value in self.redshifts.items()
-                         if value == redshift][0]
-                del self.redshifts[cycle]
-
- def save_tree(self, filename):
- cPickle.dump((self.redshifts, self.relationships),
- open(filename, "wb"))
-
- def load_tree(self, filename):
- self.redshifts, self.relationships = \
- cPickle.load(open(filename, "rb"))
-
- def clear_data(self):
- r"""Deletes previous merger tree, but keeps parentage
- relationships.
- """
- del self.levels
-
- def find_outputs(self, zrange, cycle_range, output):
- self.numbers = []
- if self.external_FOF:
- filenames = "%s/groups_*.dat" % (self.FOF_directory)
- files = glob.glob(filenames)
- else:
- filenames = "%s/groups_*.txt" % (self.FOF_directory)
- files = glob.glob(filenames)
- # If using redshift range, load redshifts only
- for f in files:
- num = int(f[-9:-4])
- if zrange is not None:
- HC = HaloCatalog(num, external_FOF=self.external_FOF, \
- FOF_directory=self.FOF_directory)
- # Allow for some epsilon
- diff1 = (HC.redshift - zrange[0]) / zrange[0]
- diff2 = (HC.redshift - zrange[1]) / zrange[1]
- if diff1 >= -1e-3 and diff2 <= 1e-3:
- self.numbers.append(num)
- del HC
- elif cycle_range is not None:
- if num >= cycle_range[0] and num <= cycle_range[1]:
- self.numbers.append(num)
- else:
- self.numbers.append(num)
- self.numbers.sort()
-
- def run_merger_tree(self, output):
- # Run merger tree for all outputs, starting with the last output
- for i in range(len(self.numbers)-1, 0, -1):
- if output:
- output = "%s/tree-%5.5d-%5.5d" % \
- (self.FOF_directory, self.numbers[i], self.numbers[i-1])
- else:
- output = None
- z0, z1, fr = find_halo_relationships(self.numbers[i], \
- self.numbers[i-1], \
- output_basename=output, \
- external_FOF=self.external_FOF,
- FOF_directory=self.FOF_directory)
- self.relationships[self.numbers[i]] = fr
- self.redshifts[self.numbers[i]] = z0
- # Fill in last redshift
- self.redshifts[self.numbers[0]] = z1
-
- def build_tree(self, halonum, min_particles=0, max_children=1e20):
- r"""Builds a merger tree, starting at the last output.
-
- Parameters
- ----------
- halonum : int
- Halo number in the last output to analyze.
- min_particles : int, optional
- Minimum number of particles of halos in tree.
- max_children : int, optional
- Maximum number of child halos each leaf can have.
- """
- self.halonum = halonum
- self.max_children = max_children
- self.output_numbers = sorted(self.relationships, reverse=True)
- self.levels = {}
- trunk = self.output_numbers[0]
- self.levels[trunk] = [EnzoFOFMergerBranch(self, trunk, halonum,
- max_children)]
- self.generate_tree(min_particles, max_children)
-
- def filter_small_halos(self, lvl, min_particles):
- # Filter out children with less than min_particles
- for h in self.levels[lvl]:
- fil = []
- for c in h.children:
- if c[2] > min_particles: # c[2] = npart
- fil.append(c)
- h.children = fil
-
- def generate_tree(self, min_particles, max_children):
- self.filter_small_halos(self.output_numbers[0], min_particles)
- for i in range(1,len(self.output_numbers)):
- prev = self.output_numbers[i-1]
- this = self.output_numbers[i]
- self.levels[this] = []
- this_halos = [] # To check for duplicates
- for h in self.levels[prev]:
- for c in h.children:
- if c[0] in this_halos: continue
- if self.relationships[this] == {}: continue
- branch = EnzoFOFMergerBranch(self, this, c[0],
- max_children)
- self.levels[this].append(branch)
- this_halos.append(c[0])
- self.filter_small_halos(this, min_particles)
-
- def get_massive_progenitors(self, halonum, min_relation=0.25):
- r"""Returns a list of the most massive progenitor halos.
-
- This routine walks down the tree, following the most massive
- progenitor on each node.
-
- Parameters
- ----------
- halonum : int
- Halo number at the last output to trace.
-
- Returns
- -------
- output : dict
- Dictionary of redshifts, cycle numbers, and halo numbers
- of the most massive progenitor. keys = {redshift, cycle,
- halonum}
- """
- output = {"redshift": [], "cycle": [], "halonum": []}
- # First (lowest redshift) node in tree
- halo0 = halonum
- for cycle in sorted(self.numbers, reverse=True):
- if cycle not in self.relationships: break
- if halo0 not in self.relationships[cycle]: break
- node = self.relationships[cycle][halo0]
- output["redshift"].append(self.redshifts[cycle])
- output["cycle"].append(cycle)
- output["halonum"].append(halo0)
- # Find progenitor
- max_rel = 0.0
- for k,v in node.items():
- if not str(k).isdigit(): continue
- if v[1] > max_rel and v[1] > min_relation:
- halo0 = k
- max_rel = v[1]
- return output
-
- def print_tree(self):
- r"""Prints the merger tree to stdout.
- """
- for lvl in sorted(self.levels, reverse=True):
- if lvl not in self.redshifts: continue
- print("========== Cycle %5.5d (z=%f) ==========" % \
- (lvl, self.redshifts[lvl]))
- for br in self.levels[lvl]:
- print("Parent halo = %d" % br.halo_id)
- print("--> Most massive progenitor == Halo %d" % \
- (br.progenitor))
- for i,c in enumerate(br.children):
- if i > self.max_children: break
- print("--> Halo %8.8d :: fraction = %g" % (c[0], c[1]))
-
- def save_halo_evolution(self, filename):
- """
- Saves as an HDF5 file the relevant details about a halo
- over the course of its evolution following the most massive
- progenitor to have given it the bulk of its particles.
- It stores info from the FOF_groups file: location, mass, id, etc.
- """
- f = h5py.File("%s/%s" % (self.FOF_directory, filename), 'a')
- cycle_fin = sorted(list(self.redshifts.keys()))[-1]
- halo_id = self.levels[cycle_fin][0].halo_id
- halo = "halo%05d" % halo_id
- if halo in f:
- del f["halo%05d" % halo_id]
- g = f.create_group("halo%05d" % halo_id)
- size = len(self.redshifts)
- cycle = np.zeros(size)
- redshift = np.zeros(size)
- halo_id = np.zeros(size)
- fraction = np.zeros(size)
- mass = np.zeros(size)
- densest_point = np.zeros((3,size))
- COM = np.zeros((6,size))
- fraction[0] = 1.
-
- for i, lvl in enumerate(sorted(self.levels, reverse=True)):
- if len(self.levels[lvl]) == 0: # lineage for this halo ends
- cycle = cycle[:i] # so truncate arrays, and break
- redshift = redshift[:i] # Not big enough.
- halo_id = halo_id[:i]
- fraction = fraction[:i]
- mass = mass[:i]
- densest_point = densest_point[:,:i]
- COM = COM[:,:i]
- break
- if lvl not in self.redshifts: continue
- mylog.info("========== Cycle %5.5d (z=%f) ==========" % \
- (lvl, self.redshifts[lvl]))
- cycle[i] = lvl
- redshift[i] = self.redshifts[lvl]
-
- br = self.levels[lvl][0]
- mylog.info("Parent halo = %d" % br.halo_id)
- mylog.info("-> Most massive progenitor == Halo %d" % (br.progenitor))
- halo_id[i] = br.halo_id
-
- if len(br.children) == 0: # lineage for this halo ends
- cycle = cycle[:i+1] # (no children)
- redshift = redshift[:i+1] # so truncate arrays, and break
- halo_id = halo_id[:i+1]
- fraction = fraction[:i+1]
- mass = mass[:i+1]
- densest_point = densest_point[:,:i+1]
- COM = COM[:,:i+1]
- break
-
- if i < size-1:
- fraction[i+1] = br.children[0][1]
-
- # open up FOF file to parse for details
- filename = "%s/groups_%05d.txt" % (self.FOF_directory, lvl)
- mass[i], densest_point[:,i], COM[:,i] = \
- grab_FOF_halo_info_internal(filename, br.halo_id)
-
- # save the arrays in the hdf5 file
- g.create_dataset("cycle", data=cycle)
- g.create_dataset("redshift", data=redshift)
- g.create_dataset("halo_id", data=halo_id)
- g.create_dataset("fraction", data=fraction)
- g.create_dataset("mass", data=mass)
- g.create_dataset("densest_point", data=densest_point)
- g.create_dataset("COM", data=COM)
- f.close()
-
- def write_dot(self, filename=None):
- r"""Writes merger tree to a GraphViz or image file.
-
- Parameters
- ----------
- filename : str, optional
- Filename to write the GraphViz file. Default will be
- tree_halo%05i.gv, which is a text file in the GraphViz format.
- If filename is an image (e.g. "MergerTree.png") the output will
- be in the appropriate image format made by calling GraphViz
- automatically. See GraphViz (e.g. "dot -v")
- for a list of available output formats.
- """
- if filename is None:
- filename = "%s/tree_halo%5.5d.gv" % \
- (self.FOF_directory, self.halonum)
- # Create the pydot graph object.
- self.graph = pydot.Dot('galaxy', graph_type='digraph')
- self.halo_shape = "rect"
- self.z_shape = "plaintext"
- # Subgraphs to align levels
- self.subgs = {}
- for num in self.numbers:
- self.subgs[num] = pydot.Subgraph('', rank = 'same')
- self.graph.add_subgraph(self.subgs[num])
- sorted_lvl = sorted(self.levels, reverse=True)
- for ii,lvl in enumerate(sorted_lvl):
- # Since we get the cycle number from the key, it won't
- # exist for the last level, i.e. children of last level.
- # Get it from self.numbers.
- if ii < len(sorted_lvl)-1:
- next_lvl = sorted_lvl[ii+1]
- else:
- next_lvl = self.numbers[0]
- for br in self.levels[lvl]:
- for c in br.children:
- color = "red" if c[0] == br.progenitor else "black"
- self.graph.add_edge(pydot.Edge("C%d_H%d" %(lvl, br.halo_id),
- "C%d_H%d" % (next_lvl, c[0]), color=color))
- #line = " C%d_H%d -> C%d_H%d [color=%s];\n" % \
- # (lvl, br.halo_id, next_lvl, c[0], color)
-
- #fp.write(line)
- for ii,lvl in enumerate(sorted_lvl):
- npart_max = 0
- for br in self.levels[lvl]:
- if br.npart > npart_max: npart_max = br.npart
- for br in self.levels[lvl]:
- halo_str = "C%d_H%d" % (lvl, br.halo_id)
- style = "filled" if br.npart == npart_max else "solid"
- self.graph.add_node(pydot.Node(halo_str,
- label = "Halo %d\\n%d particles" % (br.halo_id, br.npart),
- style = style, shape = self.halo_shape))
- # Add this node to the correct level subgraph.
- self.subgs[lvl].add_node(pydot.Node(halo_str))
- for lvl in self.numbers:
- # Don't add the z if there are no halos already in the subgraph.
- if len(self.subgs[lvl].get_node_list()) == 0: continue
- self.subgs[lvl].add_node(pydot.Node("%1.5e" % self.redshifts[lvl],
- shape = self.z_shape, label = "z=%0.3f" % self.redshifts[lvl]))
- # Based on the suffix of the file name, write out the result to a file.
- suffix = filename.split(".")[-1]
- if suffix == "gv": suffix = "raw"
- mylog.info("Writing %s format %s to disk." % (suffix, filename))
- self.graph.write("%s" % filename, format=suffix)
-
-def find_halo_relationships(output1_id, output2_id, output_basename = None,
- radius = 0.10, external_FOF=True,
- FOF_directory='FOF'):
- r"""Calculate the parentage and child relationships between two EnzoFOF
- halo catalogs.
-
- This function performs a very simple merger tree calculation between two
- sets of halos. For every halo in the second halo catalog, it looks to the
- first halo catalog to find the parents by looking at particle IDs. The
- particle IDs from the child halos are identified in potential parents, and
- then both percent-of-parent and percent-to-child values are recorded.
-
-    Note that this works with catalogs constructed by Enzo's FOF halo
-    finder when used in external_FOF=True mode, whereas it works with
-    catalogs constructed by yt's FOF halo finder in external_FOF=False mode.
-
- Parameters
- ----------
- output1_id : int
- This is the integer output id of the (first) halo catalog to parse and
- load.
- output2_id : int
- This is the integer output id of the (second) halo catalog to parse and
- load.
- output_basename : string
- If provided, both .cpkl and .txt files containing the parentage
- relationships will be output.
- radius : float, default to 0.10
- In absolute units, the radius to examine when guessing possible
- parent/child relationships. If this value is too small, you will miss
- possible relationships.
- FOF_directory : str, optional
- Directory where FOF files are located
-
- Returns
- -------
- pfrac : dict
- This is a dict of dicts. The first key is the parent halo id, the
- second is the child halo id. The values are the percent contributed
- from parent to child and the percent of a child that came from the
- parent.
- """
- mylog.info("Parsing Halo Catalog %04i", output1_id)
- HC1 = HaloCatalog(output1_id, False, external_FOF=external_FOF, \
- FOF_directory=FOF_directory)
- mylog.info("Parsing Halo Catalog %04i", output2_id)
- HC2 = HaloCatalog(output2_id, True, external_FOF=external_FOF, \
- FOF_directory=FOF_directory)
- mylog.info("Calculating fractions")
- pfrac = HC1.calculate_parentage_fractions(HC2)
-
- if output_basename is not None and pfrac != {}:
- f = open("%s.txt" % (output_basename), "w")
- for hid1 in sorted(pfrac):
- for hid2 in sorted(pfrac[hid1]):
- if not str(hid2).isdigit(): continue
- p1, p2, npart = pfrac[hid1][hid2]
- if p1 == 0.0: continue
- f.write( "Halo %s (%s) contributed %0.3e of its particles to %s (%s), which makes up %0.3e of that halo\n" % (
- hid1, output1_id, p2, hid2, output2_id, p1))
- f.close()
-
- cPickle.dump(pfrac, open("%s.cpkl" % (output_basename), "wb"))
-
- return HC1.redshift, HC2.redshift, pfrac
-
-def grab_FOF_halo_info_internal(filename, halo_id):
- """
- Finds a specific halo's information in the FOF group output information
- and pass relevant parameters to caller.
- """
- # open up FOF file to parse for details
- groups_file = open(filename, 'r')
- for line in groups_file:
- if line.startswith("#"): continue
- if int(line.split()[0]) == halo_id:
- ar = np.array(line.split()).astype('float64')
- return ar[1], ar[4:7], ar[7:13] # mass, xyz_dens, xyzvxvyvz_COM
-
-def plot_halo_evolution(filename, halo_id, x_quantity='cycle', y_quantity='mass',
- x_log=False, y_log=True, FOF_directory='FOF'):
- """
- Once you have generated a file using the
- EnzoFOFMergerTree.save_halo_evolution function, this is a simple way of
- plotting the evolution in the quantities of that halo over its lifetime.
-
- Parameters
- ----------
- filename : str
- The filename to which you saved the hdf5 data from save_halo_evolution
- halo_id : int
- The halo in 'filename' that you want to follow
- x_quantity : str, optional
- The quantity that you want to plot as the x_coord.
- Valid options are:
-
- * cycle
- * mass
- * fraction
- * halo_id
- * redshift
- * dense_x
- * dense_y
- * dense_z
- * COM_x
- * COM_y
- * COM_z
- * COM_vx
- * COM_vy
- * COM_vz
-
- y_quantity : str, optional
- The quantity that you want to plot as the y_coord.
- x_log : bool, optional
- Do you want the x-axis to be in log or linear?
- y_log : bool, optional
- Do you want the y-axis to be in log or linear?
- FOF_directory : str, optional
- Directory where FOF files (and hdf file) are located
-
- Examples
- --------
-
- >>> # generates mass history plots for the 20 most massive halos at t_fin.
- >>> ts = DatasetSeries.from_filenames("DD????/DD????")
- >>> # long step--must run FOF on each DD, but saves outputs for later use
- >>> for ds in ts:
- ... halo_list = FOFHaloFinder(ds)
- ... i = int(ds.basename[2:])
- ... halo_list.write_out("FOF/groups_%05i.txt" % i)
- ... halo_list.write_particle_lists("FOF/particles_%05i" % i)
- ...
- >>> mt = EnzoFOFMergerTree(external_FOF=False)
- >>> for i in range(20):
- ... mt.build_tree(i)
- ... mt.save_halo_evolution('halos.h5')
- ...
- >>> for i in range(20):
- ... plot_halo_evolution('halos.h5', i)
- """
- import matplotlib.pyplot as plt
- f = h5py.File("%s/%s" % (FOF_directory, filename), 'r')
- basename = os.path.splitext(filename)[0]
- halo = "halo%05d" % halo_id
- basename = basename + "_" + halo
- g = f[halo]
- values = list(g)
- index_dict = {'x' : 0, 'y' : 1, 'z' : 2, 'vx' : 3, 'vy' : 4, 'vz' : 5}
- coords = {}
- fields = {}
- for i, quantity in enumerate((x_quantity, y_quantity)):
- field = quantity
- if quantity.startswith('COM'):
- index = index_dict[quantity.split('_')[-1]]
- quantity = ('COM')
- if quantity.startswith('dense'):
- index = index_dict[quantity.split('_')[-1]]
- quantity = ('densest_point')
- if quantity not in values:
- exit('%s not in list of values in %s for halo %d' % \
- (quantity, filename, halo_id))
- if not field == quantity:
- coords[i] = g[quantity][index,:]
- else:
- coords[i] = g[quantity]
- if len(coords[i]) == 1:
- # ("Only 1 value for Halo %d. Ignoring." % halo_id)
- return
- fields[i] = field
-
- ax = plt.axes()
- ax.plot(coords[0], coords[1])
- ax.set_title(basename)
- ax.set_xlabel(fields[0])
- ax.set_ylabel(fields[1])
- if x_log:
- ax.set_xscale("log")
- if y_log:
- ax.set_yscale("log")
- ofn = "%s/%s_%s_%s.png" % (FOF_directory, basename, fields[0], fields[1])
- plt.savefig(ofn)
- plt.clf()
diff --git a/yt/analysis_modules/halo_analysis/halo_callbacks.py b/yt/analysis_modules/halo_analysis/halo_callbacks.py
deleted file mode 100644
index 3c828724385..00000000000
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ /dev/null
@@ -1,588 +0,0 @@
-"""
-Halo callback object
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-import os
-
-from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
- periodic_distance
-from yt.data_objects.profiles import \
- create_profile
-from yt.frontends.ytdata.utilities import \
- _hdf5_yt_array, \
- _yt_array_hdf5
-from yt.units.yt_array import \
- YTArray
-from yt.utilities.exceptions import \
- YTSphereTooSmall
-from yt.funcs import \
- ensure_list
-from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.operator_registry import \
- OperatorRegistry
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- parallel_root_only
-from yt.visualization.profile_plotter import \
- PhasePlot
-
-callback_registry = OperatorRegistry()
-
-def add_callback(name, function):
- callback_registry[name] = HaloCallback(function)
-
-class HaloCallback(object):
- r"""
- A HaloCallback is a function that minimally takes in a Halo object
- and performs some analysis on it. This function may attach attributes
- to the Halo object, write out data, etc, but does not return anything.
- """
- def __init__(self, function, args=None, kwargs=None):
- self.function = function
- self.args = args
- if self.args is None: self.args = []
- self.kwargs = kwargs
- if self.kwargs is None: self.kwargs = {}
-
- def __call__(self, halo):
- self.function(halo, *self.args, **self.kwargs)
- return True
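# Illustrative sketch of how the registry above is used: any function whose
# first argument is a halo can be registered under a name and then attached to
# a HaloCatalog by that name (typically something like
# hc.add_callback("print_mass")). The quantity names used here are assumptions
# about what the catalog provides; "particle_identifier" mirrors the callbacks
# later in this file.
def print_halo_mass(halo, units="Msun"):
    mylog.info("Halo %d has mass %s." %
               (halo.quantities["particle_identifier"],
                halo.quantities["particle_mass"].in_units(units)))

add_callback("print_mass", print_halo_mass)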
-
-def halo_sphere(halo, radius_field="virial_radius", factor=1.0,
- field_parameters=None):
- r"""
- Create a sphere data container to associate with a halo.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- radius_field : string
- Field to be retrieved from the quantities dictionary as
- the basis of the halo radius.
- Default: "virial_radius".
- factor : float
- Factor to be multiplied by the base radius for defining
- the radius of the sphere.
- Default: 1.0.
- field_parameters : dict
- Dictionary of field parameters to be set with the sphere
- created.
-
- """
-
- dds = halo.halo_catalog.data_ds
- center = dds.arr([halo.quantities["particle_position_%s" % axis] \
- for axis in "xyz"])
- radius = factor * halo.quantities[radius_field]
- if radius <= 0.0:
- halo.data_object = None
- return
- try:
- sphere = dds.sphere(center, radius)
- except YTSphereTooSmall:
- halo.data_object = None
- return
- if field_parameters is not None:
- for field, par in field_parameters.items():
- if isinstance(par, tuple) and par[0] == "quantity":
- value = halo.quantities[par[1]]
- else:
- value = par
- sphere.set_field_parameter(field, value)
- halo.data_object = sphere
-
-add_callback("sphere", halo_sphere)
-
-def sphere_field_max_recenter(halo, field):
- r"""
- Recenter the halo sphere on the location of the maximum of the given field.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- field : string
- Field to be used for recentering.
-
- """
-
- if halo.data_object is None: return
- s_ds = halo.data_object.ds
- old_sphere = halo.data_object
- max_vals = old_sphere.quantities.max_location(field)
- new_center = s_ds.arr(max_vals[1:])
- new_sphere = s_ds.sphere(new_center.in_units("code_length"),
- old_sphere.radius.in_units("code_length"))
- mylog.info("Moving sphere center from %s to %s." % (old_sphere.center,
- new_sphere.center))
- for par, value in old_sphere.field_parameters.items():
- if par not in new_sphere.field_parameters:
- new_sphere.set_field_parameter(par, value)
- halo.data_object = new_sphere
-
-add_callback("sphere_field_max_recenter", sphere_field_max_recenter)
-
-def sphere_bulk_velocity(halo):
- r"""
- Set the bulk velocity for the sphere.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
-
- """
-
- halo.data_object.set_field_parameter("bulk_velocity",
- halo.data_object.quantities.bulk_velocity(use_particles=True))
-
-add_callback("sphere_bulk_velocity", sphere_bulk_velocity)
-
-def profile(halo, bin_fields, profile_fields, n_bins=32, extrema=None, logs=None, units=None,
- weight_field="cell_mass", accumulation=False, fractional=False,
- storage="profiles", output_dir="."):
- r"""
- Create 1, 2, or 3D profiles of a halo.
-
- Store profile data in a dictionary associated with the halo object.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- bin_fields : list of strings
- The binning fields for the profile.
- profile_fields : string or list of strings
- The fields to be profiled.
- n_bins : int or list of ints
-        The number of bins in each dimension.  If None, 32 bins are
-        used for each bin field.
- Default: 32.
- extrema : dict of min, max tuples
- Minimum and maximum values of the bin_fields for the profiles.
- The keys correspond to the field names. Defaults to the extrema
- of the bin_fields of the dataset. If a units dict is provided, extrema
- are understood to be in the units specified in the dictionary.
- logs : dict of boolean values
- Whether or not to log the bin_fields for the profiles.
- The keys correspond to the field names. Defaults to the take_log
- attribute of the field.
- units : dict of strings
- The units of the fields in the profiles, including the bin_fields.
- weight_field : string
- Weight field for profiling.
- Default : "cell_mass"
- accumulation : bool or list of bools
- If True, the profile values for a bin n are the cumulative sum of
- all the values from bin 0 to n. If -True, the sum is reversed so
- that the value for bin n is the cumulative sum from bin N (total bins)
- to n. If the profile is 2D or 3D, a list of values can be given to
- control the summation in each dimension independently.
- Default: False.
-    fractional : bool
-        If True, the profile values are divided by the sum of all
-        the profile data such that the profile represents a probability
-        distribution function.
- storage : string
- Name of the dictionary to store profiles.
- Default: "profiles"
- output_dir : string
- Name of directory where profile data will be written. The full path will be
- the output_dir of the halo catalog concatenated with this directory.
- Default : "."
-
- """
-
- mylog.info("Calculating 1D profile for halo %d." %
- halo.quantities["particle_identifier"])
-
- dds = halo.halo_catalog.data_ds
-
- if dds is None:
- raise RuntimeError("Profile callback requires a data ds.")
-
- if not hasattr(halo, "data_object"):
- raise RuntimeError("Profile callback requires a data container.")
-
- if halo.data_object is None:
- mylog.info("Skipping halo %d since data_object is None." %
- halo.quantities["particle_identifier"])
- return
-
- if output_dir is None:
- output_dir = storage
- output_dir = os.path.join(halo.halo_catalog.output_dir, output_dir)
-
- bin_fields = ensure_list(bin_fields)
- my_profile = create_profile(halo.data_object, bin_fields, profile_fields, n_bins=n_bins,
- extrema=extrema, logs=logs, units=units, weight_field=weight_field,
- accumulation=accumulation, fractional=fractional)
-
- prof_store = dict([(field, my_profile[field]) \
- for field in my_profile.field_data])
- prof_store[my_profile.x_field] = my_profile.x
- if len(bin_fields) > 1:
- prof_store[my_profile.y_field] = my_profile.y
- if len(bin_fields) > 2:
- prof_store[my_profile.z_field] = my_profile.z
- if hasattr(halo, storage):
- halo_store = getattr(halo, storage)
- if "used" in halo_store:
- halo_store["used"] &= my_profile.used
- else:
- halo_store = {"used": my_profile.used}
- setattr(halo, storage, halo_store)
- halo_store.update(prof_store)
-
- if my_profile.standard_deviation is not None:
- variance_store = dict([(field, my_profile.standard_deviation[field]) \
- for field in my_profile.standard_deviation])
- variance_storage = "%s_variance" % storage
- if hasattr(halo, variance_storage):
- halo_variance_store = getattr(halo, variance_storage)
- else:
- halo_variance_store = {}
- setattr(halo, variance_storage, halo_variance_store)
- halo_variance_store.update(variance_store)
-
-add_callback("profile", profile)
-
-@parallel_root_only
-def save_profiles(halo, storage="profiles", filename=None,
- output_dir="."):
- r"""
- Save profile data to disk.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- storage : string
- Name of the dictionary attribute containing the profile data to be written.
- Default: "profiles"
- filename : string
- The name of the file to be written. The final filename will be
-        "<filename>_<halo id>.h5". If None, filename is set to the value given
- by the storage keyword.
- Default: None
- output_dir : string
- Name of directory where profile data will be written. The full path will be
- the output_dir of the halo catalog concatenated with this directory.
- Default : "."
-
- """
-
- if not hasattr(halo, storage):
- return
-
- if filename is None:
- filename = storage
- output_file = os.path.join(halo.halo_catalog.output_dir, output_dir,
- "%s_%06d.h5" % (filename,
- halo.quantities["particle_identifier"]))
- mylog.info("Saving halo %d profile data to %s." %
- (halo.quantities["particle_identifier"], output_file))
-
- fh = h5py.File(output_file, mode="w")
- my_profile = getattr(halo, storage)
- profile_group = fh.create_group("profiles")
- for field in my_profile:
- # Don't write code units because we might not know those later.
- if isinstance(my_profile[field], YTArray):
- my_profile[field].convert_to_cgs()
- _yt_array_hdf5(profile_group, str(field), my_profile[field])
- variance_storage = "%s_variance" % storage
- if hasattr(halo, variance_storage):
- my_profile = getattr(halo, variance_storage)
- variance_group = fh.create_group("variance")
- for field in my_profile:
- # Don't write code units because we might not know those later.
- if isinstance(my_profile[field], YTArray):
- my_profile[field].convert_to_cgs()
- _yt_array_hdf5(variance_group, str(field), my_profile[field])
- fh.close()
-
-add_callback("save_profiles", save_profiles)
-
-def load_profiles(halo, storage="profiles", fields=None,
- filename=None, output_dir="."):
- r"""
- Load profile data from disk.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- storage : string
- Name of the dictionary attribute to store profile data.
- Default: "profiles"
- fields : string or list of strings
- The fields to be loaded. If None, all fields present will be loaded.
- Default : None
- filename : string
- The name of the file to be loaded. The final filename will be
-        "<filename>_<halo id>.h5". If None, filename is set to the value given
- by the storage keyword.
- Default: None
- output_dir : string
- Name of directory where profile data will be read. The full path will be
- the output_dir of the halo catalog concatenated with this directory.
- Default : "."
-
- """
-
- if filename is None:
- filename = storage
- output_file = os.path.join(halo.halo_catalog.output_dir, output_dir,
- "%s_%06d.h5" % (filename,
- halo.quantities["particle_identifier"]))
- if not os.path.exists(output_file):
- raise RuntimeError("Profile file not found: %s." % output_file)
- mylog.info("Loading halo %d profile data from %s." %
- (halo.quantities["particle_identifier"], output_file))
-
- fh = h5py.File(output_file, mode="r")
- if fields is None:
- profile_fields = fh["profiles"].keys()
- else:
- profile_fields = fields
- my_profile = {}
- my_group = fh["profiles"]
- for field in profile_fields:
- if field not in my_group:
- raise RuntimeError("%s field not present in %s." % (field, output_file))
- my_profile[field] = _hdf5_yt_array(my_group, field,
- ds=halo.halo_catalog.halos_ds)
- setattr(halo, storage, my_profile)
-
- if "variance" in fh:
- my_variance = {}
- my_group = fh["variance"]
- if fields is None:
- profile_fields = my_group.keys()
- for field in profile_fields:
- if field not in my_group:
- raise RuntimeError("%s field not present in %s." % (field, output_file))
- my_variance[field] = _hdf5_yt_array(my_group, field,
- ds=halo.halo_catalog.halos_ds)
- setattr(halo, "%s_variance" % storage, my_variance)
-
- fh.close()
-
-add_callback("load_profiles", load_profiles)
-
-def virial_quantities(halo, fields,
- overdensity_field=("gas", "overdensity"),
- critical_overdensity=200,
- profile_storage="profiles"):
- r"""
- Calculate the value of the given fields at the virial radius defined at
- the given critical density by interpolating from radial profiles.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- fields : string or list of strings
- The fields whose virial values are to be calculated.
- overdensity_field : string or tuple of strings
- The field used as the overdensity from which interpolation is done to
- calculate virial quantities.
- Default: ("gas", "overdensity")
- critical_overdensity : float
- The value of the overdensity at which to evaluate the virial quantities.
- Overdensity is with respect to the critical density.
- Default: 200
- profile_storage : string
- Name of the halo attribute that holds the profiles to be used.
- Default: "profiles"
-
- """
-
- mylog.info("Calculating virial quantities for halo %d." %
- halo.quantities["particle_identifier"])
-
- fields = ensure_list(fields)
- fields = [halo.data_object._determine_fields(field)[0]
- for field in fields]
-
- dds = halo.halo_catalog.data_ds
- profile_data = getattr(halo, profile_storage)
-
- if overdensity_field not in profile_data:
- raise RuntimeError("virial_quantities callback requires profile of %s." %
- str(overdensity_field))
-
- overdensity = profile_data[overdensity_field]
- dfilter = np.isfinite(overdensity) & profile_data["used"] & (overdensity > 0)
-
- v_fields = {}
- for field in fields:
- if isinstance(field, tuple):
- my_field = field[-1]
- else:
- my_field = field
- v_fields[field] = my_field
- v_field = "%s_%d" % (my_field, critical_overdensity)
- if v_field not in halo.halo_catalog.quantities:
- halo.halo_catalog.quantities.append(v_field)
- vquantities = dict([("%s_%d" % (v_fields[field], critical_overdensity),
- dds.quan(0, profile_data[field].units)) \
- for field in fields])
-
- if dfilter.sum() < 2:
- halo.quantities.update(vquantities)
- return
-
- # find interpolation index
- # require a negative slope, but not monotonicity
- vod = overdensity[dfilter].to_ndarray()
- if (vod > critical_overdensity).all():
- if vod[-1] < vod[-2]:
- index = -2
- else:
- halo.quantities.update(vquantities)
- return
- elif (vod < critical_overdensity).all():
- if vod[0] > vod[1]:
- index = 0
- else:
- halo.quantities.update(vquantities)
- return
- else:
- # take first instance of downward intersection with critical value
- intersections = (vod[:-1] >= critical_overdensity) & \
- (vod[1:] < critical_overdensity)
- if not intersections.any():
- halo.quantities.update(vquantities)
- return
- index = np.where(intersections)[0][0]
-
- for field in fields:
- v_prof = profile_data[field][dfilter].to_ndarray()
- slope = np.log(v_prof[index + 1] / v_prof[index]) / \
- np.log(vod[index + 1] / vod[index])
- value = dds.quan(np.exp(slope * np.log(critical_overdensity /
- vod[index])) * v_prof[index],
- profile_data[field].units).in_cgs()
- vquantities["%s_%d" % (v_fields[field], critical_overdensity)] = value
-
- halo.quantities.update(vquantities)
-
-add_callback("virial_quantities", virial_quantities)
-
-def phase_plot(halo, output_dir=".", phase_args=None, phase_kwargs=None):
- r"""
- Make a phase plot for the halo object.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- output_dir : string
-        Name of directory where the phase plot will be written. The full path will be
- the output_dir of the halo catalog concatenated with this directory.
- Default : "."
- phase_args : list
- List of arguments to be given to PhasePlot.
- phase_kwargs : dict
- Dictionary of keyword arguments to be given to PhasePlot.
-
- """
-
- if phase_args is None:
- phase_args = []
- if phase_kwargs is None:
- phase_kwargs = {}
-
- try:
- plot = PhasePlot(halo.data_object, *phase_args, **phase_kwargs)
- plot.save(os.path.join(halo.halo_catalog.output_dir, output_dir,
- "halo_%06d" % halo.quantities["particle_identifier"]))
- except ValueError:
- return
-
-add_callback("phase_plot", phase_plot)
-
-def delete_attribute(halo, attribute):
- r"""
- Delete attribute from halo object.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- attribute : string
- The attribute to be deleted.
-
- """
-
- if hasattr(halo, attribute):
- delattr(halo, attribute)
-
-add_callback("delete_attribute", delete_attribute)
-
-def iterative_center_of_mass(halo, radius_field="virial_radius", inner_ratio=0.1, step_ratio=0.9,
- units="pc"):
- r"""
- Adjust halo position by iteratively recalculating the center of mass while
- decreasing the radius.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- radius_field : string
- The halo quantity to be used as the radius for the sphere.
- Default: "virial_radius"
- inner_ratio : float
- The ratio of the smallest sphere radius used for calculating the center of
- mass to the initial radius. The sphere radius is reduced and center of mass
- recalculated until the sphere has reached this size.
- Default: 0.1
- step_ratio : float
- The multiplicative factor used to reduce the radius of the sphere after the
- center of mass is calculated.
- Default: 0.9
- units : str
- The units for printing out the distance between the initial and final centers.
- Default : "pc"
-
- """
- if inner_ratio <= 0.0 or inner_ratio >= 1.0:
- raise RuntimeError("iterative_center_of_mass: inner_ratio must be between 0 and 1.")
- if step_ratio <= 0.0 or step_ratio >= 1.0:
- raise RuntimeError("iterative_center_of_mass: step_ratio must be between 0 and 1.")
-
- center_orig = halo.halo_catalog.data_ds.arr([halo.quantities["particle_position_%s" % axis]
- for axis in "xyz"])
- sphere = halo.halo_catalog.data_ds.sphere(center_orig, halo.quantities[radius_field])
-
- while sphere.radius > inner_ratio * halo.quantities[radius_field]:
- new_center = sphere.quantities.center_of_mass(use_gas=True, use_particles=True)
- sphere = sphere.ds.sphere(new_center, step_ratio * sphere.radius)
-
- distance = periodic_distance(center_orig.in_units("code_length").to_ndarray(),
- new_center.in_units("code_length").to_ndarray())
- distance = halo.halo_catalog.data_ds.quan(distance, "code_length")
- mylog.info("Recentering halo %d %f %s away." %
- (halo.quantities["particle_identifier"],
- distance.in_units(units), units))
-
- for i, axis in enumerate("xyz"):
- halo.quantities["particle_position_%s" % axis] = sphere.center[i]
- del sphere
-
-add_callback("iterative_center_of_mass", iterative_center_of_mass)
diff --git a/yt/analysis_modules/halo_analysis/halo_catalog.py b/yt/analysis_modules/halo_analysis/halo_catalog.py
deleted file mode 100644
index 0eb40d5d43c..00000000000
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ /dev/null
@@ -1,508 +0,0 @@
-"""
-HaloCatalog object
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-import os
-
-from yt.frontends.ytdata.utilities import \
- save_as_dataset
-from yt.funcs import \
- ensure_dir, \
- get_pbar, \
- mylog
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
- ParallelAnalysisInterface, \
- parallel_blocking_call, \
- parallel_objects
-
-from .halo_object import \
- Halo
-from .halo_callbacks import \
- callback_registry
-from .halo_filters import \
- filter_registry
-from .halo_finding_methods import \
- finding_method_registry
-from .halo_quantities import \
- quantity_registry
-from .halo_recipes import \
- recipe_registry
-
-class HaloCatalog(ParallelAnalysisInterface):
- r"""Create a HaloCatalog: an object that allows for the creation and association
- of data with a set of halo objects.
-
- A HaloCatalog object pairs a simulation dataset and the output from a halo finder,
- allowing the user to perform analysis on each of the halos found by the halo finder.
- Analysis is performed by providing callbacks: functions that accept a Halo object
- and perform independent analysis, return a quantity to be associated with the halo,
-    or return True or False indicating whether a halo meets various criteria. The resulting
-    set of quantities associated with each halo is then written out to disk as a "halo catalog."
- This halo catalog can then be loaded in with yt as any other simulation dataset.
-
- Parameters
- ----------
- halos_ds : str
- Dataset created by a halo finder. If None, a halo finder should be
- provided with the finder_method keyword.
- data_ds : str
- Dataset created by a simulation.
- data_source : data container
- Data container associated with either the halos_ds or the data_ds.
- finder_method : str
- Halo finder to be used if no halos_ds is given.
- output_dir : str
- The top level directory into which analysis output will be written.
- Default: "."
- finder_kwargs : dict
- Arguments to pass to the halo finder if finder_method is given.
-
- Examples
- --------
-
- >>> # create profiles or overdensity vs. radius for each halo and save to disk
- >>> import yt
- >>> from yt.analysis_modules.halo_analysis.api import *
- >>> data_ds = yt.load("DD0064/DD0064")
- >>> halos_ds = yt.load("rockstar_halos/halos_64.0.bin",
- ... output_dir="halo_catalogs/catalog_0064")
- >>> hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
- >>> # filter out halos with mass < 1e13 Msun
- >>> hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
- >>> # create a sphere object with radius of 2 times the virial_radius field
- >>> hc.add_callback("sphere", factor=2.0, radius_field="virial_radius")
- >>> # make radial profiles
- >>> hc.add_callback("profile", "radius", [("gas", "overdensity")],
- ... weight_field="cell_volume", accumulation=True)
- >>> # save the profiles to disk
- >>> hc.add_callback("save_profiles", output_dir="profiles")
- >>> # create the catalog
- >>> hc.create()
-
- >>> # load in the saved halo catalog and all the profile data
- >>> halos_ds = yt.load("halo_catalogs/catalog_0064/catalog_0064.0.h5")
- >>> hc = HaloCatalog(halos_ds=halos_ds,
-    ...                  output_dir="halo_catalogs/catalog_0064")
- >>> hc.add_callback("load_profiles", output_dir="profiles")
- >>> hc.load()
-
- See Also
- --------
- add_callback, add_filter, add_quantity, add_recipe
-
- """
-
- def __init__(self, halos_ds=None, data_ds=None,
- data_source=None, finder_method=None,
- finder_kwargs=None,
- output_dir="halo_catalogs/catalog"):
- ParallelAnalysisInterface.__init__(self)
- self.halos_ds = halos_ds
- self.data_ds = data_ds
- self.output_dir = ensure_dir(output_dir)
- if os.path.basename(self.output_dir) != ".":
- self.output_prefix = os.path.basename(self.output_dir)
- else:
- self.output_prefix = "catalog"
-
- if halos_ds is None:
- if data_ds is None:
- raise RuntimeError("Must specify a halos_ds, data_ds, or both.")
- if finder_method is None:
- raise RuntimeError("Must specify a halos_ds or a finder_method.")
-
- if data_source is None:
- if halos_ds is not None:
- halos_ds.index
- data_source = halos_ds.all_data()
- else:
- data_source = data_ds.all_data()
- self.data_source = data_source
-
- self.finder_method_name = finder_method
- if finder_kwargs is None:
- finder_kwargs = {}
- if finder_method is not None:
- finder_method = finding_method_registry.find(finder_method,
- **finder_kwargs)
- self.finder_method = finder_method
-
- # all of the analysis actions to be performed: callbacks, filters, and quantities
- self.actions = []
- # fields to be written to the halo catalog
- self.quantities = []
- if self.halos_ds is not None:
- self.add_default_quantities()
-
- def add_callback(self, callback, *args, **kwargs):
- r"""
- Add a callback to the halo catalog action list.
-
- A callback is a function that accepts and operates on a Halo object and
- does not return anything. Callbacks must exist within the callback_registry.
- Give additional args and kwargs to be passed to the callback here.
-
- Parameters
- ----------
- callback : string
- The name of the callback.
-
- Examples
- --------
-
- >>> # Here, a callback is defined and added to the registry.
- >>> def _say_something(halo, message):
- ... my_id = halo.quantities['particle_identifier']
- ... print "Halo %d: here is a message - %s." % (my_id, message)
- >>> add_callback("hello_world", _say_something)
-
- >>> # Now this callback is accessible to the HaloCatalog object
- >>> hc.add_callback("hello_world", "this is my message")
-
- """
- callback = callback_registry.find(callback, *args, **kwargs)
- if "output_dir" in kwargs is not None:
- ensure_dir(os.path.join(self.output_dir, kwargs["output_dir"]))
- self.actions.append(("callback", callback))
-
- def add_quantity(self, key, *args, **kwargs):
- r"""
- Add a quantity to the halo catalog action list.
-
-        A quantity is a function that accepts a Halo object and returns a value or
- values. These values are stored in a "quantities" dictionary associated
- with the Halo object. Quantities must exist within the quantity_registry.
- Give additional args and kwargs to be passed to the quantity function here.
-
- Parameters
- ----------
- key : string
-            The name of the quantity.
- field_type : string
- If not None, the quantity is the value of the field provided by the
- key parameter, taken from the halo finder dataset. This is the way
- one pulls values for the halo from the halo dataset.
- Default : None
-
- Examples
- --------
-
- >>> # pull the virial radius from the halo finder dataset
- >>> hc.add_quantity("virial_radius", field_type="halos")
-
- >>> # define a custom quantity and add it to the register
- >>> def _mass_squared(halo):
- ... # assume some entry "particle_mass" exists in the quantities dict
- ... return halo.quantities["particle_mass"]**2
- >>> add_quantity("mass_squared", _mass_squared)
-
- >>> # add it to the halo catalog action list
- >>> hc.add_quantity("mass_squared")
-
- """
- if "field_type" in kwargs:
- field_type = kwargs.pop("field_type")
- else:
- field_type = None
- prepend = kwargs.pop("prepend",False)
- if field_type is None:
- quantity = quantity_registry.find(key, *args, **kwargs)
- elif (field_type, key) in self.halos_ds.field_info:
- quantity = (field_type, key)
- else:
- raise RuntimeError("HaloCatalog quantity must be a registered function or a field of a known type.")
- self.quantities.append(key)
- if prepend:
- self.actions.insert(0, ("quantity", (key, quantity)))
- else:
- self.actions.append(("quantity", (key, quantity)))
-
- def add_filter(self, halo_filter, *args, **kwargs):
- r"""
- Add a filter to the halo catalog action list.
-
- A filter is a function that accepts a Halo object and returns either True
- or False. If True, any additional actions added to the list are carried out
- and the results are added to the final halo catalog. If False, any further
- actions are skipped and the halo will be omitted from the final catalog.
- Filters must exist within the filter_registry. Give additional args and kwargs
- to be passed to the filter function here.
-
- Parameters
- ----------
- halo_filter : string
- The name of the filter.
-
- Examples
- --------
-
- >>> # define a filter and add it to the register.
- >>> def _my_filter(halo, mass_value):
- ... return halo.quantities["particle_mass"] > YTQuantity(mass_value, "Msun")
- >>> # add it to the register
- >>> add_filter("mass_filter", _my_filter)
-
- >>> # add the filter to the halo catalog actions
- >>> hc.add_filter("mass_value", 1e12)
-
- """
-
- halo_filter = filter_registry.find(halo_filter, *args, **kwargs)
- self.actions.append(("filter", halo_filter))
-
- def add_recipe(self, recipe, *args, **kwargs):
- r"""
- Add a recipe to the halo catalog action list.
-
- A recipe is an operation consisting of a series of callbacks, quantities,
- and/or filters called in succession. Recipes can be used to store a more
- complex series of analysis tasks as a single entity.
-
- Currently, the available recipe is ``calculate_virial_quantities``.
-
- Parameters
- ----------
-
- halo_recipe : string
- The name of the recipe.
-
- Examples
- --------
-
- >>> import yt
- >>> from yt.analysis_modules.halo_analysis.api import HaloCatalog
- >>>
- >>> data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
- >>> halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
- >>> hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
- >>>
- >>> # Filter out less massive halos
- >>> hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun")
- >>>
- >>> # Calculate virial radii
- >>> hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
- >>>
- >>> hc.create()
-
- """
-
- halo_recipe = recipe_registry.find(recipe, *args, **kwargs)
- halo_recipe(self)
-
- def create(self, save_halos=False, save_catalog=True, njobs=-1, dynamic=False):
- r"""
- Create the halo catalog given the callbacks, quantities, and filters that
- have been provided.
-
- This is a wrapper around the main _run function with default arguments tuned
- for halo catalog creation. By default, halo objects are not saved but the
- halo catalog is written, opposite to the behavior of the load function.
-
- Parameters
- ----------
- save_halos : bool
- If True, a list of all Halo objects is retained under the "halo_list"
-            attribute. If False, only the compiled quantities are saved under the
- "catalog" attribute.
- Default: False
- save_catalog : bool
- If True, save the final catalog to disk.
- Default: True
- njobs : int
- The number of jobs over which to divide halo analysis. Choose -1
- to allocate one processor per halo.
- Default: -1
-        dynamic : bool
- If False, halo analysis is divided evenly between all available processors.
- If True, parallelism is performed via a task queue.
- Default: False
-
- See Also
- --------
- load
-
- """
- self._run(save_halos, save_catalog, njobs=njobs, dynamic=dynamic)
-
- def load(self, save_halos=True, save_catalog=False, njobs=-1, dynamic=False):
- r"""
- Load a previously created halo catalog.
-
- This is a wrapper around the main _run function with default arguments tuned
- for reloading halo catalogs and associated data. By default, halo objects are
- saved and the halo catalog is not written, opposite to the behavior of the
- create function.
-
- Parameters
- ----------
- save_halos : bool
- If True, a list of all Halo objects is retained under the "halo_list"
-            attribute. If False, only the compiled quantities are saved under the
- "catalog" attribute.
- Default: True
- save_catalog : bool
- If True, save the final catalog to disk.
- Default: False
- njobs : int
- The number of jobs over which to divide halo analysis. Choose -1
- to allocate one processor per halo.
- Default: -1
-        dynamic : bool
- If False, halo analysis is divided evenly between all available processors.
- If True, parallelism is performed via a task queue.
- Default: False
-
- See Also
- --------
- create
-
- """
- self._run(save_halos, save_catalog, njobs=njobs, dynamic=dynamic)
-
- @parallel_blocking_call
- def _run(self, save_halos, save_catalog, njobs=-1, dynamic=False):
- r"""
- Run the requested halo analysis.
-
- Parameters
- ----------
- save_halos : bool
- If True, a list of all Halo objects is retained under the "halo_list"
-            attribute. If False, only the compiled quantities are saved under the
- "catalog" attribute.
- save_catalog : bool
- If True, save the final catalog to disk.
- njobs : int
- The number of jobs over which to divide halo analysis. Choose -1
- to allocate one processor per halo.
- Default: -1
-        dynamic : bool
- If False, halo analysis is divided evenly between all available processors.
- If True, parallelism is performed via a task queue.
- Default: False
-
- See Also
- --------
- create, load
-
- """
- self.catalog = []
- if save_halos: self.halo_list = []
-
- if self.halos_ds is None:
- # Find the halos and make a dataset of them
- self.halos_ds = self.finder_method(self.data_ds)
- if self.halos_ds is None:
- mylog.warning('No halos were found for {0}'.format(\
- self.data_ds.basename))
- if save_catalog:
- self.halos_ds = self.data_ds
- self.save_catalog()
- self.halos_ds = None
- return
- self.halos_ds.index
-
- # Assign ds and data sources appropriately
- self.data_source = self.halos_ds.all_data()
-
- # Add all of the default quantities that all halos must have
- self.add_default_quantities('all')
-
- halo_index = np.argsort(self.data_source["all", "particle_identifier"])
- # If we have just run hop or fof, halos are already divided amongst processors.
- if self.finder_method_name in ["hop", "fof"]:
- my_index = halo_index
- nhalos = self.comm.mpi_allreduce(halo_index.size, op="sum")
- else:
- my_index = parallel_objects(halo_index, njobs=njobs, dynamic=dynamic)
- nhalos = halo_index.size
-
- my_i = 0
- my_n = self.comm.size
- pbar = get_pbar("Creating catalog", nhalos, parallel=True)
- for i in my_index:
- my_i += min(my_n, nhalos - my_i)
- new_halo = Halo(self)
- halo_filter = True
- for action_type, action in self.actions:
- if action_type == "callback":
- action(new_halo)
- elif action_type == "filter":
- halo_filter = action(new_halo)
- if not halo_filter:
- pbar.update(my_i)
- break
- elif action_type == "quantity":
- key, quantity = action
- if quantity in self.halos_ds.field_info:
- new_halo.quantities[key] = \
- self.data_source[quantity][int(i)]
- elif callable(quantity):
- new_halo.quantities[key] = quantity(new_halo)
- else:
- raise RuntimeError(
- "Action must be a callback, filter, or quantity.")
-
- if halo_filter:
- for quantity in new_halo.quantities.values():
- if hasattr(quantity, "units"):
- quantity.convert_to_base()
- self.catalog.append(new_halo.quantities)
-
- if save_halos and halo_filter:
- self.halo_list.append(new_halo)
- else:
- del new_halo
-
- pbar.update(my_i)
-
- self.catalog.sort(key=lambda a:a['particle_identifier'].to_ndarray())
- if save_catalog:
- self.save_catalog()
-
- def save_catalog(self):
- "Write out hdf5 file with all halo quantities."
-
- filename = os.path.join(self.output_dir, "%s.%d.h5" %
- (self.output_prefix, self.comm.rank))
- n_halos = len(self.catalog)
- mylog.info("Saving halo catalog (%d halos) to %s." %
- (n_halos, os.path.join(self.output_dir,
- self.output_prefix)))
- extra_attrs = {"data_type": "halo_catalog",
- "num_halos": n_halos}
- data = {}
- ftypes = {}
- if n_halos > 0:
- for key in self.quantities:
- # This sets each field to be saved in the root hdf5 group,
- # as per the HaloCatalog format.
- ftypes[key] = "."
- data[key] = self.halos_ds.arr(
- [halo[key] for halo in self.catalog])
-
- save_as_dataset(self.halos_ds, filename, data,
- field_types=ftypes, extra_attrs=extra_attrs)
-
- def add_default_quantities(self, field_type='halos'):
- for field in ["particle_identifier", "particle_mass",
- "particle_position_x", "particle_position_y",
- "particle_position_z", "virial_radius"]:
- field_name = (field_type, field)
- if field_name not in self.halos_ds.field_list:
- mylog.warn("Halo dataset %s has no field %s." %
- (self.halos_ds, str(field_name)))
- continue
- self.add_quantity(field, field_type=field_type, prepend=True)
diff --git a/yt/analysis_modules/halo_analysis/halo_filters.py b/yt/analysis_modules/halo_analysis/halo_filters.py
deleted file mode 100644
index 9e5fdc151cc..00000000000
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ /dev/null
@@ -1,110 +0,0 @@
-"""
-Halo filter object
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013-2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.utilities.operator_registry import \
- OperatorRegistry
-from yt.utilities.on_demand_imports import \
- _scipy as scipy
-
-from .halo_callbacks import HaloCallback
-
-filter_registry = OperatorRegistry()
-
-def add_filter(name, function):
- filter_registry[name] = HaloFilter(function)
-
-class HaloFilter(HaloCallback):
- r"""
- A HaloFilter is a function that minimally takes a Halo object, performs
- some analysis, and returns either True or False. The return value determines
- whether the Halo is added to the final halo catalog being generated by the
- HaloCatalog object.
- """
- def __init__(self, function, *args, **kwargs):
- HaloCallback.__init__(self, function, args, kwargs)
-
- def __call__(self, halo):
- return self.function(halo, *self.args, **self.kwargs)
-
-def quantity_value(halo, field, operator, value, units):
- r"""
- Filter based on a value in the halo quantities dictionary.
-
- Parameters
- ----------
- halo : Halo object
- The Halo object to be provided by the HaloCatalog.
- field : string
- The field used for the evaluation.
- operator : string
- The comparison operator to be used ("<", "<=", "==", ">=", ">", etc.)
-    value : numeric
- The value to be compared against.
- units : string
- Units of the value to be compared.
-
- """
-
- if field not in halo.quantities:
- raise RuntimeError("Halo object does not contain %s quantity." % field)
-
- h_value = halo.quantities[field].in_units(units).to_ndarray()
- return eval("%s %s %s" % (h_value, operator, value))
-
-add_filter("quantity_value", quantity_value)
-
-def not_subhalo(halo, field_type="halos"):
- """
- Only return true if this halo is not a subhalo.
-
- This is used for halo finders such as Rockstar that output parent
- and subhalos together.
- """
-
- if not hasattr(halo.halo_catalog, "parent_dict"):
- halo.halo_catalog.parent_dict = \
- _create_parent_dict(halo.halo_catalog.data_source, ptype=field_type)
- return halo.halo_catalog.parent_dict[int(halo.quantities["particle_identifier"])] == -1
-add_filter("not_subhalo", not_subhalo)
-
-def _create_parent_dict(data_source, ptype="halos"):
- """
- Create a dictionary of halo parents to allow for filtering of subhalos.
-
- For a pair of halos whose distance is smaller than the radius of at least
- one of the halos, the parent is defined as the halo with the larger radius.
- Parent halos (halos with no parents of their own) have parent index values of -1.
- """
- pos = np.rollaxis(
- np.array([data_source[ptype, "particle_position_x"].in_units("Mpc"),
- data_source[ptype, "particle_position_y"].in_units("Mpc"),
- data_source[ptype, "particle_position_z"].in_units("Mpc")]), 1)
- rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray()
- ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int")
- parents = -1 * np.ones_like(ids, dtype="int")
- boxsize = data_source.ds.domain_width.in_units('Mpc')
- my_tree = scipy.spatial.cKDTree(pos, boxsize=boxsize)
-
- for i in range(ids.size):
- neighbors = np.array(
- my_tree.query_ball_point(pos[i], rad[i], p=2))
- if neighbors.size > 1:
- parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
-
- parents[ids == parents] = -1
- parent_dict = dict(zip(ids, parents))
- return parent_dict
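
The parent assignment above hinges on a periodic cKDTree ball query: any halo whose
center falls within another halo's radius is grouped with it, and the largest halo in
the group is taken as the parent. A self-contained sketch of the same query pattern on
synthetic positions and radii:

    # Periodic kd-tree ball query used for parent assignment, on synthetic halos.
    import numpy as np
    from scipy.spatial import cKDTree

    boxsize = 100.0                       # Mpc, synthetic
    pos = np.array([[10.0, 10.0, 10.0],   # large halo
                    [10.5, 10.0, 10.0],   # sits inside the large halo
                    [60.0, 60.0, 60.0]])  # isolated halo
    rad = np.array([2.0, 0.3, 1.0])
    ids = np.arange(pos.shape[0])
    parents = -1 * np.ones_like(ids)

    tree = cKDTree(pos, boxsize=boxsize)
    for i in range(ids.size):
        neighbors = np.array(tree.query_ball_point(pos[i], rad[i], p=2))
        if neighbors.size > 1:
            # the largest halo in the group is declared the parent
            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
    parents[ids == parents] = -1

    print(dict(zip(ids.tolist(), parents.tolist())))  # {0: -1, 1: 0, 2: -1}
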
diff --git a/yt/analysis_modules/halo_analysis/halo_finding_methods.py b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
deleted file mode 100644
index 4d9c98ad1be..00000000000
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ /dev/null
@@ -1,152 +0,0 @@
-"""
-Halo Finding methods
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.analysis_modules.halo_finding.halo_objects import \
- FOFHaloFinder, HOPHaloFinder
-from yt.frontends.stream.data_structures import \
- load_particles
-from yt.units.dimensions import length
-from yt.utilities.operator_registry import \
- OperatorRegistry
-
-finding_method_registry = OperatorRegistry()
-
-def add_finding_method(name, function):
- finding_method_registry[name] = HaloFindingMethod(function)
-
-class HaloFindingMethod(object):
- r"""
- A halo finding method is a callback that performs halo finding on a
- dataset and returns a new dataset that is the loaded halo finder output.
- """
- def __init__(self, function, args=None, kwargs=None):
- self.function = function
- self.args = args
- if self.args is None: self.args = []
- self.kwargs = kwargs
- if self.kwargs is None: self.kwargs = {}
-
- def __call__(self, ds):
- return self.function(ds, *self.args, **self.kwargs)
-
-def _hop_method(ds, **finder_kwargs):
- r"""
- Run the Hop halo finding method.
- """
-
- halo_list = HOPHaloFinder(ds, **finder_kwargs)
- halos_ds = _parse_old_halo_list(ds, halo_list)
- return halos_ds
-add_finding_method("hop", _hop_method)
-
-def _fof_method(ds, **finder_kwargs):
- r"""
- Run the FoF halo finding method.
- """
-
- halo_list = FOFHaloFinder(ds, **finder_kwargs)
- halos_ds = _parse_old_halo_list(ds, halo_list)
- return halos_ds
-add_finding_method("fof", _fof_method)
-
-def _rockstar_method(ds, **finder_kwargs):
- r"""
- Run the Rockstar halo finding method.
- """
-
- from yt.frontends.rockstar.data_structures import \
- RockstarDataset
- from yt.analysis_modules.halo_finding.rockstar.api import \
- RockstarHaloFinder
-
- rh = RockstarHaloFinder(ds, **finder_kwargs)
- rh.run()
-
- if 'outbase' in finder_kwargs:
- outbase = finder_kwargs['outbase']
- else:
- outbase = "rockstar_halos"
-
- halos_ds = RockstarDataset(outbase + "/halos_0.0.bin")
- try:
- halos_ds.create_field_info()
- except ValueError:
- return None
-
- return halos_ds
-add_finding_method("rockstar", _rockstar_method)
-
-def _parse_old_halo_list(data_ds, halo_list):
- r"""
- Convert the halo list into a loaded dataset.
- """
-
- num_halos = len(halo_list)
-
- if num_halos == 0: return None
-
- # Set up fields that we want to pull from identified halos and their units
- new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x',
- 'particle_position_y','particle_position_z',
- 'virial_radius']
- new_units = [ '', 'g', 'cm', 'cm','cm','cm']
-
- # Set up a dictionary based on those fields
- # with empty arrays where we will fill in their values
- halo_properties = { f : (np.zeros(num_halos),unit) \
- for f, unit in zip(new_fields,new_units)}
-
- # Iterate through the halos pulling out their positions and virial quantities
- # and filling in the properties dictionary
- for i,halo in enumerate(halo_list):
- halo_properties['particle_identifier'][0][i] = i
- halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
- halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
-
- com = halo.center_of_mass().in_cgs()
- halo_properties['particle_position_x'][0][i] = com[0]
- halo_properties['particle_position_y'][0][i] = com[1]
- halo_properties['particle_position_z'][0][i] = com[2]
-
- # Define a bounding box based on original data ds
- bbox = np.array([data_ds.domain_left_edge.in_cgs(),
- data_ds.domain_right_edge.in_cgs()]).T
-
- # Create a ds with the halos as particles
- particle_ds = load_particles(halo_properties,
- bbox=bbox, length_unit = 1, mass_unit=1)
-
- # Create the field info dictionary so we can reference those fields
- particle_ds.create_field_info()
-
- for attr in ["current_redshift", "current_time",
- "domain_dimensions",
- "cosmological_simulation", "omega_lambda",
- "omega_matter", "hubble_constant"]:
- attr_val = getattr(data_ds, attr)
- setattr(particle_ds, attr, attr_val)
- particle_ds.current_time = particle_ds.current_time.in_cgs()
-
- particle_ds.unit_registry.modify("h", particle_ds.hubble_constant)
- # Comoving lengths
- for my_unit in ["m", "pc", "AU", "au"]:
- new_unit = "%scm" % my_unit
- particle_ds.unit_registry.add(new_unit, particle_ds.unit_registry.lut[my_unit][0] /
- (1 + particle_ds.current_redshift),
- length, "\\rm{%s}/(1+z)" % my_unit)
-
- return particle_ds
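
The conversion above hands the halos to yt's stream frontend through load_particles,
using a dict that maps particle field names to (array, unit) pairs. A hedged sketch of
that call with made-up values for three halos (CGS units, as in the code above):

    # Hedged sketch: load a handful of halos as a particle-only stream dataset.
    # All numbers are made up; units are CGS as in the conversion above.
    import numpy as np
    import yt

    data = {
        "particle_identifier": (np.arange(3, dtype="float64"), ""),
        "particle_mass": (np.array([1e45, 3e44, 8e43]), "g"),
        "particle_position_x": (np.array([1e25, 4e25, 7e25]), "cm"),
        "particle_position_y": (np.array([2e25, 5e25, 8e25]), "cm"),
        "particle_position_z": (np.array([3e25, 6e25, 9e25]), "cm"),
        "virial_radius": (np.array([3e23, 2e23, 1e23]), "cm"),
    }
    bbox = np.array([[0.0, 1e26], [0.0, 1e26], [0.0, 1e26]])

    halos_ds = yt.load_particles(data, bbox=bbox, length_unit=1, mass_unit=1)
    ad = halos_ds.all_data()
    print(ad["all", "particle_mass"].in_units("Msun"))
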
diff --git a/yt/analysis_modules/halo_analysis/halo_object.py b/yt/analysis_modules/halo_analysis/halo_object.py
deleted file mode 100644
index ec97a995425..00000000000
--- a/yt/analysis_modules/halo_analysis/halo_object.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Halo object.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-class Halo(object):
- particles = None
- def __init__(self, halo_catalog):
- self.halo_catalog = halo_catalog
- self.quantities = {}
diff --git a/yt/analysis_modules/halo_analysis/halo_quantities.py b/yt/analysis_modules/halo_analysis/halo_quantities.py
deleted file mode 100644
index c10f39ade1e..00000000000
--- a/yt/analysis_modules/halo_analysis/halo_quantities.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-Halo quantity object
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013-2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.utilities.operator_registry import \
- OperatorRegistry
-
-from .halo_callbacks import HaloCallback
-
-quantity_registry = OperatorRegistry()
-
-def add_quantity(name, function):
- quantity_registry[name] = HaloQuantity(function)
-
-class HaloQuantity(HaloCallback):
- r"""
- A HaloQuantity is a function that takes minimally a Halo object,
- performs some analysis, and then returns a value that is assigned
- to an entry in the Halo.quantities dictionary.
- """
- def __init__(self, function, *args, **kwargs):
- HaloCallback.__init__(self, function, args, kwargs)
-
- def __call__(self, halo):
- return self.function(halo, *self.args, **self.kwargs)
-
-def center_of_mass(halo):
- if halo.particles is None:
- raise RuntimeError("Center of mass requires halo to have particle data.")
- return (halo.particles['particle_mass'] *
- np.array([halo.particles['particle_position_x'],
- halo.particles['particle_position_y'],
- halo.particles['particle_position_z']])).sum(axis=1) / \
- halo.particles['particle_mass'].sum()
-
-add_quantity('center_of_mass', center_of_mass)
-
-def bulk_velocity(halo):
- if halo.particles is None:
- raise RuntimeError("Bulk velocity requires halo to have particle data.")
- return (halo.particles['particle_mass'] *
- np.array([halo.particles['particle_velocity_x'],
- halo.particles['particle_velocity_y'],
- halo.particles['particle_velocity_z']])).sum(axis=1) / \
- halo.particles['particle_mass'].sum()
-
-add_quantity('bulk_velocity', bulk_velocity)
diff --git a/yt/analysis_modules/halo_analysis/halo_recipes.py b/yt/analysis_modules/halo_analysis/halo_recipes.py
deleted file mode 100644
index e01742c0273..00000000000
--- a/yt/analysis_modules/halo_analysis/halo_recipes.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Halo recipe object
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2016, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.operator_registry import \
- OperatorRegistry
-
-recipe_registry = OperatorRegistry()
-
-def add_recipe(name, function):
- recipe_registry[name] = HaloRecipe(function)
-
-class HaloRecipe(object):
- r"""
- A HaloRecipe is a function that minimally takes in a Halo object
- and performs some analysis on it. This function may attach attributes
- to the Halo object, write out data, etc, but does not return anything.
- """
- def __init__(self, function, args=None, kwargs=None):
- self.function = function
- self.args = args
- if self.args is None: self.args = []
- self.kwargs = kwargs
- if self.kwargs is None: self.kwargs = {}
-
- def __call__(self, halo_catalog):
- return self.function(halo_catalog, *self.args, **self.kwargs)
-
-def calculate_virial_quantities(hc, fields,
- weight_field=None, accumulation=True,
- radius_field="virial_radius", factor=2.0,
- overdensity_field=("gas", "overdensity"),
- critical_overdensity=200):
- r"""
- Calculate virial quantities with the following procedure:
- 1. Create a sphere data container.
- 2. Create 1D radial profiles of overdensity and any requested fields.
- 3. Call virial_quantities callback to interpolate profiles for value of critical overdensity.
- 4. Delete profile and sphere objects from halo.
-
- Parameters
- ----------
-    hc : HaloCatalog object
-        The HaloCatalog to which the callbacks of this recipe are added.
-    fields : string or list of strings
- The fields for which virial values are to be calculated.
- weight_field : string
- Weight field for profiling.
- Default : "cell_mass"
- accumulation : bool or list of bools
- If True, the profile values for a bin n are the cumulative sum of
- all the values from bin 0 to n. If -True, the sum is reversed so
- that the value for bin n is the cumulative sum from bin N (total bins)
- to n. If the profile is 2D or 3D, a list of values can be given to
- control the summation in each dimension independently.
-        Default: True.
- radius_field : string
- Field to be retrieved from the quantities dictionary as
- the basis of the halo radius.
- Default: "virial_radius".
- factor : float
- Factor to be multiplied by the base radius for defining
- the radius of the sphere.
- Default: 2.0.
- overdensity_field : string or tuple of strings
- The field used as the overdensity from which interpolation is done to
- calculate virial quantities.
- Default: ("gas", "overdensity")
- critical_overdensity : float
- The value of the overdensity at which to evaluate the virial quantities.
- Overdensity is with respect to the critical density.
- Default: 200
-
- """
-
- storage = "virial_quantities_profiles"
- pfields = [field for field in fields if field != "radius"]
-
- hc.add_callback("sphere", factor=factor)
- if pfields:
- hc.add_callback("profile", ["radius"], pfields,
- weight_field=weight_field,
- accumulation=accumulation,
- storage=storage)
- hc.add_callback("profile", ["radius"], [overdensity_field],
- weight_field="cell_volume", accumulation=True,
- storage=storage)
- hc.add_callback("virial_quantities", fields,
- overdensity_field=overdensity_field,
- critical_overdensity=critical_overdensity,
- profile_storage=storage)
- hc.add_callback("delete_attribute", storage)
- hc.add_callback("delete_attribute", "data_object")
-
-add_recipe("calculate_virial_quantities", calculate_virial_quantities)
diff --git a/yt/analysis_modules/halo_analysis/tests/run_halo_finder.py b/yt/analysis_modules/halo_analysis/tests/run_halo_finder.py
deleted file mode 100644
index dd203866773..00000000000
--- a/yt/analysis_modules/halo_analysis/tests/run_halo_finder.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from mpi4py import MPI
-import os
-import sys
-import yt
-from yt.analysis_modules.halo_analysis.api import \
- HaloCatalog
-from yt.data_objects.particle_filters import \
- particle_filter
-yt.enable_parallelism()
-
-method = sys.argv[1]
-comm = MPI.Comm.Get_parent()
-
-methods = {"fof": {}, "hop": {},
- "rockstar": {"num_readers":1,
- "num_writers":1,
- "particle_type":"dark_matter"}}
-
-@particle_filter("dark_matter", requires=["creation_time"])
-def _dm_filter(pfilter, data):
- return data["creation_time"] <= 0.0
-
-ds = yt.load("Enzo_64/DD0043/data0043")
-ds.add_particle_filter("dark_matter")
-
-output_dir = os.path.join(os.path.dirname(__file__),
- "halo_catalogs", method)
-hc = HaloCatalog(data_ds=ds, output_dir=output_dir,
- finder_method=method, finder_kwargs=methods[method])
-hc.create()
-
-comm.Disconnect()
diff --git a/yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py b/yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py
deleted file mode 100644
index 3c9c6b08510..00000000000
--- a/yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-HaloCatalog answer tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2017, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-import os
-import shutil
-import tempfile
-
-from yt.analysis_modules.halo_analysis.api import \
- HaloCatalog, \
- add_quantity
-from yt.convenience import \
- load
-from yt.testing import \
- assert_equal
-from yt.utilities.answer_testing.framework import \
- AnswerTestingTest, \
- data_dir_load, \
- requires_ds
-
-def _nstars(halo):
- sp = halo.data_object
- return (sp["all", "creation_time"] > 0).sum()
-add_quantity("nstars", _nstars)
-
-class HaloQuantityTest(AnswerTestingTest):
- _type_name = "HaloQuantity"
- _attrs = ()
-
- def __init__(self, data_ds_fn, halos_ds_fn):
- self.data_ds_fn = data_ds_fn
- self.halos_ds_fn = halos_ds_fn
- self.ds = data_dir_load(data_ds_fn)
-
- def run(self):
- curdir = os.getcwd()
- tmpdir = tempfile.mkdtemp()
- os.chdir(tmpdir)
-
- dds = data_dir_load(self.data_ds_fn)
- hds = data_dir_load(self.halos_ds_fn)
- hc = HaloCatalog(
- data_ds=dds, halos_ds=hds,
- output_dir=os.path.join(tmpdir, str(dds)))
- hc.add_callback("sphere")
- hc.add_quantity("nstars")
- hc.create()
-
- fn = os.path.join(tmpdir, str(dds),
- "%s.0.h5" % str(dds))
- ds = load(fn)
- ad = ds.all_data()
- mi, ma = ad.quantities.extrema("nstars")
- mean = ad.quantities.weighted_average_quantity(
- "nstars", "particle_ones")
-
- os.chdir(curdir)
- shutil.rmtree(tmpdir)
-
- return np.array([mean, mi, ma])
-
- def compare(self, new_result, old_result):
- assert_equal(new_result, old_result, verbose=True)
-
-rh0 = "rockstar_halos/halos_0.0.bin"
-e64 = "Enzo_64/DD0043/data0043"
-
-@requires_ds(rh0)
-@requires_ds(e64)
-def test_halo_quantity():
- yield HaloQuantityTest(e64, rh0)
diff --git a/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py b/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
deleted file mode 100644
index 86cada039e9..00000000000
--- a/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import sys
-
-from yt.convenience import load
-from yt.frontends.halo_catalog.data_structures import \
- HaloCatalogDataset
-from yt.utilities.answer_testing.framework import \
- FieldValuesTest, \
- requires_ds
-
-_fields = (("halos", "particle_position_x"),
- ("halos", "particle_position_y"),
- ("halos", "particle_position_z"),
- ("halos", "particle_mass"))
-
-methods = {"fof": 2, "hop": 2, "rockstar": 3}
-decimals = {"fof": 10, "hop": 10, "rockstar": 1}
-
-e64 = "Enzo_64/DD0043/data0043"
-@requires_ds(e64, big_data=True)
-def test_halo_finders():
- from mpi4py import MPI
- filename = os.path.join(os.path.dirname(__file__),
- "run_halo_finder.py")
- for method in methods:
- comm = MPI.COMM_SELF.Spawn(sys.executable,
- args=[filename, method],
- maxprocs=methods[method])
- comm.Disconnect()
-
- fn = os.path.join(os.path.dirname(__file__),
- "halo_catalogs", method,
- "%s.0.h5" % method)
- ds = load(fn)
- assert isinstance(ds, HaloCatalogDataset)
- for field in _fields:
- yield FieldValuesTest(ds, field, particle_type=True,
- decimals=decimals[method])
diff --git a/yt/analysis_modules/halo_finding/api.py b/yt/analysis_modules/halo_finding/api.py
index 8d6b6a352ec..c0db4ee5e0c 100644
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -1,40 +1,7 @@
-"""
-API for halo_finding
+from yt.utilities.exceptions import \
+ YTModuleRemoved
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning(
- "Development of the halo_finding module has been moved to "
- "the yt_astro_analysis package. This version is deprecated "
- "and will be removed from yt in a future release. See "
- "https://github.com/yt-project/yt_astro_analysis for further "
- "information.")
-
-from .halo_objects import \
- Halo, \
- HOPHalo, \
- LoadedHalo, \
- FOFHalo, \
- HaloList, \
- HOPHaloList, \
- FOFHaloList, \
- LoadedHaloList, \
- GenericHaloFinder, \
- HOPHaloFinder, \
- FOFHaloFinder, \
- HaloFinder, \
- LoadHaloes, \
- LoadTextHalos, \
- LoadTextHaloes
+raise YTModuleRemoved(
+ "halo_finding",
+ "https://github.com/yt-project/yt_astro_analysis",
+ "https://yt-astro-analysis.readthedocs.io/")
diff --git a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
deleted file mode 100644
index c199a7bb9f2..00000000000
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*******************************************************************************
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-*******************************************************************************/
-
-//
-// EnzoFOF
-// A module for running friends-of-friends halo finding on a set of particles
-//
-
-#include "Python.h"
-#include <stdio.h>
-#include <math.h>
-#include <signal.h>
-#include <ctype.h>
-#include "kd.h"
-#include "tipsydefs.h"
-
-#include "numpy/ndarrayobject.h"
-
-
-static PyObject *_FOFerror;
-
-static PyObject *
-Py_EnzoFOF(PyObject *obj, PyObject *args)
-{
- PyObject *oxpos, *oypos, *ozpos;
- PyArrayObject *xpos, *ypos, *zpos;
- float link = 0.2;
- float fPeriod[3] = {1.0, 1.0, 1.0};
- int nMembers = 8;
- int i, num_particles;
- KDFOF kd;
- int nBucket,j;
- float fEps;
- int nGroup,bVerbose=1;
- int sec,usec;
- PyArrayObject *particle_group_id;
- PyObject *return_value;
-
- xpos=ypos=zpos=NULL;
-
- if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
- &oxpos, &oypos, &ozpos, &link,
- &fPeriod[0], &fPeriod[1], &fPeriod[2],
- &nMembers))
- return PyErr_Format(_FOFerror,
- "EnzoFOF: Invalid parameters.");
-
- /* First the regular source arrays */
-
- xpos = (PyArrayObject *) PyArray_FromAny(oxpos,
- PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
- if(!xpos){
- PyErr_Format(_FOFerror,
- "EnzoFOF: xpos didn't work.");
- goto _fail;
- }
- num_particles = PyArray_SIZE(xpos);
-
- ypos = (PyArrayObject *) PyArray_FromAny(oypos,
- PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
- if((!ypos)||(PyArray_SIZE(ypos) != num_particles)) {
- PyErr_Format(_FOFerror,
- "EnzoFOF: xpos and ypos must be the same length.");
- goto _fail;
- }
-
- zpos = (PyArrayObject *) PyArray_FromAny(ozpos,
- PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
- NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
- if((!zpos)||(PyArray_SIZE(zpos) != num_particles)) {
- PyErr_Format(_FOFerror,
- "EnzoFOF: xpos and zpos must be the same length.");
- goto _fail;
- }
-
- /* let's get started with the FOF stuff */
-
- /* linking length */
- fprintf(stdout, "Link length is %f\n", link);
- fEps = link;
-
- nBucket = 16;
-
- /* initialize the kd FOF structure */
-
- kdInitFoF(&kd,nBucket,fPeriod);
-
- /* kdReadTipsyFoF(kd,stdin,bDark,bGas,bStar); */
-
- /* Copy positions into kd structure. */
-
- fprintf(stdout, "Filling in %d particles\n", num_particles);
- kd->nActive = num_particles;
- kd->p = (PARTICLEFOF *)malloc(kd->nActive*sizeof(PARTICLEFOF));
- assert(kd->p != NULL);
- for (i = 0; i < num_particles; i++) {
- kd->p[i].iOrder = i;
- kd->p[i].r[0] = (float)(*(npy_float64*) PyArray_GETPTR1(xpos, i));
- kd->p[i].r[1] = (float)(*(npy_float64*) PyArray_GETPTR1(ypos, i));
- kd->p[i].r[2] = (float)(*(npy_float64*) PyArray_GETPTR1(zpos, i));
- }
-
- kdBuildTreeFoF(kd);
- kdTimeFoF(kd,&sec,&usec);
- nGroup = kdFoF(kd,fEps);
- kdTimeFoF(kd,&sec,&usec);
- if (bVerbose) printf("Number of initial groups:%d\n",nGroup);
- nGroup = kdTooSmallFoF(kd,nMembers);
- if (bVerbose) {
- printf("Number of groups:%d\n",nGroup);
- printf("FOF CPU TIME: %d.%06d secs\n",sec,usec);
- }
- kdOrderFoF(kd);
-
- /* kdOutGroupFoF(kd,ach); */
-
- // Now we need to get the groupID, realID.
- // This will give us the index into the original array.
- // Additionally, note that we don't really need to tie the index
- // back to the ID in this code, as we can do that back in the python code.
- // All we need to do is group information.
-
- // Tags are in kd->p[i].iGroup
- particle_group_id = (PyArrayObject *)
- PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos),
- PyArray_DescrFromType(NPY_INT32));
-
- for (i = 0; i < num_particles; i++) {
- // group tag is in kd->p[i].iGroup
- *(npy_int32*)(PyArray_GETPTR1(particle_group_id, i)) =
- (npy_int32) kd->p[i].iGroup;
- }
-
- kdFinishFoF(kd);
-
- PyArray_UpdateFlags(particle_group_id,
- NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
- return_value = Py_BuildValue("N", particle_group_id);
-
- Py_DECREF(xpos);
- Py_DECREF(ypos);
- Py_DECREF(zpos);
-
- /* We don't need this, as it's done in kdFinish
- if(kd->p!=NULL)free(kd->p);
- */
-
- return return_value;
-
-_fail:
- Py_XDECREF(xpos);
- Py_XDECREF(ypos);
- Py_XDECREF(zpos);
-
- if(kd->p!=NULL)free(kd->p);
-
- return NULL;
-
-}
-
-static PyMethodDef _FOFMethods[] = {
- {"RunFOF", Py_EnzoFOF, METH_VARARGS},
- {NULL, NULL} /* Sentinel */
-};
-
-/* platform independent*/
-#ifdef MS_WIN32
-__declspec(dllexport)
-#endif
-
-PyMODINIT_FUNC
-#if PY_MAJOR_VERSION >= 3
-#define _RETVAL m
-PyInit_EnzoFOF(void)
-#else
-#define _RETVAL
-initEnzoFOF(void)
-#endif
-{
- PyObject *m, *d;
-#if PY_MAJOR_VERSION >= 3
- static struct PyModuleDef moduledef = {
- PyModuleDef_HEAD_INIT,
- "EnzoFOF", /* m_name */
- "EnzoFOF Module", /* m_doc */
- -1, /* m_size */
- _FOFMethods, /* m_methods */
- NULL, /* m_reload */
- NULL, /* m_traverse */
- NULL, /* m_clear */
- NULL, /* m_free */
- };
- m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("EnzoFOF", _FOFMethods);
-#endif
- d = PyModule_GetDict(m);
- _FOFerror = PyErr_NewException("EnzoFOF.FOFerror", NULL, NULL);
- PyDict_SetItemString(d, "error", _FOFerror);
- import_array();
- return _RETVAL;
-}
-
-/*
- * Local Variables:
- * mode: C
- * c-file-style: "python"
- * End:
- */
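
From the PyArg_ParseTuple format string "OOO|f(fff)i" above, the extension takes three
float64 position arrays plus an optional linking length, periodicity triple, and
minimum group size, and returns an int32 array of group tags. A hedged usage sketch,
assuming a yt release prior to this removal in which the compiled EnzoFOF module is
still importable:

    # Hedged sketch: call the compiled extension directly. Assumes a yt release
    # where yt.analysis_modules.halo_finding.fof.EnzoFOF is still importable.
    import numpy as np
    from yt.analysis_modules.halo_finding.fof import EnzoFOF

    rng = np.random.default_rng(2)
    x = rng.random(5000)  # positions normalized to the unit box
    y = rng.random(5000)
    z = rng.random(5000)

    # RunFOF(x, y, z, link, (px, py, pz), min_members) -> int32 group tags,
    # mirroring the "OOO|f(fff)i" argument format parsed above.
    tags = EnzoFOF.RunFOF(x, y, z, 0.02, (1.0, 1.0, 1.0), 8)
    print(np.unique(tags).size, "distinct group tags")
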
diff --git a/yt/analysis_modules/halo_finding/fof/README b/yt/analysis_modules/halo_finding/fof/README
deleted file mode 100644
index 36f41ef205c..00000000000
--- a/yt/analysis_modules/halo_finding/fof/README
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
- FOF v1.1
-
- A Group Finder for N-body Simulations
-
- October 26, 1994
-
-Changes from v1.0:
- o Fixed bug in tree building, this bug only affected cases where
- a very small "bucket" size was chosen and the number of particles
- was not a power of two.
-
-Included are:
- README
- Makefile
- cat1/fof.1
- kd.c
- kd.h
- main.c
- man1/fof.1
- tipsydefs.h
-
-For detailed information read the man page (either cat1/fof.1 or
-man1/fof.1).
-
-To build:
-
- > make
-
-To get further information contact:
-
- hpccsoft@astro.washington.edu
-
diff --git a/yt/analysis_modules/halo_finding/fof/fof_main.c b/yt/analysis_modules/halo_finding/fof/fof_main.c
deleted file mode 100644
index e4e7e4c1b64..00000000000
--- a/yt/analysis_modules/halo_finding/fof/fof_main.c
+++ /dev/null
@@ -1,134 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include "kd.h"
-
-
-void usage(void)
-{
- fprintf(stderr,"USAGE:\n");
- fprintf(stderr,"fof -e \n");
- fprintf(stderr," [-m ] [-dgs] [-v]\n");
- fprintf(stderr," [-o