diff --git a/.gitignore b/.gitignore index 3821f6b63ed..478794534b3 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ yt/geometry/oct_container.c yt/geometry/oct_visitors.c yt/geometry/particle_deposit.c yt/geometry/particle_oct_container.c +yt/geometry/particle_oct_container.cpp yt/geometry/particle_smooth.c yt/geometry/selection_routines.c yt/utilities/amr_utils.c @@ -34,11 +35,16 @@ yt/utilities/lib/alt_ray_tracers.c yt/utilities/lib/amr_kdtools.c yt/utilities/lib/basic_octree.c yt/utilities/lib/bitarray.c +yt/utilities/lib/bounded_priority_queue.c yt/utilities/lib/bounding_volume_hierarchy.c yt/utilities/lib/contour_finding.c +yt/utilities/lib/cykdtree/kdtree.cpp +yt/utilities/lib/cykdtree/utils.cpp +yt/utilities/lib/cyoctree.cpp yt/utilities/lib/depth_first_octree.c yt/utilities/lib/distance_queue.c yt/utilities/lib/element_mappings.c +yt/utilities/lib/ewah_bool_wrap.cpp yt/utilities/lib/fnv_hash.c yt/utilities/lib/fortran_reader.c yt/utilities/lib/freetype_writer.c @@ -55,13 +61,16 @@ yt/utilities/lib/mesh_samplers.cpp yt/utilities/lib/mesh_traversal.cpp yt/utilities/lib/mesh_triangulation.c yt/utilities/lib/mesh_utilities.c +yt/utilities/lib/pixelization_routines.cpp yt/utilities/lib/misc_utilities.c +yt/utilities/lib/particle_kdtree_tools.cpp yt/utilities/lib/particle_mesh_operations.c yt/utilities/lib/partitioned_grid.c yt/utilities/lib/primitives.c yt/utilities/lib/origami.c yt/utilities/lib/particle_mesh_operations.c yt/utilities/lib/pixelization_routines.c +yt/utilities/lib/pixelization_routines.cpp yt/utilities/lib/png_writer.c yt/utilities/lib/points_in_volume.c yt/utilities/lib/quad_tree.c @@ -71,7 +80,6 @@ yt/utilities/lib/grid_traversal.c yt/utilities/lib/marching_cubes.c yt/utilities/lib/png_writer.h yt/utilities/lib/write_array.c -yt/utilities/lib/perftools_wrap.c yt/utilities/lib/partitioned_grid.c yt/utilities/lib/volume_container.c yt/utilities/lib/lenses.c diff --git a/.gitmodules b/.gitmodules index 5bd34913d2f..d2d34bb18cd 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,4 +1,4 @@ [submodule "answer-store"] path = answer-store url = https://github.com/yt-project/answer-store - branch = master + branch = yt-4.0 diff --git a/.hgignore b/.hgignore index cd0a9b6672e..78e6f7e77ee 100644 --- a/.hgignore +++ b/.hgignore @@ -37,6 +37,7 @@ yt/utilities/lib/contour_finding.c yt/utilities/lib/depth_first_octree.c yt/utilities/lib/distance_queue.c yt/utilities/lib/element_mappings.c +yt/utilities/lib/ewah_bool_wrap.cpp yt/utilities/lib/fnv_hash.c yt/utilities/lib/fortran_reader.c yt/utilities/lib/freetype_writer.c @@ -54,6 +55,7 @@ yt/utilities/lib/mesh_traversal.cpp yt/utilities/lib/mesh_triangulation.c yt/utilities/lib/mesh_utilities.c yt/utilities/lib/misc_utilities.c +yt/utilities/lib/particle_kdtree_tools.cpp yt/utilities/lib/particle_mesh_operations.c yt/utilities/lib/partitioned_grid.c yt/utilities/lib/primitives.c diff --git a/.travis.yml b/.travis.yml index 801f5103004..e7cfad98563 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,5 @@ language: python -dist: xenial +dist: bionic cache: pip: true directories: @@ -9,6 +9,7 @@ addons: apt: packages: - libhdf5-serial-dev + - libnetcdf-dev - libproj-dev - proj-data - proj-bin @@ -55,9 +56,11 @@ install: fi if [[ ${TRAVIS_BUILD_STAGE_NAME} != "Lint" ]]; then if [[ $MINIMAL == 1 ]]; then + # Ensure numpy and cython are installed so dependencies that need to be built + # don't error out # The first numpy to support py3.6 is 1.12, but numpy 1.13 matches # unyt so we'll match it here. 
- $PIP install numpy==1.13.3 cython==0.24 + $PIP install numpy==1.13.3 cython==0.26.1 $PIP install -r tests/test_minimal_requirements.txt else # Getting cartopy installed requires getting cython and numpy installed @@ -95,9 +98,12 @@ jobs: python: 3.8 script: coverage run $(which nosetests) -c nose_unit.cfg + # This is not necessarily going to be forever -- once we merge yt-4.0 + # with master we will likely change this around to reduce the number of + # versions we test on. - stage: tests - name: "Python: 3.6 Answer Tests" - python: 3.6 + name: "Python: 3.7 Answer Tests" + python: 3.7 script: coverage run $(which nosetests) -c nose_answer.cfg after_failure: python tests/report_failed_answers.py -f -m --xunit-file "answer_nosetests.xml" diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index a5fa80ade7c..f77e890b01b 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -16,12 +16,8 @@ Coding is only one way to be involved! Communication Channels ---------------------- -There are five main communication channels for yt: +There are three main communication channels for yt: - * We have an IRC channel, on ``irc.freenode.net`` in ``#yt``. - You can connect through our web - gateway without any special client, at https://yt-project.org/irc.html . - *IRC is the first stop for conversation!* * Many yt developers participate in the yt Slack community. Slack is a free chat service that many teams use to organize their work. You can get an invite to yt's Slack organization by clicking the "Join us @ Slack" button @@ -405,12 +401,6 @@ the following subdirectories: classes for data regions, covering grids, time series, and so on. This also includes derived fields and derived quantities. -``analysis_modules`` - This is where all mechanisms for processing data live. This includes - things like clump finding, halo profiling, halo finding, and so on. This - is something of a catchall, but it serves as a level of greater - abstraction that simply data selection and modification. - ``gui`` This is where all GUI components go. Typically this will be some small tool used for one or two things, which contains a launching mechanism on @@ -762,6 +752,7 @@ Source code style guide * In general, follow PEP-8 guidelines. https://www.python.org/dev/peps/pep-0008/ + * We no longer have a copyright blurb in every source file. * Classes are ``ConjoinedCapitals``, methods and functions are ``lowercase_with_underscores``. * Use 4 spaces, not tabs, to represent indentation. @@ -784,7 +775,7 @@ Source code style guide that occur on an object. See :ref:`docstrings` below for a fiducial example of a docstring. * Use only one top-level import per line. Unless there is a good reason not to, - imports should happen at the top of the file, after the copyright blurb. + imports should happen at the top of the file. * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use ``is`` or ``is not``. * If you are comparing with a numpy boolean array, just refer to the array. 
diff --git a/MANIFEST.in b/MANIFEST.in index 8858ce2186c..bd48ccb6303 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,14 +3,14 @@ include yt/visualization/mapserver/html/map.js include yt/visualization/mapserver/html/map_index.html include yt/utilities/mesh_types.yaml exclude scripts/pr_backport.py -recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu +recursive-include yt *.py *.pyx *.pxd *.h *.hpp README* *.txt LICENSE* *.cu recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex include doc/extensions/README doc/Makefile prune doc/source/reference/api/generated prune doc/build -recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx recursive-include yt/visualization/volume_rendering/shaders *.fragmentshader *.vertexshader +include yt/sample_data_registry.json prune yt/frontends/_skeleton recursive-include yt/frontends/amrvac *.par diff --git a/answer-store b/answer-store index 4691cccb917..fe17f9b706d 160000 --- a/answer-store +++ b/answer-store @@ -1 +1 @@ -Subproject commit 4691cccb917c971590fbde89d499c54fe0c7eaec +Subproject commit fe17f9b706d4bee227afb0fa2cd41df0049ae924 diff --git a/appveyor.yml b/appveyor.yml index 31e4c531579..ca1e282acb3 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -30,7 +30,10 @@ install: - "python --version" # Install specified version of numpy and dependencies - - "conda install --yes -c conda-forge numpy scipy nose pytest setuptools ipython Cython sympy fastcache h5py matplotlib=3.1.3 mock pandas cartopy conda-build pyyaml" + - "conda install --yes -c conda-forge numpy scipy nose pytest setuptools ipython git + Cython sympy fastcache h5py matplotlib=3.1.3 mock pandas cartopy conda-build pooch pyyaml" + - "pip install git+https://github.com/yt-project/unyt@de443dff7671f1e68557306d77582cd117cc94f8#egg=unyt" + # install yt - "pip install -e ." 
# Not a .NET project diff --git a/doc/Makefile b/doc/Makefile index 8cc412ee0a3..342b956aee5 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -49,8 +49,7 @@ html: ifneq ($(READTHEDOCS),True) SPHINX_APIDOC_OPTIONS=members,undoc-members,inherited-members,show-inheritance sphinx-apidoc \ -o source/reference/api/ \ - -e ../yt ../yt/extern* $(shell find ../yt -name "*tests*" -type d) ../yt/utilities/voropp* \ - ../yt/analysis_modules/halo_finding/{fof,hop} + -e ../yt ../yt/extern* $(shell find ../yt -name "*tests*" -type d) ../yt/utilities/voropp* endif $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo diff --git a/doc/helper_scripts/parse_cb_list.py b/doc/helper_scripts/parse_cb_list.py index b91976993d9..4287b355594 100644 --- a/doc/helper_scripts/parse_cb_list.py +++ b/doc/helper_scripts/parse_cb_list.py @@ -28,7 +28,7 @@ def write_docstring(f, name, cls): sig = sig.replace("**kwargs", "**field_parameters") clsproxy = "yt.visualization.plot_modifications.%s" % (cls.__name__) #docstring = "\n".join([" %s" % line for line in docstring.split("\n")]) - #print docstring + #print(docstring) f.write(template % dict(clsname = clsname, sig = sig, clsproxy=clsproxy, docstring = "\n".join(tw.wrap(docstring)))) #docstring = docstring)) diff --git a/doc/helper_scripts/show_fields.py b/doc/helper_scripts/show_fields.py index a7ab1cce666..2c7365a6967 100644 --- a/doc/helper_scripts/show_fields.py +++ b/doc/helper_scripts/show_fields.py @@ -54,8 +54,10 @@ def _strip_ftype(field): unit_registry=ds.unit_registry) for my_unit in ["m", "pc", "AU", "au"]: new_unit = "%scm" % my_unit - ds.unit_registry.add(new_unit, base_ds.unit_registry.lut[my_unit][0], - dimensions.length, "\\rm{%s}/(1+z)" % my_unit) + my_u = Unit(my_unit, registry=ds.unit_registry) + ds.unit_registry.add(new_unit, my_u.base_value, + dimensions.length, "\\rm{%s}/(1+z)" % my_unit, + prefixable=True) @@ -143,7 +145,7 @@ def print_all_fields(fl): print(" * Units: :math:`%s`" % fix_units(df.units)) else: print(" * Units: :math:`%s`" % fix_units(df.units, in_cgs=True)) - print(" * Particle Type: %s" % (df.particle_type)) + print(" * Sampling Method: %s" % (df.sampling_type)) print() print("**Field Source**") print() diff --git a/doc/helper_scripts/table.py b/doc/helper_scripts/table.py index 40e27223578..4faf0046c85 100644 --- a/doc/helper_scripts/table.py +++ b/doc/helper_scripts/table.py @@ -23,13 +23,6 @@ ("interacting/index.html", "Interacting with yt", "Different ways -- scripting, GUIs, prompts, explorers -- to explore " + "your data."), - ("analysis_modules/index.html", "Analysis Modules", - "Discussions of some provided procedures for astrophysical analysis " + - "like halo finding and synthetic spectra. Halo finding, analyzing " + - "cosmology simulations, halo mass functions, halo profiling, light " + - "cone generator, making absorption spectrums, star particle " + - "analysis, two-point functions, halo merger trees, clump finding, " + - "radial column density, exporting to sunrise.") ]), ("Advanced Usage", [ ("advanced/index.html", "Advanced yt usage", diff --git a/doc/install_script.sh b/doc/install_script.sh index 9560a9c5cb8..0185996394e 100644 --- a/doc/install_script.sh +++ b/doc/install_script.sh @@ -32,13 +32,13 @@ INST_GIT=1 # Install git or not? If git is not already installed, yt INST_EMBREE=0 # Install dependencies needed for Embree-accelerated ray tracing INST_PYX=0 # Install PyX? Sometimes PyX can be problematic without a # working TeX installation. -INST_ROCKSTAR=0 # Install the Rockstar halo finder? 
INST_SCIPY=0 # Install scipy? INST_H5PY=1 # Install h5py? INST_ASTROPY=0 # Install astropy? INST_CARTOPY=0 # Install cartopy? INST_NOSE=1 # Install nose? INST_NETCDF4=1 # Install netcdf4 and its python bindings? +INST_POOCH=1 # Install pooch? INST_HG=0 # Install Mercurial or not? # This is the branch we will install from for INST_YT_SOURCE=1 @@ -134,13 +134,13 @@ function write_config echo INST_GIT=${INST_GIT} >> ${CONFIG_FILE} echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE} echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE} - echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE} echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE} echo INST_EMBREE=${INST_EMBREE} >> ${CONFIG_FILE} echo INST_H5PY=${INST_H5PY} >> ${CONFIG_FILE} echo INST_ASTROPY=${INST_ASTROPY} >> ${CONFIG_FILE} echo INST_CARTOPY=${INST_CARTOPY} >> ${CONFIG_FILE} echo INST_NOSE=${INST_NOSE} >> ${CONFIG_FILE} + echo INST_POOCH=${INST_POOCH} >> ${CONFIG_FILE} echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE} } @@ -286,17 +286,6 @@ then PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip" fi -if [ $INST_ROCKSTAR -ne 0 ] -then - if [ $INST_YT_SOURCE -eq 0 ] - then - echo "yt must be compiled from source to install support for" - echo "the rockstar halo finder. Please set INST_YT_SOURCE to 1" - echo "and re-run the install script" - exit 1 - fi -fi - echo echo echo "========================================================================" @@ -330,10 +319,6 @@ printf "%-18s = %s so I " "INST_PYX" "${INST_PYX}" get_willwont ${INST_PYX} echo "be installing PyX" -printf "%-18s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}" -get_willwont ${INST_ROCKSTAR} -echo "be installing Rockstar" - printf "%-18s = %s so I " "INST_H5PY" "${INST_H5PY}" get_willwont ${INST_H5PY} echo "be installing h5py" @@ -350,6 +335,10 @@ printf "%-18s = %s so I " "INST_NOSE" "${INST_NOSE}" get_willwont ${INST_NOSE} echo "be installing nose" +printf "%-18s = %s so I " "INST_POOCH" "${INST_POOCH}" +get_willwont ${INST_POOCH} +echo "be installing pooch" + echo echo @@ -522,6 +511,10 @@ if [ $INST_CARTOPY -ne 0 ] then YT_DEPS+=('cartopy') fi +if [ $INST_POOCH -ne 0 ] +then + YT_DEPS+=('pooch') +fi YT_DEPS+=('conda-build') if [ $INST_PY3 -eq 0 ] && [ $INST_HG -eq 1 ] then @@ -598,16 +591,6 @@ then popd &> /dev/null fi -if [ $INST_ROCKSTAR -eq 1 ] -then - echo "Building Rockstar" - ( ${GIT_EXE} clone https://github.com/yt-project/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE} - ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output) - log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar - log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE - ROCKSTAR_DIR=${DEST_DIR}/src/rockstar -fi - # conda doesn't package pyx, so we install manually with pip if [ $INST_PYX -eq 1 ] then @@ -650,13 +633,8 @@ else then echo $DEST_DIR > ${YT_DIR}/embree.cfg fi - if [ $INST_ROCKSTAR -eq 1 ] - then - echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg - ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib - fi pushd ${YT_DIR} &> /dev/null - ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit + ( ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit popd &> /dev/null fi @@ -697,21 +675,6 @@ echo "You can also update the init file appropriate for your shell" echo "(e.g. .bashrc, .bash_profile, .cshrc, or .zshrc) to include" echo "the same command." 
echo -if [ $INST_ROCKSTAR -eq 1 ] -then - if [ $MYOS = "Darwin" ] - then - LD_NAME="DYLD_LIBRARY_PATH" - else - LD_NAME="LD_LIBRARY_PATH" - fi - echo - echo "For rockstar to work, you must also set $LD_NAME:" - echo - echo " export $LD_NAME=$DEST_DIR/lib:\$$LD_NAME" - echo - echo "or whichever invocation is appropriate for your shell." -fi echo "========================================================================" echo echo "Oh, look at me, still talking when there's science to do!" diff --git a/doc/source/analyzing/analysis_modules/PPVCube.ipynb b/doc/source/analyzing/analysis_modules/PPVCube.ipynb deleted file mode 100644 index 3f404884187..00000000000 --- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb +++ /dev/null @@ -1,455 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.config import ytcfg\n", - "\n", - "import yt\n", - "import numpy as np\n", - "from yt.analysis_modules.ppv_cube.api import PPVCube\n", - "import yt.units as u" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Density: $\\rho(r) \\propto r^{\\alpha}$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where for simplicity we won't worry about the normalizations of these profiles. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we'll set up the grid and the parameters of the profiles:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# increasing the resolution will make the images in this notebook more visually appealing\n", - "nx,ny,nz = (64, 64, 64) # domain dimensions\n", - "R = 10. # outer radius of disk, kpc\n", - "r_0 = 3. # scale radius, kpc\n", - "beta = 1.4 # for the tangential velocity profile\n", - "alpha = -1. # for the radial density profile\n", - "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n", - "r = np.sqrt(x*x+y*y) # polar coordinates\n", - "theta = np.arctan2(y, x) # polar coordinates" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Second, we'll construct the data arrays for the density, temperature, and velocity of the disk. 
Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "dens = np.zeros((nx,ny,nz))\n", - "dens[:,:,nz//2-3:nz//2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n", - "temp = np.zeros((nx,ny,nz))\n", - "temp[:,:,nz//2-3:nz//2+3] = 1.0e5 # Isothermal\n", - "vel_theta = 100.*r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n", - "velx = np.zeros((nx,ny,nz))\n", - "vely = np.zeros((nx,ny,nz))\n", - "velx[:,:,nz//2-3:nz//2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n", - "vely[:,:,nz//2-3:nz//2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n", - "dens[r > R] = 0.0\n", - "temp[r > R] = 0.0\n", - "velx[r > R] = 0.0\n", - "vely[r > R] = 0.0" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1 `code_length`. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "data = {}\n", - "data[\"density\"] = (dens,\"g/cm**3\")\n", - "data[\"temperature\"] = (temp, \"K\")\n", - "data[\"velocity_x\"] = (velx, \"km/s\")\n", - "data[\"velocity_y\"] = (vely, \"km/s\")\n", - "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n", - "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n", - "ds = yt.load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "slc = yt.SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "slc.set_log(\"velocity_x\", False)\n", - "slc.set_log(\"velocity_y\", False)\n", - "slc.set_log(\"velocity_magnitude\", False)\n", - "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n", - "slc.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the x-axis. 
We'll create a normal vector:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "i = 60.*np.pi/180.\n", - "L = [np.sin(i),0.0,np.cos(i)]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to choose the bounds in line-of-sight velocity that the data will be binned into, which is a 4-tuple in the shape of `(vmin, vmax, nbins, units)`, which specifies a linear range of `nbins` velocity bins from `vmin` to `vmax` in units of `units`. We may also optionally specify the dimensions of the data cube with the `dims` argument." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "scrolled": true - }, - "outputs": [], - "source": [ - "cube = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, method=\"sum\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Following this, we can now write this cube to a FITS file. The x and y axes of the file can be in length units, which can be optionally specified by `length_unit`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "cube.write_fits(\"cube.fits\", clobber=True, length_unit=\"kpc\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Or one can use the `sky_scale` and `sky_center` keywords to set up the coordinates in RA and Dec:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "sky_scale = (1.0, \"arcsec/kpc\")\n", - "sky_center = (30., 45.) 
# RA, Dec in degrees\n", - "cube.write_fits(\"cube_sky.fits\", clobber=True, sky_scale=sky_scale, sky_center=sky_center)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds_cube = yt.load(\"cube.fits\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Specifying no center gives us the center slice\n", - "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"])\n", - "slc.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Picking different velocities for the slices\n", - "new_center = ds_cube.domain_center\n", - "new_center[2] = ds_cube.spec2pixel(-100.*u.km/u.s)\n", - "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n", - "slc.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "new_center[2] = ds_cube.spec2pixel(70.0*u.km/u.s)\n", - "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n", - "slc.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "new_center[2] = ds_cube.spec2pixel(-30.0*u.km/u.s)\n", - "slc = yt.SlicePlot(ds_cube, \"z\", [\"density\"], center=new_center)\n", - "slc.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "prj = yt.ProjectionPlot(ds_cube, \"z\", [\"density\"], method=\"sum\")\n", - "prj.set_log(\"density\", True)\n", - "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n", - "prj.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `thermal_broad` keyword allows one to simulate thermal line broadening based on the temperature, and the `atomic_weight` argument is used to specify the atomic weight of the particle that is doing the emitting." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "cube2 = PPVCube(ds, L, \"density\", (-150.,150.,50,\"km/s\"), dims=200, thermal_broad=True, \n", - " atomic_weight=12.0, method=\"sum\")\n", - "cube2.write_fits(\"cube2.fits\", clobber=True, length_unit=\"kpc\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Taking a slice of this cube shows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds_cube2 = yt.load(\"cube2.fits\")\n", - "new_center = ds_cube2.domain_center\n", - "new_center[2] = ds_cube2.spec2pixel(70.0*u.km/u.s)\n", - "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n", - "slc.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "new_center[2] = ds_cube2.spec2pixel(-100.*u.km/u.s)\n", - "slc = yt.SlicePlot(ds_cube2, \"z\", [\"density\"], center=new_center)\n", - "slc.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where we can see the emission has been smeared into this velocity slice from neighboring slices due to the thermal broadening. \n", - "\n", - "Finally, the \"velocity\" or \"spectral\" axis of the cube can be changed to a different unit, such as wavelength, frequency, or energy: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (cube2.vbins[0], cube2.vbins[-1])\n", - "cube2.transform_spectral_axis(400.0,\"nm\")\n", - "print (cube2.vbins[0], cube2.vbins[-1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If a FITS file is now written from the cube, the spectral axis will be in the new units. 
To reset the spectral axis back to the original velocity units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "cube2.reset_spectral_axis()\n", - "print (cube2.vbins[0], cube2.vbins[-1])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/doc/source/analyzing/analysis_modules/ParallelHaloFinder.pdf b/doc/source/analyzing/analysis_modules/ParallelHaloFinder.pdf deleted file mode 100644 index 6529c17beca..00000000000 Binary files a/doc/source/analyzing/analysis_modules/ParallelHaloFinder.pdf and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb deleted file mode 100644 index a3a64de46d1..00000000000 --- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb +++ /dev/null @@ -1,245 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The change in the CMB intensity due to Compton scattering of CMB\n", - "photons off of thermal electrons in galaxy clusters, otherwise known as the\n", - "Sunyaev-Zeldovich (S-Z) effect, can to a reasonable approximation be represented by a\n", - "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n", - "contributions, and for large frequencies and high temperatures\n", - "relativistic effects are important. For computing the full S-Z signal\n", - "incorporating all of these effects, there is a library:\n", - "SZpack ([Chluba et al 2012](https://ui.adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n", - "\n", - "The `sunyaev_zeldovich` analysis module in yt makes it possible\n", - "to make projections of the full S-Z signal given the properties of the\n", - "thermal gas in the simulation using SZpack. SZpack has several different options for computing the S-Z signal, from full\n", - "integrations to very good approximations. Since a full or even a\n", - "partial integration of the signal for each cell in the projection\n", - "would be prohibitively expensive, we use the method outlined in\n", - "[Chluba et al 2013](https://ui.adsabs.harvard.edu/abs/2013MNRAS.430.3054C) to expand the\n", - "total S-Z signal in terms of moments of the projected optical depth $\\tau$, projected electron temperature $T_e$, and\n", - "velocities $\\beta_{c,\\parallel}$ and $\\beta_{c,\\perp}$ (their equation 18):" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$S(\\tau, T_{e},\\beta_{c,\\parallel},\\beta_{\\rm c,\\perp}) \\approx S_{\\rm iso}^{(0)} + S_{\\rm iso}^{(2)}\\omega^{(1)} + C_{\\rm iso}^{(1)}\\sigma^{(1)} + D_{\\rm iso}^{(2)}\\kappa^{(1)} + E_{\\rm iso}^{(2)}\\beta_{\\rm c,\\perp,SZ}^2 +~...$$\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "yt makes projections of the various moments needed for the\n", - "calculation, and then the resulting projected fields are used to\n", - "compute the S-Z signal. 
In our implementation, the expansion is carried out to first-order\n", - "terms in $T_e$ and zeroth-order terms in $\\beta_{c,\\parallel}$ by default, but terms up to second-order in can be optionally\n", - "included. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Installing SZpack" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "SZpack can be downloaded [here](http://www.jb.man.ac.uk/~jchluba/Science/SZpack/SZpack.html). Make\n", - "sure you install a version later than v1.1.1. For computing the S-Z\n", - "integrals, SZpack requires the [GNU Scientific Library](http://www.gnu.org/software/gsl/). For compiling\n", - "the Python module, you need to have a recent version of [swig](http://www.swig.org>) installed. After running `make` in the top-level SZpack directory, you'll need to run it in the `python` subdirectory, which is the\n", - "location of the `SZpack` module. You may have to include this location in the `PYTHONPATH` environment variable.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**NOTE**: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Creating S-Z Projections" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once you have SZpack installed, making S-Z projections from yt\n", - "datasets is fairly straightforward:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline\n", - "import yt\n", - "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n", - "\n", - "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n", - "\n", - "freqs = [90.,180.,240.]\n", - "szprj = SZProjection(ds, freqs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`freqs` is a list or array of frequencies in GHz at which the signal\n", - "is to be computed. The `SZProjection` constructor also accepts the\n", - "optional keywords, `mue` (mean molecular weight for computing the\n", - "electron number density, 1.143 is the default) and `high_order` (set\n", - "to True to compute terms in the S-Z signal expansion up to\n", - "second-order in $T_{e,SZ}$ and $\\beta$). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once you have created the `SZProjection` object, you can use it to\n", - "make on-axis and off-axis projections:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n", - "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To make an off-axis projection, `szprj.off_axis` is called in the same way, except that the first argument is a three-component normal vector. \n", - "\n", - "Currently, only one projection can be in memory at once. These methods\n", - "create images of the projected S-Z signal at each requested frequency,\n", - "which can be accessed dict-like from the projection object (e.g.,\n", - "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n", - "accessed; to see what fields are available call `szprj.keys()`. 
The methods also accept standard yt\n", - "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`. \n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Writing out the S-Z Projections" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may want to output the S-Z images to figures suitable for\n", - "inclusion in a paper, or save them to disk for later use. There are a\n", - "few methods included for this purpose. For PNG figures with a colorbar\n", - "and axes, use `write_png`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "szprj.write_png(\"SZ_example\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For simple output of the image data to disk, call `write_hdf5`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "szprj.write_hdf5(\"SZ_example.h5\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, for output to FITS files which can be opened or analyzed\n", - "using other programs (such as ds9), call `export_fits`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "szprj.write_fits(\"SZ_example.fits\", clobber=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "which would write all of the projections to a single FITS file,\n", - "including coordinate information in kpc. The optional keyword\n", - "`clobber` allows a previous file to be overwritten. \n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.png b/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.png deleted file mode 100644 index 0fd6254e09b..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.svg b/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.svg deleted file mode 100644 index 420c77fe61e..00000000000 --- a/doc/source/analyzing/analysis_modules/_images/2ptcorrelation.svg +++ /dev/null @@ -1,186 +0,0 @@ - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/analyzing/analysis_modules/_images/31micron.png b/doc/source/analyzing/analysis_modules/_images/31micron.png deleted file mode 100644 index 834b020af16..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/31micron.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/LightCone_full_small.png b/doc/source/analyzing/analysis_modules/_images/LightCone_full_small.png deleted file mode 100644 index 585882f3a08..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/LightCone_full_small.png and /dev/null differ diff --git 
a/doc/source/analyzing/analysis_modules/_images/PDF.png b/doc/source/analyzing/analysis_modules/_images/PDF.png deleted file mode 100644 index 11b4213dead..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/PDF.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/PDF.svgz b/doc/source/analyzing/analysis_modules/_images/PDF.svgz deleted file mode 100644 index a42236f2663..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/PDF.svgz and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.png b/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.png deleted file mode 100644 index 89f2ead1730..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.svg b/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.svg deleted file mode 100644 index 5553f1de577..00000000000 --- a/doc/source/analyzing/analysis_modules/_images/ParallelHaloFinder.svg +++ /dev/null @@ -1,617 +0,0 @@ - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - 1 - 2 - 3 - 3 - 3 - - - - - - - 1 - 2 - 3 - 3 - 3 - - - A - B - C - D - - - - - - - 1 - 2 - 3 - 3 - 3 - - - - - A - B - C - D - - - - - - - - - - - i ii iii - - diff --git a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_30_4.png b/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_30_4.png deleted file mode 100644 index a46c5ebcfac..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_30_4.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_34_1.png b/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_34_1.png deleted file mode 100644 index b623e24c004..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/Photon_Simulator_34_1.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/SED.png b/doc/source/analyzing/analysis_modules/_images/SED.png deleted file mode 100644 index 545ba5bafef..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/SED.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/SFR.png b/doc/source/analyzing/analysis_modules/_images/SFR.png deleted file mode 100644 index 73f8242e296..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/SFR.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsBig.png b/doc/source/analyzing/analysis_modules/_images/TreecodeCellsBig.png deleted file mode 100644 index 9ca3a8160f8..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsBig.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsSmall.png b/doc/source/analyzing/analysis_modules/_images/TreecodeCellsSmall.png deleted file mode 100644 index 70d31e9475f..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/TreecodeCellsSmall.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/TreecodeOpeningAngleBig.png b/doc/source/analyzing/analysis_modules/_images/TreecodeOpeningAngleBig.png deleted file mode 100644 index aebfd704b81..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/TreecodeOpeningAngleBig.png and /dev/null differ diff --git 
a/doc/source/analyzing/analysis_modules/_images/bubbles.png b/doc/source/analyzing/analysis_modules/_images/bubbles.png deleted file mode 100644 index 04179fcb7aa..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/bubbles.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/ds9_bubbles.png b/doc/source/analyzing/analysis_modules/_images/ds9_bubbles.png deleted file mode 100644 index db3bbe6544f..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/ds9_bubbles.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/ds9_sloshing.png b/doc/source/analyzing/analysis_modules/_images/ds9_sloshing.png deleted file mode 100644 index 211f90c04f9..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/ds9_sloshing.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/dsquared.png b/doc/source/analyzing/analysis_modules/_images/dsquared.png deleted file mode 100644 index 5f43aac436a..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/dsquared.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/dust_continuum.png b/doc/source/analyzing/analysis_modules/_images/dust_continuum.png deleted file mode 100644 index d33fc1d9646..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/dust_continuum.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/halo_mass_function.png b/doc/source/analyzing/analysis_modules/_images/halo_mass_function.png deleted file mode 100644 index 2aebfa2e361..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/halo_mass_function.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/lightray.png b/doc/source/analyzing/analysis_modules/_images/lightray.png deleted file mode 100644 index cf83394bafb..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/lightray.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/merger_tree_ex.png b/doc/source/analyzing/analysis_modules/_images/merger_tree_ex.png deleted file mode 100644 index 3e4ac3cdade..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/merger_tree_ex.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/mw3_0420.jpg b/doc/source/analyzing/analysis_modules/_images/mw3_0420.jpg deleted file mode 100644 index b065071eae7..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/mw3_0420.jpg and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/profiles.png b/doc/source/analyzing/analysis_modules/_images/profiles.png deleted file mode 100644 index ea641d31dce..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/profiles.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/projections.png b/doc/source/analyzing/analysis_modules/_images/projections.png deleted file mode 100644 index a0c6e27109b..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/projections.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/spectrum_full.png b/doc/source/analyzing/analysis_modules/_images/spectrum_full.png deleted file mode 100644 index 6c38eeafdd3..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/spectrum_full.png and /dev/null differ diff --git 
a/doc/source/analyzing/analysis_modules/_images/spectrum_zoom.png b/doc/source/analyzing/analysis_modules/_images/spectrum_zoom.png deleted file mode 100644 index e9d109c0cc3..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/spectrum_zoom.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.png b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.png deleted file mode 100644 index db7312edce1..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.svgz b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.svgz deleted file mode 100644 index 8c070fee9c3..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes0.svgz and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.png b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.png deleted file mode 100644 index 4212a3c9dfc..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.svgz b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.svgz deleted file mode 100644 index 9cd18fc932b..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes1.svgz and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.png b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.png deleted file mode 100644 index b07b72ab04d..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.png and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.svgz b/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.svgz deleted file mode 100644 index 50167c99cdb..00000000000 Binary files a/doc/source/analyzing/analysis_modules/_images/struct_fcn_subvolumes2.svgz and /dev/null differ diff --git a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst deleted file mode 100644 index a88e2ff5bcc..00000000000 --- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst +++ /dev/null @@ -1,441 +0,0 @@ -.. _absorption_spectrum: - -Creating Absorption Spectra -=========================== - -.. note:: - - Development of the AbsorptionSpectrum module has been moved to the - Trident package. This version is deprecated and will be removed from yt - in a future release. See https://github.com/trident-project/trident - for further information. - -Absorption line spectra are spectra generated using bright background sources -to illuminate tenuous foreground material and are primarily used in studies -of the circumgalactic medium and intergalactic medium. These spectra can -be created using the -:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` -and -:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay` -analysis modules. 
- -The -:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` class -and its workhorse method -:meth:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum.make_spectrum` -return two arrays, one with wavelengths, the other with the normalized -flux values at each of the wavelength values. It can also output a text file -listing all important lines. - -For example, here is an absorption spectrum for the wavelength range from 900 -to 1800 Angstroms made with a light ray extending from z = 0 to z = 0.4: - -.. image:: _images/spectrum_full.png - :width: 500 - -And a zoom-in on the 1425-1450 Angstrom window: - -.. image:: _images/spectrum_zoom.png - :width: 500 - -Method for Creating Absorption Spectra --------------------------------------- - -Once a -:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay` -has been created traversing a dataset using the :ref:`light-ray-generator`, -a series of arrays store the various fields of the gas parcels (represented -as cells) intersected along the ray. -:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` -steps through each element of the -:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`'s -arrays and calculates the column density for desired ion by multiplying its -number density with the path length through the cell. Using these column -densities along with temperatures to calculate thermal broadening, Voigt -profiles are deposited on to a featureless background spectrum. By default, -the peculiar velocity of the gas is included as a Doppler redshift in addition -to any cosmological redshift of the data dump itself. - -Subgrid Deposition -^^^^^^^^^^^^^^^^^^ - -For features not resolved (i.e. possessing narrower width than the spectral -resolution), -:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` -performs subgrid deposition. The subgrid deposition algorithm creates a number -of smaller virtual bins, by default the width of the virtual bins is 1/10th -the width of the spectral feature. The Voigt profile is then deposited -into these virtual bins where it is resolved, and then these virtual bins -are numerically integrated back to the resolution of the original spectral bin -size, yielding accurate equivalent widths values. -:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` -informs the user how many spectral features are deposited in this fashion. - -Tutorial on Creating an Absorption Spectrum -------------------------------------------- - -Initializing `AbsorptionSpectrum` Class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To instantiate an -:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` -object, the arguments required are the -minimum and maximum wavelengths (assumed to be in Angstroms), and the number -of wavelength bins to span this range (including the endpoints) - -.. code-block:: python - - from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum - - sp = AbsorptionSpectrum(900.0, 1800.0, 10001) - -Adding Features to the Spectrum -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Absorption lines and continuum features can then be added to the spectrum. -To add a line, you must know some properties of the line: the rest wavelength, -f-value, gamma value, and the atomic mass in amu of the atom. 
That line must -be tied in some way to a field in the dataset you are loading, and this field -must be added to the LightRay object when it is created. Below, we will -add the H Lyman-alpha line, which is tied to the neutral hydrogen field -('H_number_density'). - -.. code-block:: python - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10) - -In the above example, the *field* argument tells the spectrum generator which -field from the ray data to use to calculate the column density. The -``label_threshold`` keyword tells the spectrum generator to add all lines -above a column density of 10 :superscript:`10` cm :superscript:`-2` to the -text line list output at the end. If None is provided, as is the default, -no lines of this type will be added to the text list. - -Continuum features with optical depths that follow a power law can also be -added. Like adding lines, you must specify details like the wavelength -and the field in the dataset and LightRay that is tied to this feature. -The wavelength refers to the location at which the continuum begins to be -applied to the dataset, and as it moves to lower wavelength values, the -optical depth value decreases according to the defined power law. The -normalization value is the column density of the linked field which results -in an optical depth of 1 at the defined wavelength. Below, we add the hydrogen -Lyman continuum. - -.. code-block:: python - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 912.323660 # Angstroms - normalization = 1.6e17 - index = 3.0 - - sp.add_continuum(my_label, field, wavelength, normalization, index) - -Making the Spectrum -^^^^^^^^^^^^^^^^^^^ - -Once all the lines and continua are added, it is time to make a spectrum out -of some light ray data. - -.. code-block:: python - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.fits', - line_list_file='lines.txt') - -A spectrum will be made using the specified ray data and the wavelength and -flux arrays will also be returned. If you set the optional -``use_peculiar_velocity`` keyword to False, the lines will not incorporate -doppler redshifts to shift the deposition of the line features. - -Three output file formats are supported for writing out the spectrum: fits, -hdf5, and ascii. The file format used is based on the extension provided -in the ``output_file`` keyword: ``.fits`` for a fits file, -``.h5`` for an hdf5 file, and anything else for an ascii file. - -.. note:: To write out a fits file, you must install the `astropy `_ python library in order to access the astropy.io.fits module. You can usually do this by simply running `pip install astropy` at the command line. - -Generating Spectra in Parallel -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The `AbsorptionSpectrum` analysis module can be run in parallel simply by -following the procedures laid out in :ref:`parallel-computation` for running -yt scripts in parallel. Spectrum generation is parallelized using a multi-level -strategy where each absorption line is deposited by a different processor. -If the number of available processors is greater than the number of lines, -then the deposition of individual lines will be divided over multiple -processors. - -Fitting Absorption Spectra -========================== - -.. 
sectionauthor:: Hilary Egan - -This tool can be used to fit absorption spectra, particularly those -generated using the (``AbsorptionSpectrum``) tool. For more details -on its uses and implementation please see (`Egan et al. (2013) -`_). If you find this tool useful we -encourage you to cite accordingly. - -Loading an Absorption Spectrum ------------------------------- - -To load an absorption spectrum created by -(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum``), -we specify the output file name. It is advisable to use either an .h5 -or .fits file, rather than an ascii file to save the spectrum as rounding -errors produced in saving to a ascii file will negatively impact fit quality. - -.. code-block:: python - - f = h5py.File('spectrum.h5', mode='r') - wavelength = f["wavelength"][:] - flux = f['flux'][:] - f.close() - -Specifying Species Properties ------------------------------ - -Before fitting a spectrum, you must specify the properties of all the -species included when generating the spectrum. - -The physical properties needed for each species are the rest wavelength, -f-value, gamma value, and atomic mass. These will be the same values -as used to generate the initial absorption spectrum. These values are -given in list form as some species generate multiple lines (as in the -OVI doublet). The number of lines is also specified on its own. - -To fine tune the fitting procedure and give results in a minimal -number of optimizing steps, we specify expected maximum and minimum -values for the column density, Doppler parameter, and redshift. These -values can be well outside the range of expected values for a typical line -and are mostly to prevent the algorithm from fitting to negative values -or becoming numerically unstable. - -Common initial guesses for Doppler parameter and column density should also -be given. These values will not affect the specific values generated by -the fitting algorithm, provided they are in a reasonably appropriate range -(ie: within the range given by the max and min values for the parameter). - -For a spectrum containing both the H Lyman-alpha line and the OVI doublet, -we set up a fit as shown below. - -.. code-block:: python - - HI_parameters = {'name':'HI', - 'f': [.4164], - 'Gamma':[6.265E8], - 'wavelength':[1215.67], - 'numLines':1, - 'maxN': 1E22, 'minN':1E11, - 'maxb': 300, 'minb':1, - 'maxz': 6, 'minz':0, - 'init_b':30, - 'init_N':1E14} - - OVI_parameters = {'name':'OVI', - 'f':[.1325,.06580], - 'Gamma':[4.148E8,4.076E8], - 'wavelength':[1031.9261,1037.6167], - 'numLines':2, - 'maxN':1E17,'minN':1E11, - 'maxb':300, 'minb':1, - 'maxz':6, 'minz':0, - 'init_b':20, - 'init_N':1E12} - - speciesDicts = {'HI':HI_parameters,'OVI':OVI_parameters} - - -Generating Fit of Spectrum --------------------------- - -After loading a spectrum and specifying the properties of the species -used to generate the spectrum, an appropriate fit can be generated. - -.. code-block:: python - - orderFits = ['OVI','HI'] - - fitted_lines, fitted_flux = generate_total_fit(wavelength, - flux, orderFits, speciesDicts) - -The orderFits variable is used to determine in what order the species -should be fitted. This may affect the results of the resulting fit, -as lines may be fit as an incorrect species. For best results, it is -recommended to fit species the generate multiple lines first, as a fit -will only be accepted if all of the lines are fit appropriately using -a single set of parameters. 
At the moment no cross correlation between -lines of different species is performed. - -The parameters of the lines that are needed to fit the spectrum are contained -in the ``fitted_lines`` variable. Each species given in ``orderFits`` will -be a key in the ``fitted_lines`` dictionary. The entry for each species -key will be another dictionary containing entries for 'N','b','z', and -'group#' which are the column density, Doppler parameter, redshift, -and associate line complex respectively. The i :superscript:`th` line -of a given species is then given by the parameters ``N[i]``, ``b[i]``, -and ``z[i]`` and is part of the same complex (and was fitted at the same time) -as all lines with the same group number as ``group#[i]``. - -The ``fitted_flux`` is an ndarray of the same size as ``flux`` and -``wavelength`` that contains the cumulative absorption spectrum generated -by the lines contained in ``fitted_lines``. - -Saving a Spectrum Fit ---------------------- - -Saving the results of a fitted spectrum for further analysis is -accomplished automatically using the h5 file format. A group -is made for each species that is fit, and each species group has -a group for the corresponding N, b, z, and group# values. - -.. _fitting_procedure: - -Procedure for Generating Fits ------------------------------ - -.. sectionauthor:: Hilary Egan - -To generate a fit for a spectrum -:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit` -is called. -This function controls the identification of line complexes, the fit -of a series of absorption lines for each appropriate species, checks of -those fits, and returns the results of the fits. - -Finding Line Complexes ----------------------- - -Line complexes are found using the -:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.find_complexes` -function. The process by which line complexes are found involves walking -through the array of flux in order from minimum to maximum wavelength, and -finding series of spatially contiguous cells whose flux is less than some -limit. These regions are then checked in terms of an additional flux limit -and size. The bounds of all the passing regions are then listed and returned. -Those bounds that cover an exceptionally large region of wavelength space will -be broken up if a suitable cut point is found. This method is only appropriate -for noiseless spectra. - -The optional parameter ``complexLim`` (default = 0.999), controls the limit -that triggers the identification of a spatially contiguous region of flux -that could be a line complex. This number should be very close to 1 but not -exactly equal. It should also be at least an order of magnitude closer to 1 -than the later discussed ``fitLim`` parameter, because a line complex where -the flux of the trough is very close to the flux of the edge can be incredibly -unstable when optimizing. - -The ``fitLim`` parameter controls what is the maximum flux that the trough -of the region can have and still be considered a line complex. This -effectively controls the sensitivity to very low column absorbers. Default -value is ``fitLim`` = 0.99. If a region is identified where the flux of the -trough is greater than this value, the region is simply ignored. - -The ``minLength`` parameter controls the minimum number of array elements -that an identified region must have. This value must be greater than or -equal to 3 as there are a minimum of 3 free parameters that must be fit. -Default is ``minLength`` = 3. 
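Before moving on to the parameters that control splitting, here is a simplified, pure-Python illustration of the region-finding walk described above. This is only a sketch of the logic for noiseless spectra, not the actual ``find_complexes`` implementation.

.. code-block:: python

    import numpy as np

    def find_simple_complexes(flux, complexLim=0.999, fitLim=0.99, minLength=3):
        """Illustrative only: find contiguous runs of flux below complexLim."""
        flux = np.asarray(flux)
        regions = []
        start, in_region = 0, False
        for i, f in enumerate(flux):
            if f < complexLim and not in_region:
                start, in_region = i, True
            elif f >= complexLim and in_region:
                in_region = False
                # Keep the region only if its trough is deep enough and it
                # spans at least minLength array elements.
                if flux[start:i].min() < fitLim and (i - start) >= minLength:
                    regions.append((start, i))
        return regions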
- -The ``maxLength`` parameter controls the maximum number of array elements -that an identified region can have before it is split into separate regions. -Default is ``maxLength`` = 1000. This should be adjusted based on the -resolution of the spectrum to remain appropriate. The value correspond -to a wavelength of roughly 50 angstroms. - -The ``splitLim`` parameter controls how exceptionally large regions are split. -When such a region is identified by having more array elements than -``maxLength``, the point of maximum flux (or minimum absorption) in the -middle two quartiles is identified. If that point has a flux greater than -or equal to ``splitLim``, then two separate complexes are created: one from -the lower wavelength edge to the minimum absorption point and the other from -the minimum absorption point to the higher wavelength edge. The default -value is ``splitLim`` =.99, but it should not drastically affect results, so -long as the value is reasonably close to 1. - -Fitting a Line Complex ----------------------- - -After a complex is identified, it is fitted by iteratively adding and -optimizing a set of Voigt Profiles for a particular species until the -region is considered successfully fit. The optimizing is accomplished -using scipy's least squares optimizer. This requires an initial estimate -of the parameters to be fit (column density, b-value, redshift) for each -line. - -Each time a line is added, the guess of the parameters is based on -the difference between the line complex and the fit so far. For the first line -this just means the initial guess is based solely on the flux of the line -complex. The column density is given by the initial column density given -in the species parameters dictionary. If the line is saturated (some portion -of the flux with a value less than .1) than the larger initial column density -guess is chosen. If the flux is relatively high (all values >.9) than the -smaller initial guess is given. These values are chosen to make optimization -faster and more stable by being closer to the actual value, but the final -results of fitting should not depend on them as they merely provide a -starting point. - -After the parameters for a line are optimized for the first time, the -optimized parameters are then used for the initial guess on subsequent -iterations with more lines. - -The complex is considered successfully fit when the sum of the squares of -the difference between the flux generated from the fit and the desired flux -profile is less than ``errBound``. ``errBound`` is related to the optional -parameter to -:meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.generate_total_fit`, -``maxAvgError`` by the number of array elements in the region such that -``errBound`` = number of elements * ``maxAvgError``. - -There are several other conditions under which the cycle of adding and -optimizing lines will halt. If the error of the optimized fit from adding -a line is an order of magnitude worse than the error of the fit without -that line, then it is assumed that the fitting has become unstable and -the latest line is removed. Lines are also prevented from being added if -the total number of lines is greater than the number of elements in the flux -array being fit divided by 3. This is because there must not be more free -parameters in a fit than the number of points to constrain them. 
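To make the stopping criterion concrete, the relationship between ``maxAvgError`` and ``errBound`` can be written as a short check. This is an illustrative sketch; the default tolerance shown is a placeholder, not the value used by the fitter.

.. code-block:: python

    import numpy as np

    def complex_is_fit(region_flux, fit_flux, maxAvgError=1.0e-4):
        """Sketch of the convergence test: errBound scales with region size."""
        errBound = len(region_flux) * maxAvgError
        residual = np.asarray(fit_flux) - np.asarray(region_flux)
        return np.sum(residual ** 2) < errBound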
- -Checking Fit Results --------------------- - -After an acceptable fit for a region is determined, there are several steps -the algorithm must go through to validate the fits. - -First, the parameters must be in a reasonable range. This is a check to make -sure that the optimization did not become unstable and generate a fit that -diverges wildly outside the region where the fit was performed. This way, even -if particular complex cannot be fit, the rest of the spectrum fitting still -behaves as expected. The range of acceptability for each parameter is given -in the species parameter dictionary. These are merely broad limits that will -prevent numerical instability rather than physical limits. - -In cases where a single species generates multiple lines (as in the OVI -doublet), the fits are then checked for higher wavelength lines. Originally -the fits are generated only considering the lowest wavelength fit to a region. -This is because we perform the fitting of complexes in order from the lowest -wavelength to the highest, so any contribution to a complex being fit must -come from the lower wavelength as the higher wavelength contributions would -already have been subtracted out after fitting the lower wavelength. - -Saturated Lyman Alpha Fitting Tools ------------------------------------ - -In cases where a large or saturated line (there exists a point in the complex -where the flux is less than .1) fails to be fit properly at first pass, a -more robust set of fitting tools is used to try and remedy the situation. -The basic approach is to simply try a much wider range of initial parameter -guesses in order to find the true optimization minimum, rather than getting -stuck in a local minimum. A set of hard coded initial parameter guesses -for Lyman alpha lines is given by the function -:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.get_test_lines`. -Also included in these parameter guesses is an initial guess of a high -column cool line overlapping a lower column warm line, indicative of a -broad Lyman alpha (BLA) absorber. diff --git a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst deleted file mode 100644 index 1eb73e93c2f..00000000000 --- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst +++ /dev/null @@ -1,156 +0,0 @@ -.. _ellipsoid_analysis: - -Halo Ellipsoid Analysis -======================= -.. sectionauthor:: Geoffrey So - -.. warning:: This functionality is currently broken and needs to - be updated to make use of the :ref:`halo_catalog` framework. - Anyone interested in doing so should contact the yt-dev list. - -Purpose -------- - -The purpose of creating this feature in yt is to analyze field -properties that surround dark matter haloes. Originally, this was -usually done with the sphere 3D container, but since many halo -particles are linked together in a more elongated shape, I thought it -would be better to use an ellipsoid 3D container to wrap around the -particles. This way, less of the empty-of-particle space around the -halo would be included when doing the analysis of field properties -where the particles are suppose to occupy. 
- -General Overview ----------------- - -In order to use the ellipsoid 3D container object, one must supply it -with a center, the magnitude of the semi-principle axes, the direction -of the first semi-principle axis, the tilt angle (rotation angle about -the y axis that will align the first semi-principle axis with the x -axis once it is aligned in the x-z plane.) - -Once those parameters are determined, the function "ellipsoid" will -return the 3D object, and users will be able to get field attributes -from the data object just as they would from spheres, cylinders etc. - -Example -------- - -To use the ellipsoid container to get field information, you -will have to first determine the ellipsoid's parameters. This can be -done with the haloes obtained from halo finding, but essentially it -takes the information: - - #. Center position x,y,z - #. List of particles position x,y,z - -And calculates the ellipsoid information needed for the 3D container. - -What I usually do is get this information from the halo finder output -files in the .h5 HDF5 binary format. I load them into memory using the -LoadHaloes() function instead of reading in the ASCII output. - -Halo Finding -~~~~~~~~~~~~ -.. code-block:: python - - import yt - from yt.analysis_modules.halo_finding.api import * - - ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - halo_list = HaloFinder(ds) - halo_list.dump('MyHaloList') - -Ellipsoid Parameters -~~~~~~~~~~~~~~~~~~~~ -.. code-block:: python - - import yt - from yt.analysis_modules.halo_finding.api import * - - ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - haloes = LoadHaloes(ds, 'MyHaloList') - -Once the halo information is saved you can load it into the data -object "haloes", you can get loop over the list of haloes and do - -.. code-block:: python - - ell_param = haloes[0].get_ellipsoid_parameters() - -This will return 6 items - -#. The center of mass as an array. -#. A as a float. (Must have A>=B) -#. B as a float. (Must have B>=C) -#. C as a float. (Must have C > cell size) -#. e0 vector as an array. (now normalized automatically in the code) -#. tilt as a float. - -The center of mass would be the same one as returned by the halo -finder. The A, B, C are the largest to smallest magnitude of the -ellipsoid's semi-principle axes. "e0" is the largest semi-principle -axis vector direction that would have magnitude A but normalized. -The "tilt" is an angle measured in radians. It can be best described -as after the rotation about the z-axis to align e0 to x in the x-y -plane, and then rotating about the y-axis to align e0 completely to -the x-axis, the angle remaining to rotate about the x-axis to align -both e1 to the y-axis and e2 to the z-axis. - -Ellipsoid 3D Container -~~~~~~~~~~~~~~~~~~~~~~ - -Once the parameters are obtained from the get_ellipsoid_parameters() -function, or picked at random by the user, it can be input into the -ellipsoid container as: - -.. code-block:: python - - ell = ds.ellipsoid(ell_param[0], - ell_param[1], - ell_param[2], - ell_param[3], - ell_param[4], - ell_param[5]) - dens = ell.quantities['TotalQuantity']('density')[0] - -This way, "ell" will be the ellipsoid container, and "dens" will be -the total density of the ellipsoid in an unigrid simulation. One can -of course use this container object with parameters that they come up -with, the ellipsoid parameters do not have to come from the Halo -Finder. And of course, one can use the ellipsoid container with other -derived fields or fields that they are interested in. 
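Because ``get_ellipsoid_parameters()`` returns the six quantities in the order listed above, it can be convenient to unpack them into named variables before building the container. The sketch below continues from the examples above; the choice of ``cell_mass`` as the summed field is just an example.

.. code-block:: python

    # Continuing from the halo loading example above.
    center, A, B, C, e0, tilt = haloes[0].get_ellipsoid_parameters()

    ell = ds.ellipsoid(center, A, B, C, e0, tilt)
    total_mass = ell.quantities['TotalQuantity']('cell_mass')[0]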
- -Drawbacks ---------- - -Since this is a first attempt, there are many drawbacks and corners -cut. Many things listed here will be amended when I have time. - -* The ellipsoid 3D container like the boolean object, do not contain - particle position and velocity information. -* This currently assume periodic boundary condition, so if an - ellipsoid center is at the edge, it will return part of the opposite - edge field information. Will try to put in the option to turn off - periodicity in the future. -* This method gives a minimalistic ellipsoid centered around the - center of mass that contains all the particles, but sometimes people - prefer an inertial tensor triaxial ellipsoid described in - `Dubinski, Carlberg 1991 - `_. I have that - method composed but it is not fully tested yet. -* The method to obtain information from the halo still uses the center - of mass as the center of the ellipsoid, so it is not making the - smallest ellipsoid that contains the particles as possible. To - start at the center of the particles based on position will require - an O(:math:`N^2`) operation, right now I'm trying to limit - everything to O(:math:`N`) operations. If particle count does not - get too large, I may implement the O(:math:`N^2`) operation. -* Currently the list of haloes can be analyzed using object - parallelism (one halo per core), but I'm not sure if haloes will get - big enough soon that other forms of parallelism will be needed to - analyze them due to memory constraint. -* This has only been tested on unigrid simulation data, not AMR. In - unigrid simulations, I can take "dens" from the example and divide - it by the total number of cells to get the average density, in AMR - one would need to do an volume weighted average instead. diff --git a/doc/source/analyzing/analysis_modules/exporting.rst b/doc/source/analyzing/analysis_modules/exporting.rst deleted file mode 100644 index 9636c707f0e..00000000000 --- a/doc/source/analyzing/analysis_modules/exporting.rst +++ /dev/null @@ -1,8 +0,0 @@ -Exporting to External Radiation Transport Codes -=============================================== - -.. toctree:: - :maxdepth: 2 - - sunrise_export - radmc3d_export \ No newline at end of file diff --git a/doc/source/analyzing/analysis_modules/halo_analysis.rst b/doc/source/analyzing/analysis_modules/halo_analysis.rst deleted file mode 100644 index aaca23bcb58..00000000000 --- a/doc/source/analyzing/analysis_modules/halo_analysis.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _halo-analysis: - -Halo Analysis -============= - -This section covers halo finding, performing extra analysis on halos, -and the halo mass function calculator. If you already have halo -catalogs and simply want to load them into yt, see -:ref:`halo-catalog-data`. - -.. toctree:: - :maxdepth: 2 - - halo_catalogs - halo_mass_function - halo_transition - halo_merger_tree - ellipsoid_analysis diff --git a/doc/source/analyzing/analysis_modules/halo_catalogs.rst b/doc/source/analyzing/analysis_modules/halo_catalogs.rst deleted file mode 100644 index ea7cfe1099c..00000000000 --- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst +++ /dev/null @@ -1,503 +0,0 @@ -.. _halo_catalog: - -Halo Finding and Analysis -========================= - -In yt-3.x, halo finding and analysis are combined into a single -framework called the -:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`. -This framework is substantially different from the halo analysis -machinery available in yt-2.x and is entirely backward incompatible. 
-For a direct translation of various halo analysis tasks using yt-2.x -to yt-3.x, see :ref:`halo-transition`. - -.. _halo_catalog_finding: - -Halo Finding ------------- - -If you already have a halo catalog, either produced by one of the methods -below or in a format described in :ref:`halo-catalog-data`, and want to -perform further analysis, skip to :ref:`halo_catalog_analysis`. - -Three halo finding methods exist within yt. These are: - -* :ref:`fof_finding`: a basic friend-of-friends algorithm (e.g. `Efstathiou et al. (1985) - `_) -* :ref:`hop_finding`: `Eisenstein and Hut (1998) - `_. -* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that - scales well and does substructure finding (`Behroozi et al. - 2011 `_) - -Halo finding is performed through the creation of a -:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` -object. The dataset on which halo finding is to be performed should -be loaded and given to the -:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` -along with the ``finder_method`` keyword to specify the method to be -used. - -.. code-block:: python - - import yt - from yt.analysis_modules.halo_analysis.api import HaloCatalog - - data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - hc = HaloCatalog(data_ds=data_ds, finder_method='hop') - hc.create() - -The ``finder_method`` options should be given as "fof", "hop", or -"rockstar". Each of these methods has their own set of keyword -arguments to control functionality. These can specified in the form -of a dictionary using the ``finder_kwargs`` keyword. - -.. code-block:: python - - import yt - from yt.analysis_modules.halo_analysis.api import HaloCatalog - - data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - hc = HaloCatalog(data_ds=data_ds, finder_method='fof', - finder_kwargs={"ptype": "stars", - "padding": 0.02}) - hc.create() - -For a full list of keywords for each halo finder, see -:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`, -:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`, -and -:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`. - -.. _fof_finding: - -FOF -^^^ - -This is a basic friends-of-friends algorithm. See -`Efstathiou et al. (1985) -`_ for more -details as well as -:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`. - -.. _hop_finding: - -HOP -^^^ - -The version of HOP used in yt is an upgraded version of the -`publicly available HOP code -`_. Support -for 64-bit floats and integers has been added, as well as -parallel analysis through spatial decomposition. HOP builds -groups in this fashion: - -#. Estimates the local density at each particle using a - smoothing kernel. - -#. Builds chains of linked particles by 'hopping' from one - particle to its densest neighbor. A particle which is - its own densest neighbor is the end of the chain. - -#. All chains that share the same densest particle are - grouped together. - -#. Groups are included, linked together, or discarded - depending on the user-supplied over density - threshold parameter. The default is 160.0. - -See the `HOP method paper -`_ for -full details as well as -:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`. - -.. 
_rockstar_finding: - -Rockstar -^^^^^^^^ - -Rockstar uses an adaptive hierarchical refinement of friends-of-friends -groups in six phase-space dimensions and one time dimension, which -allows for robust (grid-independent, shape-independent, and noise- -resilient) tracking of substructure. The code is prepackaged with yt, -but also `separately available `_. The lead -developer is Peter Behroozi, and the methods are described in -`Behroozi et al. 2011 `_. -In order to run the Rockstar halo finder in yt, make sure you've -:ref:`installed it so that it can integrate with yt `. - -At the moment, Rockstar does not support multiple particle masses, -instead using a fixed particle mass. This will not affect most dark matter -simulations, but does make it less useful for finding halos from the stellar -mass. In simulations where the highest-resolution particles all have the -same mass (ie: zoom-in grid based simulations), one can set up a particle -filter to select the lowest mass particles and perform the halo finding -only on those. See the this cookbook recipe for an example: -:ref:`cookbook-rockstar-nested-grid`. - -To run the Rockstar Halo finding, you must launch python with MPI and -parallelization enabled. While Rockstar itself does not require MPI to run, -the MPI libraries allow yt to distribute particle information across multiple -nodes. - -.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes - connected by an Infiniband network can be problematic. Therefore, for now - we recommend forcing the use of the non-Infiniband network (e.g. Ethernet) - using this flag: ``--mca btl ^openib``. - For example, here is how Rockstar might be called using 24 cores: - ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``. - -The script above configures the Halo finder, launches a server process which -disseminates run information and coordinates writer-reader processes. -Afterwards, it launches reader and writer tasks, filling the available MPI -slots, which alternately read particle information and analyze for halo -content. - -The RockstarHaloFinder class has these options that can be supplied to the -halo catalog through the ``finder_kwargs`` argument: - -* ``dm_type``, the index of the dark matter particle. Default is 1. -* ``outbase``, This is where the out*list files that Rockstar makes should be - placed. Default is 'rockstar_halos'. -* ``num_readers``, the number of reader tasks (which are idle most of the - time.) Default is 1. -* ``num_writers``, the number of writer tasks (which are fed particles and - do most of the analysis). Default is MPI_TASKS-num_readers-1. - If left undefined, the above options are automatically - configured from the number of available MPI tasks. -* ``force_res``, the resolution that Rockstar uses for various calculations - and smoothing lengths. This is in units of Mpc/h. - If no value is provided, this parameter is automatically set to - the width of the smallest grid element in the simulation from the - last data snapshot (i.e. the one where time has evolved the - longest) in the time series: - ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``. -* ``total_particles``, if supplied, this is a pre-calculated - total number of dark matter - particles present in the simulation. For example, this is useful - when analyzing a series of snapshots where the number of dark - matter particles should not change and this will save some disk - access time. If left unspecified, it will - be calculated automatically. 
Default: ``None``. -* ``dm_only``, if set to ``True``, it will be assumed that there are - only dark matter particles present in the simulation. - This option does not modify the halos found by Rockstar, however - this option can save disk access time if there are no star particles - (or other non-dark matter particles) in the simulation. Default: ``False``. - -Rockstar dumps halo information in a series of text (halo*list and -out*list) and binary (halo*bin) files inside the ``outbase`` directory. -We use the halo list classes to recover the information. - -Inside the ``outbase`` directory there is a text file named ``datasets.txt`` -that records the connection between ds names and the Rockstar file names. - -.. _rockstar-installation: - -Installing Rockstar -""""""""""""""""""" - -Because of changes in the Rockstar API over time, yt only currently works with -a slightly older version of Rockstar. This version of Rockstar has been -slightly patched and modified to run as a library inside of yt. By default it -is not installed with yt, but installation is very easy. The -:ref:`install-script` used to install yt from source has a line: -``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``. You can -rerun this installer script over the top of an existing installation, and -it will only install components missing from the existing installation. -You can do this as follows. Put your freshly modified install_script in -the parent directory of the yt installation directory (e.g. the parent of -``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer: - -.. code-block:: bash - - cd $YT_DEST - cd .. - vi install_script.sh // or your favorite editor to change INST_ROCKSTAR=1 - bash < install_script.sh - -This will download Rockstar and install it as a library in yt. - -.. _halo_catalog_analysis: - -Extra Halo Analysis -------------------- - -As a reminder, all halo catalogs created by the methods outlined in -:ref:`halo_catalog_finding` as well as those in the formats discussed in -:ref:`halo-catalog-data` can be loaded in to yt as first-class datasets. -Once a halo catalog has been created, further analysis can be performed -by providing both the halo catalog and the original simulation dataset to -the -:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`. - -.. code-block:: python - - halos_ds = yt.load('rockstar_halos/halos_0.0.bin') - data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds) - -A data object can also be supplied via the keyword ``data_source``, -associated with either dataset, to control the spatial region in -which halo analysis will be performed. - -The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` -allows the user to create a pipeline of analysis actions that will be -performed on all halos in the existing catalog. The analysis can be -performed in parallel with separate processors or groups of processors -being allocated to perform the entire pipeline on individual halos. -The pipeline is setup by adding actions to the -:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`. -Each action is represented by a callback function that will be run on -each halo. There are four types of actions: - -* :ref:`halo_catalog_filters` -* :ref:`halo_catalog_quantities` -* :ref:`halo_catalog_callbacks` -* :ref:`halo_catalog_recipes` - -A list of all available filters, quantities, and callbacks can be found in -:ref:`halo_analysis_ref`. 
-All interaction with this analysis can be performed by importing from -halo_analysis. - -.. _halo_catalog_filters: - -Filters -^^^^^^^ - -A filter is a function that returns True or False. If the return value -is True, any further queued analysis will proceed and the halo in -question will be added to the final catalog. If the return value False, -further analysis will not be performed and the halo will not be included -in the final catalog. - -An example of adding a filter: - -.. code-block:: python - - hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun') - -Currently quantity_value is the only available filter, but more can be -added by the user by defining a function that accepts a halo object as -the first argument and then adding it as an available filter. If you -think that your filter may be of use to the general community, you can -add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a -pull request. - -An example of defining your own filter: - -.. code-block:: python - - def my_filter_function(halo): - - # Define condition for filter - filter_value = True - - # Return a boolean value - return filter_value - - # Add your filter to the filter registry - add_filter("my_filter", my_filter_function) - - # ... Later on in your script - hc.add_filter("my_filter") - -.. _halo_catalog_quantities: - -Quantities -^^^^^^^^^^ - -A quantity is a call back that returns a value or values. The return values -are stored within the halo object in a dictionary called "quantities." At -the end of the analysis, all of these quantities will be written to disk as -the final form of the generated halo catalog. - -Quantities may be available in the initial fields found in the halo catalog, -or calculated from a function after supplying a definition. An example -definition of center of mass is shown below. Currently available quantities -are center_of_mass and bulk_velocity. Their definitions are available in -``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that -your quantity may be of use to the general community, add it to -``halo_quantities.py`` and issue a pull request. Default halo quantities are: - -* ``particle_identifier`` -- Halo ID (e.g. 0 to N) -* ``particle_mass`` -- Mass of halo -* ``particle_position_x`` -- Location of halo -* ``particle_position_y`` -- Location of halo -* ``particle_position_z`` -- Location of halo -* ``virial_radius`` -- Virial radius of halo - -An example of adding a quantity: - -.. code-block:: python - - hc.add_quantity('center_of_mass') - -An example of defining your own quantity: - -.. code-block:: python - - def my_quantity_function(halo): - # Define quantity to return - quantity = 5 - - return quantity - - # Add your filter to the filter registry - add_quantity('my_quantity', my_quantity_function) - - - # ... Later on in your script - hc.add_quantity("my_quantity") - -This quantity will then be accessible for functions called later via the -*quantities* dictionary that is associated with the halo object. - -.. code-block:: python - - def my_new_function(halo): - print(halo.quantities["my_quantity"]) - add_callback("print_quantity", my_new_function) - - # ... Anywhere after "my_quantity" has been called - hc.add_callback("print_quantity") - -.. _halo_catalog_callbacks: - -Callbacks -^^^^^^^^^ - -A callback is actually the super class for quantities and filters and -is a general purpose function that does something, anything, to a Halo -object. 
This can include hanging new attributes off the Halo object, -performing analysis and writing to disk, etc. A callback does not return -anything. - -An example of using a pre-defined callback where we create a sphere for -each halo with a radius that is twice the saved ``radius``. - -.. code-block:: python - - hc.add_callback("sphere", factor=2.0) - -Currently available callbacks are located in -``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may -be added by using the syntax shown below. If you think that your -callback may be of use to the general community, add it to -halo_callbacks.py and issue a pull request. - -An example of defining your own callback: - -.. code-block:: python - - def my_callback_function(halo): - # Perform some callback actions here - x = 2 - halo.x_val = x - - # Add the callback to the callback registry - add_callback('my_callback', my_callback_function) - - - # ... Later on in your script - hc.add_callback("my_callback") - -.. _halo_catalog_recipes: - -Recipes -^^^^^^^ - -Recipes allow you to create analysis tasks that consist of a series of -callbacks, quantities, and filters that are run in succession. An example -of this is -:func:`~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities`, -which calculates virial quantities by first creating a sphere container, -performing 1D radial profiles, and then interpolating to get values at a -specified threshold overdensity. All of these operations are separate -callbacks, but the recipes allow you to add them to your analysis pipeline -with one call. For example, - -.. code-block:: python - - hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"]) - -The available recipes are located in -``yt/analysis_modules/halo_analysis/halo_recipes.py``. New recipes can be -created in the following manner: - -.. code-block:: python - - def my_recipe(halo_catalog, fields, weight_field=None): - # create a sphere - halo_catalog.add_callback("sphere") - # make profiles - halo_catalog.add_callback("profile", ["radius"], fields, - weight_field=weight_field) - # save the profile data - halo_catalog.add_callback("save_profiles", output_dir="profiles") - - # add recipe to the registry of recipes - add_recipe("profile_and_save", my_recipe) - - - # ... Later on in your script - hc.add_recipe("profile_and_save", ["density", "temperature"], - weight_field="cell_mass") - -Note, that unlike callback, filter, and quantity functions that take a ``Halo`` -object as the first argument, recipe functions should take a ``HaloCatalog`` -object as the first argument. - -Running the Pipeline --------------------- - -After all callbacks, quantities, and filters have been added, the -analysis begins with a call to HaloCatalog.create. - -.. code-block:: python - - hc.create() - -The save_halos keyword determines whether the actual Halo objects -are saved after analysis on them has completed or whether just the -contents of their quantities dicts will be retained for creating the -final catalog. The looping over halos uses a call to parallel_objects -allowing the user to control how many processors work on each halo. -The final catalog is written to disk in the output directory given -when the -:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` -object was created. - -All callbacks, quantities, and filters are stored in an actions list, -meaning that they are executed in the same order in which they were added. 
-This enables the use of simple, reusable, single action callbacks that -depend on each other. This also prevents unnecessary computation by allowing -the user to add filters at multiple stages to skip remaining analysis if it -is not warranted. - -Saving and Reloading Halo Catalogs ----------------------------------- - -A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` -saved to disk can be reloaded as a yt dataset with the -standard call to ``yt.load``. See :ref:`halocatalog` for a demonstration -of loading and working only with the catalog. -Any side data, such as profiles, can be reloaded -with a ``load_profiles`` callback and a call to -:func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`. - -.. code-block:: python - - hds = yt.load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5") - hc = HaloCatalog(halos_ds=hds, - output_dir="halo_catalogs/catalog_0046") - hc.add_callback("load_profiles", output_dir="profiles", - filename="virial_profiles") - hc.load() - -Halo Catalog in Action ----------------------- - -For a full example of how to use these methods together see -:ref:`halo-analysis-example`. diff --git a/doc/source/analyzing/analysis_modules/halo_mass_function.rst b/doc/source/analyzing/analysis_modules/halo_mass_function.rst deleted file mode 100644 index 33cb7aa9057..00000000000 --- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst +++ /dev/null @@ -1,238 +0,0 @@ -.. _halo_mass_function: - -.. note:: - - This module has been deprecated as it no longer functions correctly and is - unmaintained. The code has been moved to the `yt attic - `__. If you'd like to take it - over, please do! - -Halo Mass Function -================== - -The Halo Mass Function extension is capable of outputting the halo mass function -for a collection halos (input), and/or an analytical fit over a given mass range -for a set of specified cosmological parameters. -This extension is based on code generously provided by Brian O'Shea. - -General Overview ----------------- - -A halo mass function can be created for the halos identified in a cosmological -simulation, as well as analytic fits using any arbitrary set of cosmological -parameters. In order to create a mass function for simulated halos, they must -first be identified (using HOP, FOF, or Rockstar, see -:ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of -halo masses will then be found, and can be compared to the analytic prediction -at the same redshift and using the same cosmological parameters as were used -in the simulation. Care should be taken in this regard, as the analytic fit -requires the specification of cosmological parameters that are not necessarily -stored in the halo or simulation datasets, and must be specified by the user. -Efforts have been made to set reasonable defaults for these parameters, but -setting them to identically match those used in the simulation will produce a -much better comparison. - -Analytic halo mass functions can also be created without a halo dataset by -providing either a simulation dataset or specifying cosmological parameters by -hand. yt includes 5 analytic fits for the halo mass function which can be -selected. - - -Analytical Fits ---------------- - -There are five analytical fits to choose from. - - 1. `Press-Schechter (1974) `_ - 2. `Jenkins (2001) `_ - 3. `Sheth-Tormen (2002) `_ - 4. `Warren (2006) `_ - 5. `Tinker (2008) `_ - -We encourage reading each of the primary sources. 
-In general, we recommend the Warren fitting function because it matches -simulations over a wide range of masses very well. -The Warren fitting function is the default (equivalent to not specifying -``fitting_function`` in ``HaloMassFcn()``, below). -The Tinker fit is for the :math:`\Delta=300` fits given in the paper, which -appears to fit HOP threshold=80.0 fairly well. - - -Basic Halo Mass Function Creation ---------------------------------- - -The simplest way to create a halo mass function object is to simply pass it no -arguments and let it use the default cosmological parameters. - -.. code-block:: python - - from yt.analysis_modules.halo_mass_function.api import * - - hmf = HaloMassFcn() - -This will create a HaloMassFcn object off of which arrays holding the information -about the analytic mass function hang. Creating the halo mass function for a set -of simulated halos requires only the loaded halo dataset to be passed as an -argument. This also creates the analytic mass function using all parameters that -can be extracted from the halo dataset, at the same redshift, spanning a similar -range of halo masses. - -.. code-block:: python - - from yt.mods import * - from yt.analysis_modules.halo_mass_function.api import * - - my_halos = load("rockstar_halos/halos_0.0.bin") - hmf = HaloMassFcn(halos_ds=my_halos) - -A simulation dataset can be passed along with additional cosmological parameters -to create an analytic mass function. - -.. code-block:: python - - from yt.mods import * - from yt.analysis_modules.halo_mass_function.api import * - - my_ds = load("RD0027/RedshiftOutput0027") - hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, - sigma8 = 0.8, log_mass_min=5, log_mass_max=9) - -The analytic mass function can be created for a set of arbitrary cosmological -parameters without any dataset being passed as an argument. - -.. code-block:: python - - from yt.mods import * - from yt.analysis_modules.halo_mass_function.api import * - - hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, - omega_lambda0=0.73, hubble0=0.7, this_redshift=10, - log_mass_min=5, log_mass_max=9, fitting_function=5) - -Keyword Arguments ------------------ - -* **simulation_ds** (*Simulation dataset object*) - The loaded simulation dataset, used to set cosmological parameters. - Default : None. - -* **halos_ds** (*Halo dataset object*) - The halos from a simulation to be used for creation of the - halo mass function in the simulation. - Default : None. - -* **make_analytic** (*bool*) - Whether or not to calculate the analytic mass function to go with - the simulated halo mass function. Automatically set to true if a - simulation dataset is provided. - Default : True. - -* **omega_matter0** (*float*) - The fraction of the universe made up of matter (dark and baryonic). - Default : 0.2726. - -* **omega_lambda0** (*float*) - The fraction of the universe made up of dark energy. - Default : 0.7274. - -* **omega_baryon0** (*float*) - The fraction of the universe made up of baryonic matter. This is not - always stored in the dataset and should be checked by hand. - Default : 0.0456. - -* **hubble0** (*float*) - The expansion rate of the universe in units of 100 km/s/Mpc. - Default : 0.704. - -* **sigma8** (*float*) - The amplitude of the linear power spectrum at z=0 as specified by - the rms amplitude of mass-fluctuations in a top-hat sphere of radius - 8 Mpc/h. This is not always stored in the dataset and should be - checked by hand. - Default : 0.86. 
- -* **primordial_index** (*float*) - This is the index of the mass power spectrum before modification by - the transfer function. A value of 1 corresponds to the scale-free - primordial spectrum. This is not always stored in the dataset and - should be checked by hand. - Default : 1.0. - -* **this_redshift** (*float*) - The current redshift. - Default : 0. - -* **log_mass_min** (*float*) - The log10 of the mass of the minimum of the halo mass range. This is - set automatically by the range of halo masses if a simulated halo - dataset is provided. If a halo dataset if not provided and no value - is specified, it will be set to 5. Units: M_solar - Default : None. - -* **log_mass_max** (*float*) - The log10 of the mass of the maximum of the halo mass range. This is - set automatically by the range of halo masses if a simulated halo - dataset is provided. If a halo dataset if not provided and no value - is specified, it will be set to 16. Units: M_solar - Default : None. - -* **num_sigma_bins** (*float*) - The number of bins (points) to use for the calculation of the - analytic mass function. - Default : 360. - -* **fitting_function** (*int*) - Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, - 3 = Sheth-Tormen, 4 = Warren, 5 = Tinker - Default : 4. - -Outputs -------- - -A HaloMassFnc object has several arrays hanging off of it containing the - -* **masses_sim**: Halo masses from simulated halos. Units: M_solar - -* **n_cumulative_sim**: Number density of halos with mass greater than the - corresponding mass in masses_sim. Units: comoving Mpc^-3 - -* **masses_analytic**: Masses used for the generation of the analytic mass - function. Units: M_solar - -* **n_cumulative_analytic**: Number density of halos with mass greater then - the corresponding mass in masses_analytic. Units: comoving Mpc^-3 - -* **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM. - -After the mass function has been created for both simulated halos and the -corresponding analytic fits, they can be plotted though something along the -lines of - -.. code-block:: python - - import yt - from yt.analysis_modules.halo_mass_function.api import * - import matplotlib.pyplot as plt - - my_halos = yt.load("rockstar_halos/halos_0.0.bin") - hmf = HaloMassFcn(halos_ds=my_halos) - - plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim) - plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic) - -Attached to ``hmf`` is the convenience function ``write_out``, which saves the -halo mass function to a text file. (continued from above) -.. code-block:: python - - hmf.write_out(prefix='hmf', analytic=True, simulated=True) - -This writes the files ``hmf-analytic.dat`` with columns: - -* mass [Msun] -* cumulative number density of halos [comoving Mpc^-3] -* (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3] - -and the file ``hmf-simulated.dat`` with columns: - -* mass [Msun] -* cumulative number density of halos [comoving Mpc^-3] diff --git a/doc/source/analyzing/analysis_modules/halo_merger_tree.rst b/doc/source/analyzing/analysis_modules/halo_merger_tree.rst deleted file mode 100644 index c66e32248b7..00000000000 --- a/doc/source/analyzing/analysis_modules/halo_merger_tree.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _merger_tree: - -Halo Merger Tree -================ - -The ``yt`` merger tree was removed as of :code:`yt-3.0`. This -functionality can still be found in :code:`yt-2.x`. However, -the recommended option is to use the -`ytree `_ package, which can be -installed via pip: - -.. 
code-block:: bash - - pip install ytree - -For more information on ``ytree``, see the documentation -`here `__. diff --git a/doc/source/analyzing/analysis_modules/halo_transition.rst b/doc/source/analyzing/analysis_modules/halo_transition.rst deleted file mode 100644 index 507595c9cdb..00000000000 --- a/doc/source/analyzing/analysis_modules/halo_transition.rst +++ /dev/null @@ -1,111 +0,0 @@ -.. _halo-transition: - -Transitioning From yt-2 to yt-3 -=============================== - -If you're used to halo analysis in yt-2.x, here's a guide to -how to update your analysis pipeline to take advantage of -the new halo catalog infrastructure. If you're starting -from scratch, see :ref:`halo_catalog`. - -Finding Halos -------------- - -Previously, halos were found using calls to ``HaloFinder``, -``FOFHaloFinder`` and ``RockstarHaloFinder``. Now it is -encouraged that you find the halos upon creation of the halo catalog -by supplying a value to the ``finder_method`` keyword when calling -``HaloCatalog``. Currently, only halos found using rockstar or a -previous instance of a halo catalog are able to be loaded -using the ``halos_ds`` keyword. - -To pass additional arguments to the halo finders -themselves, supply a dictionary to ``finder_kwargs`` where -each key in the dictionary is a keyword of the halo finder -and the corresponding value is the value to be passed for -that keyword. - -Getting Halo Information ------------------------- -All quantities that used to be present in a ``halo_list`` are -still able to be found but are not necessarily included by default. -Every halo will by default have the following properties: - -* particle_position_i (where i can be x,y,z) -* particle_mass -* virial_radius -* particle_identifier - -If other quantities are desired, they can be included by adding -the corresponding quantity before the catalog is created. See -the full halo catalog documentation for further information about -how to add these quantities and what quantities are available. - -You no longer have to iterate over halos in the ``halo_list``. -Now a halo dataset can be treated as a regular dataset and -all quantities are available by accessing ``all_data``. -Specifically, all quantities can be accessed as shown: - -.. code-block:: python - - import yt - from yt.analysis_modules.halo_analysis.api import HaloCatalog - data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - hc = HaloCatalog(data_ds=data_ds, finder_method='hop') - hc.create() - ad = hc.halos_ds.all_data() - masses = ad['particle_mass'][:] - - -Prefiltering Halos ------------------- - -Prefiltering halos before analysis takes place is now done -by adding a filter before the call to create. An example -is shown below - -.. code-block:: python - - import yt - from yt.analysis_modules.halo_analysis.api import HaloCatalog - data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - hc = HaloCatalog(data_ds=data_ds, finder_method='hop') - hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun") - hc.create() - -Profiling Halos ---------------- - -The halo profiler available in yt-2.x has been removed, and -profiling functionality is now completely contained within the -halo catalog. A complete example of how to profile halos by -radius using the new infrastructure is given in -:ref:`halo-analysis-example`. - -Plotting Halos --------------- - -Annotating halo locations onto a slice or projection works in -the same way as in yt-2.x, but now a halo catalog must be -passed to the annotate halo call rather than a halo list. - -.. 
code-block:: python - - import yt - from yt.analysis_modules.halo_analysis.api import HaloCatalog - - data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - hc = HaloCatalog(data_ds=data_ds, finder_method='hop') - hc.create() - - prj = yt.ProjectionPlot(data_ds, 'z', 'density') - prj.annotate_halos(hc) - prj.save() - -Written Data ------------- - -Data is now written out in the form of h5 files rather than -text files. The directory they are written out to is -controlled by the keyword ``output_dir``. Each quantity -is a field in the file. diff --git a/doc/source/analyzing/analysis_modules/index.rst b/doc/source/analyzing/analysis_modules/index.rst deleted file mode 100644 index 23d7a2c48ef..00000000000 --- a/doc/source/analyzing/analysis_modules/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _analysis-modules: - -Topic-Specific Analysis Modules -=============================== - -These semi-autonomous analysis modules are unique to specific subject matter -like tracking halos, generating synthetic observations, exporting output to -external visualization routines, and more. Because they are somewhat -specialized, they exist in their own corners of yt, and they do not get loaded -by default when you :code:`import yt`. Read up on these advanced tools below. - -.. toctree:: - :maxdepth: 2 - - cosmology_calculator - halo_analysis - synthetic_observation - exporting - two_point_functions - clump_finding diff --git a/doc/source/analyzing/analysis_modules/light_cone_generator.rst b/doc/source/analyzing/analysis_modules/light_cone_generator.rst deleted file mode 100644 index 0de16608277..00000000000 --- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst +++ /dev/null @@ -1,140 +0,0 @@ -.. _light-cone-generator: - -Light Cone Generator -==================== - -Light cones are created by stacking multiple datasets together to -continuously span a given redshift interval. To make a projection of a -field through a light cone, the width of individual slices is adjusted -such that each slice has the same angular size. -Each slice is randomly shifted and projected along a random axis to -ensure that the same structures are not sampled multiple times. A -recipe for creating a simple light cone projection can be found in -the cookbook under :ref:`cookbook-light_cone`. - -.. image:: _images/LightCone_full_small.png - :width: 500 - -A light cone projection of the thermal Sunyaev-Zeldovich Y parameter from -z = 0 to 0.4 with a 450x450 arcminute field of view using 9 individual -slices. The panels shows the contributions from the 9 individual slices with -the final light cone image shown in the bottom, right. - -Configuring the Light Cone Generator ------------------------------------- - -The required arguments to instantiate a -:class:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone` -object are the path to the simulation parameter file, the simulation type, the -nearest redshift, and the furthest redshift of the light cone. - -.. code-block:: python - - from yt.analysis_modules.cosmological_observation.api import \ - LightCone - - lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo', - 'Enzo', 0., 0.1) - -The additional keyword arguments are: - -* ``use_minimum_datasets`` (*bool*): If True, the minimum number of - datasets is used to connect the initial and final redshift. If False, - the light cone solution will contain as many entries as possible within - the redshift interval. Default: True. 
- -* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between - consecutive datasets in the returned list. Default: 0.0. - -* ``minimum_coherent_box_fraction`` (*float*): Used with - ``use_minimum_datasets`` set to False, this parameter specifies the - fraction of the total box size to be traversed before rerandomizing the - projection axis and center. This was invented to allow light cones with - thin slices to sample coherent large scale structure, but in practice does - not work so well. Try setting this parameter to 1 and see what happens. - Default: 0.0. - -* ``time_data`` (*bool*): Whether or not to include time outputs when - gathering datasets for time series. Default: True. - -* ``redshift_data`` (*bool*): Whether or not to include redshift outputs - when gathering datasets for time series. Default: True. - -* ``set_parameters`` (*dict*): Dictionary of parameters to attach to - ds.parameters. Default: None. - -* ``output_dir`` (*string*): The directory in which images and data files - will be written. Default: 'LC'. - -* ``output_prefix`` (*string*): The prefix of all images and data files. - Default: 'LightCone'. - -Creating Light Cone Solutions ------------------------------ - -A light cone solution consists of a list of datasets spanning a redshift -interval with a random orientation for each dataset. A new solution -is calculated with the -:func:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone.calculate_light_cone_solution` -function: - -.. code-block:: python - - lc.calculate_light_cone_solution(seed=123456789, filename='lightcone.dat') - -The keyword argument are: - -* ``seed`` (*int*): the seed for the random number generator. Any light - cone solution can be reproduced by giving the same random seed. - Default: None. - -* ``filename`` (*str*): if given, a text file detailing the solution will be - written out. Default: None. - -Making a Light Cone Projection ------------------------------- - -With the light cone solution in place, projections with a given field of -view and resolution can be made of any available field: - -.. code-block:: python - - field = 'density' - field_of_view = (600.0, "arcmin") - resolution = (60.0, "arcsec") - lc.project_light_cone(field_of_vew, resolution, - field , weight_field=None, - save_stack=True, - save_slice_images=True) - -The field of view and resolution can be specified either as a tuple of -value and unit string or as a unitful ``YTQuantity``. -Additional keyword arguments: - -* ``weight_field`` (*str*): the weight field of the projection. This has - the same meaning as in standard projections. Default: None. - -* ``photon_field`` (*bool*): if True, the projection data for each slice is - decremented by 4 pi R :superscript:`2` , where R is the luminosity - distance between the observer and the slice redshift. Default: False. - -* ``save_stack`` (*bool*): if True, the unflatted light cone data including - each individual slice is written to an hdf5 file. Default: True. - -* ``save_final_image`` (*bool*): if True, save an image of the final light - cone projection. Default: True. - -* ``save_slice_images`` (*bool*): save images for each individual projection - slice. Default: False. - -* ``cmap_name`` (*string*): color map for images. Default: "algae". - -* ``njobs`` (*int*): The number of parallel jobs over which the light cone - projection will be split. Choose -1 for one processor per individual - projection and 1 to have all processors work together on each projection. - Default: 1. 
- -* ``dynamic`` (*bool*): If True, use dynamic load balancing to create the - projections. Default: False. - -.. note:: As of :code:`yt-3.0`, the halo mask and unique light cone functionality no longer exist. These are still available in :code:`yt-2.x`. If you would like to use these features in :code:`yt-3.x`, help is needed to port them over. Contact the yt-users mailing list if you are interested in doing this. diff --git a/doc/source/analyzing/analysis_modules/light_ray_generator.rst b/doc/source/analyzing/analysis_modules/light_ray_generator.rst deleted file mode 100644 index 6a638e8b641..00000000000 --- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst +++ /dev/null @@ -1,235 +0,0 @@ -.. _light-ray-generator: - -Light Ray Generator -=================== - -.. note:: - - Development of the LightRay module has been moved to the Trident - package. This version is deprecated and will be removed from yt - in a future release. See https://github.com/trident-project/trident - for further information. - -Light rays are similar to light cones (:ref:`light-cone-generator`) in how -they stack multiple datasets together to span a redshift interval. Unlike -light cones, which stack randomly oriented projections from each -dataset to create synthetic images, light rays use thin pencil beams to -simulate QSO sight lines. A sample script can be found in the cookbook -under :ref:`cookbook-light_ray`. - -.. image:: _images/lightray.png - -A ray segment records the information of all grid cells intersected by the -ray as well as the path length, ``dl``, of the ray through the cell. Column -densities can be calculated by multiplying physical densities by the path -length. - -Configuring the Light Ray Generator ------------------------------------ - -Below follows the creation of a light ray from multiple datasets stacked -together. However, a light ray can also be made from a single dataset. -For an example of this, see :ref:`cookbook-single-dataset-light-ray`. - -The arguments required to instantiate a -:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay` -object are the same as -those required for a -:class:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone` -object: the simulation parameter file, the -simulation type, the nearest redshift, and the furthest redshift. - -.. code-block:: python - - from yt.analysis_modules.cosmological_observation.api import LightRay - lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", - simulation_type="Enzo", - near_redshift=0.0, far_redshift=0.1) - -Additional keyword arguments are: - -* ``use_minimum_datasets`` (*bool*): If True, the minimum number of datasets - is used to connect the initial and final redshift. If false, the light - ray solution will contain as many entries as possible within the redshift - interval. Default: True. - -* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between - consecutive datasets in the returned list. Default: 0.0. - -* ``max_box_fraction`` (*float*): In terms of the size of the domain, the - maximum length a light ray segment can be in order to span the redshift interval - from one dataset to another. If using a zoom-in simulation, this parameter can - be set to the length of the high resolution region so as to limit ray segments - to that size. If the high resolution region is not cubical, the smallest side - should be used. 
Default: 1.0 (the size of the box) - -* ``minimum_coherent_box_fraction`` (*float*): Use to specify the minimum - length of a ray, in terms of the size of the domain, before the trajectory - is re-randomized. Set to 0 to have ray trajectory randomized for every - dataset. Set to np.inf (infinity) to use a single trajectory for the - entire ray. Default: 0.0. - -* ``time_data`` (*bool*): Whether or not to include time outputs when - gathering datasets for time series. Default: True. - -* ``redshift_data`` (*bool*): Whether or not to include redshift outputs - when gathering datasets for time series. Default: True. - -Making Light Ray Data ---------------------- - -Once the LightRay object has been instantiated, the -:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray` -function will trace out the rays in each dataset and collect information for all the -fields requested. The output file will be an HDF5 file containing all the -cell field values for all the cells that were intersected by the ray. A -single LightRay object can be used over and over to make multiple -randomizations, simply by changing the value of the random seed with the -``seed`` keyword. - -.. code-block:: python - - lr.make_light_ray(seed=8675309, - fields=['temperature', 'density'], - use_peculiar_velocity=True) - -The keyword arguments are: - -* ``seed`` (*int*): Seed for the random number generator. Default: None. - -* ``periodic`` (*bool*): If True, ray trajectories will make use of periodic - boundaries. If False, ray trajectories will not be periodic. Default : True. - -* ``left_edge`` (iterable of *floats* or *YTArray*): The left corner of the - region in which rays are to be generated. If None, the left edge will be - that of the domain. Default: None. - -* ``right_edge`` (iterable of *floats* or *YTArray*): The right corner of - the region in which rays are to be generated. If None, the right edge - will be that of the domain. Default: None. - -* ``min_level`` (*int*): The minimum refinement level of the spatial region in - which the ray passes. This can be used with zoom-in simulations where the - high resolution region does not keep a constant geometry. Default: None. - -* ``start_position`` (*list* of floats): Used only if creating a light ray - from a single dataset. The coordinates of the starting position of the - ray. Default: None. - -* ``end_position`` (*list* of floats): Used only if creating a light ray - from a single dataset. The coordinates of the ending position of the ray. - Default: None. - -* ``trajectory`` (*list* of floats): Used only if creating a light ray - from a single dataset. The (r, theta, phi) direction of the light ray. - Use either ``end_position`` or ``trajectory``, not both. - Default: None. - -* ``fields`` (*list*): A list of fields for which to get data. - Default: None. - -* ``solution_filename`` (*string*): Path to a text file where the - trajectories of each subray is written out. Default: None. - -* ``data_filename`` (*string*): Path to output file for ray data. - Default: None. - -* ``use_peculiar_velocity`` (*bool*): If True, the doppler redshift from - the peculiar velocity of gas along the ray is calculated and added to the - cosmological redshift as the "effective" redshift. - Default: True. - -* ``redshift`` (*float*): Used with light rays made from single datasets to - specify a starting redshift for the ray. 
If not used, the starting
-  redshift will be 0 for a non-cosmological dataset and the dataset redshift
-  for a cosmological dataset. Default: None.
-
-* ``njobs`` (*int*): The number of parallel jobs over which the slices for
-  the halo mask will be split. Choose -1 for one processor per individual
-  slice and 1 to have all processors work together on each projection.
-  Default: 1.
-
-Useful Tips for Making LightRays
---------------------------------
-
-Below are some tips that may come in handy for creating proper LightRays.
-
-How many snapshots do I need?
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The number of snapshots required to traverse some redshift interval depends
-on the simulation box size and cosmological parameters. Before running an
-expensive simulation only to find out that you don't have enough outputs
-to span the redshift interval you want, have a look at
-:ref:`planning-cosmology-simulations`. The functionality described there
-will allow you to calculate the precise number of snapshots and specific
-redshifts at which they should be written.
-
-My snapshots are too far apart!
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``max_box_fraction`` keyword, provided when creating the `LightRay`,
-allows the user to control how long a ray segment can be for an
-individual dataset. By default, the `LightRay` generator will try to
-make segments no longer than the size of the box to avoid sampling the
-same structures more than once. However, this can be increased in the
-case that the redshift interval between datasets is longer than the
-box size. Increasing this value should be done with caution as longer
-ray segments run a greater risk of coming back to somewhere near their
-original position.
-
-What if I have a zoom-in simulation?
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A zoom-in simulation has a high resolution region embedded within a
-larger, low resolution volume. In this type of simulation, it is likely
-that you will want the ray segments to stay within the high resolution
-region. To do this, you must first specify the size of the high
-resolution region when creating the `LightRay` using the
-``max_box_fraction`` keyword. This will make sure that
-the calculation of the spacing of the segment datasets only takes into
-account the high resolution region and not the full box size. If your
-high resolution region is not a perfect cube, specify the smallest side.
-Then, in the call to
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
-use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
-precise location of the high resolution region.
-
-Technically speaking, the ray segments should no longer be periodic
-since the high resolution region is only a sub-volume within the
-larger domain. To make the ray segments non-periodic, set the
-``periodic`` keyword to False. The LightRay generator will continue
-to generate randomly oriented segments until it finds one that fits
-entirely within the high resolution region. If you have a high
-resolution region that can move and change shape slightly as structure
-forms, use the `min_level` keyword to mandate that the ray segment only
-pass through cells that are refined to at least some minimum level.
-
-If the size of the high resolution region is not large enough to
-span the required redshift interval, the `LightRay` generator can
-be configured to treat the high resolution region as if it were
-periodic simply by setting the ``periodic`` keyword to True.
This -option should be used with caution as it will lead to the creation -of disconnected ray segments within a single dataset. - -I want a continuous trajectory over the entire ray. -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Set the ``minimum_coherent_box_fraction`` keyword argument to a very -large number, like infinity (`numpy.inf`). - -.. note:: - - As of :code:`yt-3.0`, the functionality for recording properties of - the nearest halo to each element of the ray no longer exists. This - is still available in :code:`yt-2.x`. If you would like to use this - feature in :code:`yt-3.x`, help is needed to port it over. Contact - the yt-users mailing list if you are interested in doing this. - -What Can I do with this? ------------------------- - -Once you have created a `LightRay`, you can use it to generate an -:ref:`absorption_spectrum`. In addition, you can use the -:class:`~yt.visualization.plot_modifications.RayCallback` to -:ref:`annotate-ray` on your plots. diff --git a/doc/source/analyzing/analysis_modules/photon_simulator.rst b/doc/source/analyzing/analysis_modules/photon_simulator.rst deleted file mode 100644 index c1dcf1d0c6f..00000000000 --- a/doc/source/analyzing/analysis_modules/photon_simulator.rst +++ /dev/null @@ -1,666 +0,0 @@ -.. _photon_simulator: - -Constructing Mock X-ray Observations ------------------------------------- - -.. warning:: - - The ``photon_simulator`` analysis module has been deprecated; it is - no longer being updated, and it will be removed in a future version - of yt. Users are encouraged to download and use the - `pyXSIM `_ package - instead. - -.. note:: - - If you just want to create derived fields for X-ray emission, - you should go `here `_ instead. - -The ``photon_simulator`` analysis module enables the creation of -simulated X-ray photon lists of events from datasets that yt is able -to read. The simulated events then can be exported to X-ray telescope -simulators to produce realistic observations or can be analyzed in-line. - -For detailed information about the design of the algorithm in yt, check -out `the SciPy 2014 Proceedings. `_. - -The algorithm is based off of that implemented in -`PHOX `_ for SPH datasets -by Veronica Biffi and Klaus Dolag. There are two relevant papers: - -`Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, -3545 `_ - -`Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, -1395 `_ - -The basic procedure is as follows: - -1. Using a spectral model for the photon flux given the gas properties, - and an algorithm for generating photons from the dataset loaded in - yt, produce a large number of photons in three-dimensional space - associated with the cells of the dataset. -2. Use this three-dimensional dataset as a sample from which to generate - photon events that are projected along a line of sight, Doppler and - cosmologically shifted, and absorbed by the Galactic foreground. -3. Optionally convolve these photons with instrument responses and - produce images and spectra. - -We'll demonstrate the functionality on a realistic dataset of a galaxy -cluster to get you started. - -.. note:: - - Currently, the ``photon_simulator`` analysis module only works with grid-based - data. - -Creating an X-ray observation of a dataset on disk -++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. code:: python - - import yt - #yt.enable_parallelism() # If you want to run in parallel this should go here! 
- from yt.analysis_modules.photon_simulator.api import * - from yt.utilities.cosmology import Cosmology - -.. note:: - - For parallel runs using ``mpi4py``, the call to ``yt.enable_parallelism`` should go *before* - the import of the ``photon_simulator`` module, as shown above. - -We're going to load up an Athena dataset of a galaxy cluster core: - -.. code:: python - - ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk", - units_override={"time_unit":(1.0,"Myr"), - "length_unit":(1.0,"Mpc"), - "mass_unit":(1.0e14,"Msun")}) - -First, to get a sense of what the resulting image will look like, let's -make a new yt field called ``"density_squared"``, since the X-ray -emission is proportional to :math:`\rho^2`, and a weak function of -temperature and metallicity. - -.. code:: python - - def _density_squared(field, data): - return data["density"]**2 - ds.add_field("density_squared", function=_density_squared, units="g**2/cm**6") - -Then we'll project this field along the z-axis. - -.. code:: python - - prj = yt.ProjectionPlot(ds, "z", ["density_squared"], width=(500., "kpc")) - prj.set_cmap("density_squared", "gray_r") - prj.show() - -.. image:: _images/dsquared.png - -In this simulation the core gas is sloshing, producing spiral-shaped -cold fronts. - -.. note:: - - To work out the following examples, you should install - `AtomDB `_ and get the files from the - `xray_data `_ auxiliary - data package (see the :ref:`xray_data_README` for details on the latter). - Make sure that in what follows you specify the full path to the locations - of these files. - -To generate photons from this dataset, we have several different things -we need to set up. The first is a standard yt data object. It could -be all of the cells in the domain, a rectangular solid region, a -cylindrical region, etc. Let's keep it simple and make a sphere at the -center of the domain, with a radius of 250 kpc: - -.. code:: python - - sp = ds.sphere("c", (250., "kpc")) - -This will serve as our ``data_source`` that we will use later. Next, we -need to create the ``SpectralModel`` instance that will determine how -the data in the grid cells will generate photons. By default, two -options are available. The first, ``XSpecThermalModel``, allows one to -use any thermal model that is known to -`XSPEC `_, such as -``"mekal"`` or ``"apec"``: - -.. code:: python - - mekal_model = XSpecThermalModel("mekal", 0.01, 10.0, 2000) - -This requires XSPEC and -`PyXspec `_ to -be installed. The second option, ``TableApecModel``, utilizes the data -from the `AtomDB `_ tables. We'll use this one -here: - -.. code:: python - - apec_model = TableApecModel("$SPECTRAL_DATA/spectral", - 0.01, 20.0, 20000, - thermal_broad=False, - apec_vers="2.0.2") - -The first argument sets the location of the AtomDB files, and the next -three arguments determine the minimum energy in keV, maximum energy in -keV, and the number of linearly-spaced bins to bin the spectrum in. If -the optional keyword ``thermal_broad`` is set to ``True``, the spectral -lines will be thermally broadened. - -.. note:: - - ``SpectralModel`` objects based on XSPEC models (both the thermal - emission and Galactic absorption models mentioned below) only work - in Python 2.7, since currently PyXspec only works with Python 2.x. - -Now that we have our ``SpectralModel`` that gives us a spectrum, we need -to connect this model to a ``PhotonModel`` that will connect the field -data in the ``data_source`` to the spectral model to actually generate -photons. 
For thermal spectra, we have a special ``PhotonModel`` called -``ThermalPhotonModel``: - -.. code:: python - - thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3, - photons_per_chunk=100000000, - method="invert_cdf") - -Where we pass in the ``SpectralModel``, and can optionally set values for -the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If -``Z_met`` is a float, it will assume that value for the metallicity -everywhere in terms of the solar metallicity. If it is a string, it will -assume that is the name of the metallicity field (which may be spatially -varying). - -The ``ThermalPhotonModel`` iterates over "chunks" of the supplied data source -to generate the photons, to reduce memory usage and make parallelization more -efficient. For each chunk, memory is set aside for the photon energies that will -be generated. ``photons_per_chunk`` is an optional keyword argument which controls -the size of this array. For large numbers of photons, you may find that -this parameter needs to be set higher, or if you are looking to decrease memory -usage, you might set this parameter lower. - -The ``method`` keyword argument is also optional, and determines how the individual -photon energies are generated from the spectrum. It may be set to one of two values: - -* ``method="invert_cdf"``: Construct the cumulative distribution function of the spectrum and invert - it, using uniformly drawn random numbers to determine the photon energies (fast, but relies - on construction of the CDF and interpolation between the points, so for some spectra it - may not be accurate enough). -* ``method="accept_reject"``: Generate the photon energies from the spectrum using an acceptance-rejection - technique (accurate, but likely to be slow). - -``method="invert_cdf"`` (the default) should be sufficient for most cases. - -Next, we need to specify "fiducial" values for the telescope collecting -area, exposure time, and cosmological redshift. Remember, the initial -photon generation will act as a source for Monte-Carlo sampling for more -realistic values of these parameters later, so choose generous values so -that you have a large number of photons to sample from. We will also -construct a ``Cosmology`` object: - -.. code:: python - - A = 3000. - exp_time = 4.0e5 - redshift = 0.05 - cosmo = Cosmology() - -Now, we finally combine everything together and create a ``PhotonList`` -instance: - -.. code:: python - - photons = PhotonList.from_scratch(sp, redshift, A, exp_time, - thermal_model, center="c", - cosmology=cosmo) - -By default, the angular diameter distance to the object is determined -from the ``cosmology`` and the cosmological ``redshift``. If a -``Cosmology`` instance is not provided, one will be made from the -default cosmological parameters. The ``center`` keyword argument specifies -the center of the photon distribution, and the photon positions will be -rescaled with this value as the origin. This argument accepts the following -values: - -* A NumPy array or list corresponding to the coordinates of the center in - units of code length. -* A ``YTArray`` corresponding to the coordinates of the center in some - length units. -* ``"center"`` or ``"c"`` corresponds to the domain center. -* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density. 
-* A two-element tuple specifying the max or min of a specific field, e.g., - ``("min","gravitational_potential")``, ``("max","dark_matter_density")`` - -If ``center`` is not specified, ``from_scratch`` will attempt to use the -``"center"`` field parameter of the ``data_source``. - -``from_scratch`` takes a few other optional keyword arguments. If your -source is local to the galaxy, you can set its distance directly, using -a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and -``cosmology`` will be ignored. Finally, if the photon generating -function accepts any parameters, they can be passed to ``from_scratch`` -via a ``parameters`` dictionary. - -At this point, the ``photons`` are distributed in the three-dimensional -space of the ``data_source``, with energies in the rest frame of the -plasma. Doppler and/or cosmological shifting of the photons will be -applied in the next step. - -The ``photons`` can be saved to disk in an HDF5 file: - -.. code:: python - - photons.write_h5_file("my_photons.h5") - -Which is most useful if it takes a long time to generate the photons, -because a ``PhotonList`` can be created in-memory from the dataset -stored on disk: - -.. code:: python - - photons = PhotonList.from_file("my_photons.h5") - -This enables one to make many simulated event sets, along different -projections, at different redshifts, with different exposure times, and -different instruments, with the same ``data_source``, without having to -do the expensive step of generating the photons all over again! - -To get a set of photon events such as that observed by X-ray telescopes, -we need to take the three-dimensional photon distribution and project it -along a line of sight. Also, this is the step at which we put in the -realistic values for the telescope collecting area, cosmological -redshift and/or source distance, and exposure time. The order of -operations goes like this: - -1. From the adjusted exposure time, redshift and/or source distance, and - telescope collecting area, determine the number of photons we will - *actually* observe. -2. Determine the plane of projection from the supplied normal vector, - and reproject the photon positions onto this plane. -3. Doppler-shift the photon energies according to the velocity along the - line of sight, and apply cosmological redshift if the source is not - local. -4. Optionally, alter the received distribution of photons via an - energy-dependent galactic absorption model. -5. Optionally, alter the received distribution of photons using an - effective area curve provided from an ancillary response file (ARF). -6. Optionally, scatter the photon energies into channels according to - the information from a redistribution matrix file (RMF). - -First, if we want to apply galactic absorption, we need to set up a -spectral model for the absorption coefficient, similar to the spectral -model for the emitted photons we set up before. Here again, we have two -options. The first, ``XSpecAbsorbModel``, allows one to use any -absorption model that XSpec is aware of that takes only the Galactic -column density :math:`N_H` as input: - -.. code:: python - - N_H = 0.1 - abs_model = XSpecAbsorbModel("wabs", N_H) - -The second option, ``TableAbsorbModel``, takes as input an HDF5 file -containing two datasets, ``"energy"`` (in keV), and ``"cross_section"`` -(in :math:`cm^2`), and the Galactic column density :math:`N_H`: - -.. code:: python - - abs_model = TableAbsorbModel("tbabs_table.h5", 0.1) - -Now we're ready to project the photons. 
First, we choose a line-of-sight -vector ``normal``. Second, we'll adjust the exposure time and the redshift. -Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll -specify a ``sky_center`` in RA and DEC on the sky in degrees. - -Also, we're going to convolve the photons with instrument ``responses``. -For this, you need a ARF/RMF pair with matching energy bins. This is of -course far short of a full simulation of a telescope ray-trace, but it's -a quick-and-dirty way to get something close to the real thing. We'll -discuss how to get your simulated events into a format suitable for -reading by telescope simulation codes later. If you just want to convolve -the photons with an ARF, you may specify that as the only response, but some -ARFs are unnormalized and still require the RMF for normalization. Check with -the documentation associated with these files for details. If we are using the -RMF to convolve energies, we must set ``convolve_energies=True``. - -.. code:: python - - ARF = "acisi_aimpt_cy17.arf" - RMF = "acisi_aimpt_cy17.rmf" - normal = [0.0,0.0,1.0] - events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, - absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], - convolve_energies=True, no_shifting=False, north_vector=None, - psf_sigma=None) - -In this case, we chose a three-vector ``normal`` to specify an arbitrary -line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to -project along one of those axes. - -``project_photons`` takes several other optional keyword arguments. - -* ``no_shifting`` (default ``False``) controls whether or not Doppler - shifting of photon energies is turned on. -* ``dist_new`` is a (value, unit) tuple that is used to set a new - angular diameter distance by hand instead of having it determined - by the cosmology and the value of the redshift. Should only be used - for simulations of nearby objects. -* For off-axis ``normal`` vectors, the ``north_vector`` argument can - be used to control what vector corresponds to the "up" direction in - the resulting event list. -* ``psf_sigma`` may be specified to provide a crude representation of - a PSF, and corresponds to the standard deviation (in degrees) of a - Gaussian PSF model. - -Let's just take a quick look at the raw events object: - -.. code:: python - - print(events) - -.. code:: python - - {'eobs': YTArray([ 0.32086522, 0.32271389, 0.32562708, ..., 8.90600621, - 9.73534237, 10.21614256]) keV, - 'xsky': YTArray([ 187.5177707 , 187.4887825 , 187.50733609, ..., 187.5059345 , - 187.49897546, 187.47307048]) degree, - 'ysky': YTArray([ 12.33519996, 12.3544496 , 12.32750903, ..., 12.34907707, - 12.33327653, 12.32955225]) degree, - 'ypix': array([ 133.85374195, 180.68583074, 115.14110561, ..., 167.61447493, - 129.17278711, 120.11508562]), - 'PI': array([ 27, 15, 25, ..., 609, 611, 672]), - 'xpix': array([ 86.26331108, 155.15934197, 111.06337043, ..., 114.39586907, - 130.93509652, 192.50639633])} - - -We can bin up the events into an image and save it to a FITS file. The -pixel size of the image is equivalent to the smallest cell size from the -original dataset. We can specify limits for the photon energies to be -placed in the image: - -.. code:: python - - events.write_fits_image("sloshing_image.fits", clobber=True, emin=0.5, emax=7.0) - -The resulting FITS image will have WCS coordinates in RA and Dec. It -should be suitable for plotting in -`ds9 `_, for example. 
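-
-Since ``write_fits_image`` accepts energy limits, one simple use is to write
-separate images of the same event list for different energy bands and compare
-them side by side. A minimal sketch reusing only the call shown above (the
-band boundaries here are arbitrary, illustrative choices):
-
-.. code:: python
-
-    # Soft-band and hard-band images from the same event list; the energy
-    # limits below are illustrative values, not required ones.
-    events.write_fits_image("sloshing_soft.fits", clobber=True, emin=0.5, emax=2.0)
-    events.write_fits_image("sloshing_hard.fits", clobber=True, emin=2.0, emax=7.0)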
-There is also a great project for opening astronomical images in Python, -called `APLpy `_: - -.. code:: python - - import aplpy - fig = aplpy.FITSFigure("sloshing_image.fits", figsize=(10,10)) - fig.show_colorscale(stretch="log", vmin=0.1, cmap="gray_r") - fig.set_axis_labels_font(family="serif", size=16) - fig.set_tick_labels_font(family="serif", size=16) - -.. image:: _images/Photon_Simulator_30_4.png - -Which is starting to look like a real observation! - -.. warning:: - - The binned images that result, even if you convolve with responses, - are still of the same resolution as the finest cell size of the - simulation dataset. If you want a more accurate simulation of a - particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_. - -We can also bin up the spectrum into energy bins, and write it to a FITS -table file. This is an example where we've binned up the spectrum -according to the unconvolved photon energy: - -.. code:: python - - events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True) - -We can also set ``bin_type="channel"``. If we have convolved our events -with response files, then any other keywords will be ignored and it will -try to make a spectrum from the channel information that is contained -within the RMF. Otherwise, the channels will be determined from the ``emin``, -``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``. -For now, we'll stick with the energy spectrum, and plot it up: - -.. code:: python - - import astropy.io.fits as pyfits - f = pyfits.open("virgo_spec.fits") - pylab.loglog(f["SPECTRUM"].data.field("ENERGY"), f["SPECTRUM"].data.field("COUNTS")) - pylab.xlim(0.3, 10) - pylab.xlabel("E (keV)") - pylab.ylabel("counts/bin") - -.. image:: _images/Photon_Simulator_34_1.png - - -We can also write the events to a FITS file that is of a format that can -be manipulated by software packages like -`CIAO `_ and read in by ds9 to do more -standard X-ray analysis: - -.. code:: python - - events.write_fits_file("my_events.fits", clobber=True) - -.. warning:: We've done some very low-level testing of this feature, and - it seems to work, but it may not be consistent with standard FITS events - files in subtle ways that we haven't been able to identify. Please email - jzuhone@gmail.com if you find any bugs! - -Two ``EventList`` instances can be added together, which is useful if they were -created using different data sources: - -.. code:: python - - events3 = events1+events2 - -.. warning:: This only works if the two event lists were generated using - the same parameters! - -Finally, a new ``EventList`` can be created from a subset of an existing ``EventList``, -defined by a ds9 region (this functionality requires the -`pyregion `_ package to be installed): - -.. code:: python - - circle_events = events.filter_events("circle.reg") - -Creating a X-ray observation from an in-memory dataset -++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -It may be useful, especially for observational applications, to create -datasets in-memory and then create simulated observations from -them. Here is a relevant example of creating a toy cluster and evacuating two AGN-blown bubbles in it. - -First, we create the in-memory dataset (see :ref:`loading-numpy-array` -for details on how to do this): - -.. 
code:: python - - import yt - import numpy as np - from yt.utilities.physical_ratios import cm_per_kpc, K_per_keV - from yt.units import mp - from yt.utilities.cosmology import Cosmology - from yt.analysis_modules.photon_simulator.api import * - import aplpy - - R = 1000. # in kpc - r_c = 100. # in kpc - rho_c = 1.673e-26 # in g/cm^3 - beta = 1. - T = 4. # in keV - nx = 256 - - bub_rad = 30.0 - bub_dist = 50.0 - - ddims = (nx,nx,nx) - - x, y, z = np.mgrid[-R:R:nx*1j, - -R:R:nx*1j, - -R:R:nx*1j] - - r = np.sqrt(x**2+y**2+z**2) - - dens = np.zeros(ddims) - dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta) - dens[r > R] = 0.0 - temp = T*K_per_keV*np.ones(ddims) - rbub1 = np.sqrt(x**2+(y-bub_rad)**2+z**2) - rbub2 = np.sqrt(x**2+(y+bub_rad)**2+z**2) - dens[rbub1 <= bub_rad] /= 100. - dens[rbub2 <= bub_rad] /= 100. - temp[rbub1 <= bub_rad] *= 100. - temp[rbub2 <= bub_rad] *= 100. - -This created a cluster with a radius of 1 Mpc, a uniform temperature -of 4 keV, and a density distribution from a :math:`\beta`-model. We then -evacuated two "bubbles" of radius 30 kpc at a distance of 50 kpc from -the center. - -Now, we create a yt Dataset object out of this dataset: - -.. code:: python - - data = {} - data["density"] = (dens, "g/cm**3") - data["temperature"] = (temp, "K") - data["velocity_x"] = (np.zeros(ddims), "cm/s") - data["velocity_y"] = (np.zeros(ddims), "cm/s") - data["velocity_z"] = (np.zeros(ddims), "cm/s") - - bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) - - ds = yt.load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox) - -where for simplicity we have set the velocities to zero, though we -could have created a realistic velocity field as well. Now, we -generate the photon and event lists in the same way as the previous -example: - -.. code:: python - - sphere = ds.sphere("c", (1.0,"Mpc")) - - A = 3000. - exp_time = 2.0e5 - redshift = 0.05 - cosmo = Cosmology() - - apec_model = TableApecModel("/Users/jzuhone/Data/atomdb_v2.0.2", - 0.01, 20.0, 20000) - abs_model = TableAbsorbModel("tbabs_table.h5", 0.1) - - thermal_model = ThermalPhotonModel(apec_model, photons_per_chunk=40000000) - photons = PhotonList.from_scratch(sphere, redshift, A, - exp_time, thermal_model, center="c") - - - events = photons.project_photons([0.0,0.0,1.0], - responses=["acisi_aimpt_cy17.arf", - "acisi_aimpt_cy17.rmf"], - absorb_model=abs_model, - north_vector=[0.0,1.0,0.0]) - - events.write_fits_image("img.fits", clobber=True) - -which yields the following image: - -.. code:: python - - fig = aplpy.FITSFigure("img.fits", figsize=(10,10)) - fig.show_colorscale(stretch="log", vmin=0.1, vmax=600., cmap="jet") - fig.set_axis_labels_font(family="serif", size=16) - fig.set_tick_labels_font(family="serif", size=16) - -.. image:: _images/bubbles.png - :width: 80 % - -Storing events for future use and for reading-in by telescope simulators -++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -If you want a more accurate representation of an observation taken by a -particular instrument, there are tools available for such purposes. For -the *Chandra* telescope, there is the venerable -`MARX `_. For a wide range of -instruments, both existing and future, there is -`SIMX `_. We'll discuss two ways -to store your event files so that they can be input by these and other -codes. - -The first option is the most general, and the simplest: simply dump the -event data to an HDF5 file: - -.. 
code:: python - - events.write_h5_file("my_events.h5") - -This will dump the raw event data, as well as the associated parameters, -into the file. If you want to read these events back in, it's just as -simple: - -.. code:: python - - events = EventList.from_h5_file("my_events.h5") - -You can use event data written to HDF5 files to input events into MARX -using `this code `_. - -The second option, for use with SIMX, is to dump the events into a -SIMPUT file: - -.. code:: python - - events.write_simput_file("my_events", clobber=True, emin=0.1, emax=10.0) - -which will write two files, ``"my_events_phlist.fits"`` and -``"my_events_simput.fits"``, the former being a auxiliary file for the -latter. - -.. note:: You can only write SIMPUT files if you didn't convolve - the photons with responses, since the idea is to pass unconvolved - photons to the telescope simulator. - -The following images were made from the same yt-generated events in both MARX and -SIMX. They are 200 ks observations of the two example clusters from above -(the Chandra images have been reblocked by a factor of 4): - -.. image:: _images/ds9_sloshing.png - -.. image:: _images/ds9_bubbles.png - -In November 2015, the structure of the photon and event HDF5 files changed. To -convert an old-format file to the new format, use the ``convert_old_file`` utility: - -.. code:: python - - from yt.analysis_modules.photon_simulator.api import convert_old_file - convert_old_file("old_photons.h5", "new_photons.h5", clobber=True) - convert_old_file("old_events.h5", "new_events.h5", clobber=True) - -This utility will auto-detect the kind of file (photons or events) and will write -the correct replacement for the new version. - -At times it may be convenient to write several ``EventLists`` to disk to be merged -together later. This can be achieved with the ``merge_files`` utility. It takes a -list of - -.. code:: python - - from yt.analysis_modules.photon_simulator.api import merge_files - merge_files(["events_0.h5", "events_1.h5", "events_2.h5"], "merged_events.h5", - add_exposure_times=True, clobber=False) - -At the current time this utility is very limited, as it only allows merging of -``EventLists`` which have the same parameters, with the exception of the exposure -time. If the ``add_exposure_times`` argument to ``merge_files`` is set to ``True``, -the lists will be merged together with the exposure times added. Otherwise, the -exposure times of the different files must be equal. diff --git a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst deleted file mode 100644 index a032bf6a8ba..00000000000 --- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. _planning-cosmology-simulations: - -Planning Simulations to use LightCones or LightRays -=================================================== - -If you want to run a cosmological simulation that will have just enough data -outputs to create a light cone or light ray, the -:meth:`~yt.analysis_modules.cosmological_observation.cosmology_splice.CosmologySplice.plan_cosmology_splice` -function will calculate a list of redshifts outputs that will minimally -connect a redshift interval. - -.. 
code-block:: python - - from yt.analysis_modules.cosmological_observation.api import CosmologySplice - my_splice = CosmologySplice('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo') - my_splice.plan_cosmology_splice(0.0, 0.1, filename='redshifts.out') - -This will write out a file, formatted for simulation type, with a list of -redshift dumps. The keyword arguments are: - -* ``decimals`` (*int*): The decimal place to which the output redshift will - be rounded. If the decimal place in question is nonzero, the redshift will - be rounded up to ensure continuity of the splice. Default: 3. - -* ``filename`` (*str*): If provided, a file will be written with the redshift - outputs in the form in which they should be given in the enzo parameter - file. Default: None. - -* ``start_index`` (*int*): The index of the first redshift output. Default: 0. diff --git a/doc/source/analyzing/analysis_modules/ppv_cubes.rst b/doc/source/analyzing/analysis_modules/ppv_cubes.rst deleted file mode 100644 index 491b91d7767..00000000000 --- a/doc/source/analyzing/analysis_modules/ppv_cubes.rst +++ /dev/null @@ -1,4 +0,0 @@ -Creating Position-Position-Velocity FITS Cubes -------------------------------------------------- - -.. notebook:: PPVCube.ipynb diff --git a/doc/source/analyzing/analysis_modules/radmc3d_export.rst b/doc/source/analyzing/analysis_modules/radmc3d_export.rst deleted file mode 100644 index bacfccef9d8..00000000000 --- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst +++ /dev/null @@ -1,203 +0,0 @@ -.. _radmc3d_export: - -Exporting to RADMC-3D -===================== - -.. sectionauthor:: Andrew Myers -.. versionadded:: 2.6 - -.. figure:: _images/31micron.png - - Above: a sample image showing the continuum dust emission image around a massive protostar - made using RADMC-3D and plotted with pyplot. - -`RADMC-3D -`_ is a -three-dimensional Monte-Carlo radiative transfer code that is capable of -handling both line and continuum emission. yt comes equipped with a -:class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter` -class that exports AMR data to a format that RADMC-3D can read. Currently, only -the ASCII-style data format is supported. -In principle, this allows one to use RADMC-3D to make synthetic observations -from any simulation data format that yt recognizes. - -Continuum Emission ------------------- - -To compute thermal emission intensities, RADMC-3D needs several inputs files that -describe the spatial distribution of the dust and photon sources. To create these -files, first import the RADMC-3D exporter, which is not loaded into your environment -by default: - -.. code-block:: python - - import yt - import numpy as np - from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter, RadMC3DSource - -Next, load up a dataset and instantiate the :class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`. -For this example, we'll use the "StarParticle" dataset, -available `here -`_. - -.. code-block:: python - - ds = yt.load("StarParticles/plrd01000/") - writer = RadMC3DWriter(ds) - -The first data file to create is the "amr_grid.inp" file, which describes the structure -of the AMR index. To create this file, simply call: - -.. code-block:: python - - writer.write_amr_grid() - -Next, we must give RADMC-3D information about the dust density. To do this, we -define a field that calculates the dust density in each cell. We -assume a constant dust-to-gas mass ratio of 0.01: - -.. 
code-block:: python - - dust_to_gas = 0.01 - def _DustDensity(field, data): - return dust_to_gas * data["density"] - ds.add_field(("gas", "dust_density"), function=_DustDensity, units="g/cm**3") - -We save this information into a file called "dust_density.inp". - -.. code-block:: python - - writer.write_dust_file(("gas", "dust_density"), "dust_density.inp") - -Finally, we must give RADMC-3D information about any stellar sources that are -present. To do this, we have provided the -:class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DSource` -class. For this example, we place a single source with temperature 5780 K -at the center of the domain: - -.. code-block:: python - - radius_cm = 6.96e10 - mass_g = 1.989e33 - position_cm = [0.0, 0.0, 0.0] - temperature_K = 5780.0 - star = RadMC3DSource(radius_cm, mass_g, position_cm, temperature_K) - - sources_list = [star] - wavelengths_micron = np.logspace(-1.0, 4.0, 1000) - - writer.write_source_files(sources_list, wavelengths_micron) - -The last line creates the files "stars.inp" and "wavelength_micron.inp", -which describe the locations and spectra of the stellar sources as well -as the wavelengths RADMC-3D will use in it's calculations. - -If everything goes correctly, after executing the above code, you should have -the files "amr_grid.inp", "dust_density.inp", "stars.inp", and "wavelength_micron.inp" -sitting in your working directory. RADMC-3D needs a few more configuration files to -compute the thermal dust emission. In particular, you need an opacity file, like the -"dustkappa_silicate.inp" file included in RADMC-3D, a main "radmc3d.inp" file that sets -some runtime parameters, and a "dustopac.inp" that describes the assumed composition of the dust. -yt cannot make these files for you; in the example that follows, we used a -"radmc3d.inp" file that looked like: - -:: - - nphot = 1000000 - nphot_scat = 1000000 - -which basically tells RADMC-3D to use 1,000,000 photon packets instead of the default 100,000. The -"dustopac.inp" file looked like: - -:: - - 2 - 1 - ----------------------------- - 1 - 0 - silicate - ----------------------------- - -To get RADMC-3D to compute the dust temperature, run the command: - -:: - - ./radmc3D mctherm - -in the directory that contains your "amr_grid.inp", "dust_density.inp", "stars.inp", "wavelength_micron.inp", -"radmc3d.inp", "dustkappa_silicate.inp", and "dustopac.inp" files. If everything goes correctly, you should -get a "dust_temperature.dat" file in your working directory. Once that file is generated, you can use -RADMC-3D to generate SEDs, images, and so forth. For example, to create an image at 31 microns, do the command: - -:: - - ./radmc3d image lambda 31 sizeau 30000 npix 800 - -which should create a file called "image.out". You can view this image using pyplot or whatever other -plotting package you want. To facilitate this, we provide helper functions -that parse the image.out file, returning a header dictionary with some useful metadata -and an np.array containing the image values. To plot this image in pyplot, you could do something like: - -.. 
code-block:: python - - import matplotlib.pyplot as plt - import numpy as np - from yt.analysis_modules.radmc3d_export.api import read_radmc3d_image - header, image = read_radmc3d_image("image.out") - - Nx = header['Nx'] - Ny = header['Ny'] - - x_hi = 0.5*header["pixel_size_cm_x"]*Nx - x_lo = -x_hi - y_hi = 0.5*header["pixel_size_cm_y"]*Ny - y_lo = -y_hi - - X = np.linspace(x_lo, x_hi, Nx) - Y = np.linspace(y_lo, y_hi, Ny) - - plt.pcolormesh(X, Y, np.log10(image), cmap='hot') - cbar = plt.colorbar() - plt.axis((x_lo, x_hi, y_lo, y_hi)) - ax = plt.gca() - ax.set_xlabel(r"$x$ (cm)") - ax.set_ylabel(r"$y$ (cm)") - cbar.set_label(r"Log Intensity (erg cm$^{-2}$ s$^{-1}$ Hz$^{-1}$ ster$^{-1}$)") - plt.savefig('dust_continuum.png') - -The resulting image should look like: - -.. image:: _images/dust_continuum.png - -This barely scratches the surface of what you can do with RADMC-3D. Our goal here is -just to describe how to use yt to export the data it knows about (densities, stellar -sources, etc.) into a format that RADMC-3D can recognize. - -Line Emission -------------- - -The file format required for line emission is slightly different. The -following script will generate two files, one called "numderdens_co.inp", -which contains the number density of CO molecules for every cell in the index, -and another called "gas-velocity.inp", which is useful if you want to include -Doppler broadening. - -.. code-block:: python - - import yt - from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter - - x_co = 1.0e-4 - mu_h = yt.YTQuantity(2.34e-24, 'g') - def _NumberDensityCO(field, data): - return (x_co/mu_h)*data["density"] - yt.add_field(("gas", "number_density_CO"), function=_NumberDensityCO, units="cm**-3") - - ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - writer = RadMC3DWriter(ds) - - writer.write_amr_grid() - writer.write_line_file(("gas", "number_density_CO"), "numberdens_co.inp") - velocity_fields = ["velocity_x", "velocity_y", "velocity_z"] - writer.write_line_file(velocity_fields, "gas_velocity.inp") diff --git a/doc/source/analyzing/analysis_modules/star_analysis.rst b/doc/source/analyzing/analysis_modules/star_analysis.rst deleted file mode 100644 index a026ac5d74c..00000000000 --- a/doc/source/analyzing/analysis_modules/star_analysis.rst +++ /dev/null @@ -1,297 +0,0 @@ -.. note:: - - This module has been deprecated as it is unmaintained. The code has been - moved to the `yt attic `__. - If you'd like to take it over, please do! - -.. _star_analysis: - -Star Particle Analysis -====================== -.. sectionauthor:: Stephen Skory -.. versionadded:: 1.6 - -This document describes tools in yt for analyzing star particles. -The Star Formation Rate tool bins stars by time to produce star formation -statistics over several metrics. -A synthetic flux spectrum and a spectral energy density plot can be calculated -with the Spectrum tool. - -.. _star_formation_rate: - -Star Formation Rate -------------------- - -This tool can calculate various star formation statistics binned over time. -As input it can accept either a yt ``data_source``, such as a region or -sphere (see :ref:`available-objects`), or arrays containing the data for -the stars you wish to analyze. - -This example will analyze all the stars in the volume: - -.. code-block:: python - - import yt - from yt.analysis_modules.star_analysis.api import StarFormationRate - ds = yt.load("Enzo_64/DD0030/data0030") - ad = ds.all_data() - sfr = StarFormationRate(ds, data_source=ad) - -or just a small part of the volume i.e. 
a small sphere at the center of the -simulation volume with radius 10% the box size: - -.. code-block:: python - - import yt - from yt.analysis_modules.star_analysis.api import StarFormationRate - ds = yt.load("Enzo_64/DD0030/data0030") - sp = ds.sphere([0.5, 0.5, 0.5], 0.1) - sfr = StarFormationRate(ds, data_source=sp) - -If the stars to be analyzed cannot be defined by a ``data_source``, YTArrays can -be passed. For backward compatibility it is also possible to pass generic numpy -arrays. In this case, the units for the ``star_mass`` must be in -:math:`(\mathrm{\rm{M}_\odot})`, the ``star_creation_time`` in code units, and -the volume must be specified in :math:`(\mathrm{\rm{Mpc}})` as a float (but it -doesn't have to be correct depending on which statistic is important). - -.. code-block:: python - - import yt - from yt.analysis_modules.star_analysis.api import StarFormationRate - from yt.data_objects.particle_filters import add_particle_filter - - def Stars(pfilter, data): - return data[("all", "particle_type")] == 2 - add_particle_filter("stars", function=Stars, filtered_type='all', - requires=["particle_type"]) - - ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009") - ds.add_particle_filter('stars') - v, center = ds.find_max("density") - sp = ds.sphere(center, (50, "kpc")) - - # This puts the particle data for *all* the particles in the sphere sp - # into the arrays sm and ct. - mass = sp[("stars", "particle_mass")].in_units('Msun') - age = sp[("stars", "age")].in_units('Myr') - ct = sp[("stars", "creation_time")].in_units('Myr') - - # Pick out only old stars using Numpy array fancy indexing. - threshold = ds.quan(100.0, "Myr") - mass_old = mass[age > threshold] - ct_old = ct[age > threshold] - - sfr = StarFormationRate(ds, star_mass=mass_old, star_creation_time=ct_old, - volume=sp.volume()) - -To output the data to a text file, use the command ``.write_out``: - -.. code-block:: python - - sfr.write_out(name="StarFormationRate.out") - -In the file ``StarFormationRate.out``, there are seven columns of data: - - 1. Time (yr) - 2. Look-back time (yr) - 3. Redshift - 4. Star formation rate in this bin per year :math:`(\mathrm{\rm{M}_\odot / \rm{yr}})` - 5. Star formation rate in this bin per year per Mpc**3 :math:`(\mathrm{\rm{M}_\odot / \rm{h} / \rm{Mpc}^3})` - 6. Stars formed in this time bin :math:`(\mathrm{\rm{M}_\odot})` - 7. Cumulative stars formed up to this time bin :math:`(\mathrm{\rm{M}_\odot})` - -The output is easily plotted. This is a plot for some test data (that may or may not -correspond to anything physical) using columns #2 and #4 for the x and y -axes, respectively: - -.. image:: _images/SFR.png - :width: 640 - :height: 480 - -It is possible to access the output of the analysis without writing to disk. -Attached to the ``sfr`` object are the following arrays which are identical -to the ones that are saved to the text file as above: - - 1. ``sfr.time`` - 2. ``sfr.lookback_time`` - 3. ``sfr.redshift`` - 4. ``sfr.Msol_yr`` - 5. ``sfr.Msol_yr_vol`` - 6. ``sfr.Msol`` - 7. ``sfr.Msol_cumulative`` - -.. _synthetic_spectrum: - -Synthetic Spectrum Generator ----------------------------- - -Based on code generously provided by Kentaro Nagamine , -this will generate a synthetic spectrum for the stars using the publicly-available -tables of Bruzual & Charlot (hereafter B&C). Please see their `2003 paper -`_ for more information -and the `main data -distribution page `_ for the original data. 
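-
-Conceptually, each star particle contributes a single-age, single-metallicity
-template spectrum from these tables, weighted by its mass, and the per-star
-contributions are summed into one cumulative spectrum. A toy, self-contained
-sketch of that accumulation follows; ``lookup_template`` is a hypothetical
-stand-in for the B&C table interpolation and is not part of the yt API:
-
-.. code-block:: python
-
-    import numpy as np
-
-    def lookup_template(age, metallicity, wavelengths):
-        # Hypothetical placeholder returning a template spectrum (luminosity
-        # per unit wavelength per unit stellar mass) on a fixed wavelength
-        # grid; the real values come from the converted B&C HDF5 tables.
-        return np.exp(-(np.log10(wavelengths) - 3.7)**2) / (1.0 + age / 1.0e9)
-
-    wavelengths = np.logspace(2.5, 4.5, 200)  # Angstroms, toy grid
-    star_mass = np.array([1.0e5, 3.0e5])      # Msun
-    star_age = np.array([5.0e8, 2.0e9])       # yr
-    star_metallicity = np.array([0.02, 0.004])
-
-    # Mass-weighted sum of the per-star template spectra.
-    spectrum = np.zeros_like(wavelengths)
-    for m, age, z in zip(star_mass, star_age, star_metallicity):
-        spectrum += m * lookup_template(age, z, wavelengths)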
-Based on the mass, age and metallicity of each star, a cumulative spectrum is -generated and can be output in two ways, either raw, or as a spectral -energy distribution. - -This analysis toolkit reads in the B&C data from HDF5 files that have been -converted from the original ASCII files (available at the link above). The -HDF5 files are one-quarter the size of the ASCII files, and greatly reduce -the time required to read the data off disk. The HDF5 files are available from -the main yt website `here `_. -Both the Salpeter and Chabrier models have been converted, -and it is simplest to download all the files to the same location. -Please read the original B&C sources for information on the differences between -the models. - -In order to analyze stars, first the Bruzual & Charlot data tables need to be -read in from disk. This is accomplished by initializing ``SpectrumBuilder`` and -specifying the location of the HDF5 files with the ``bcdir`` parameter. -The models are chosen with the ``model`` parameter, which is either -*"chabrier"* or *"salpeter"*. - -.. code-block:: python - - import yt - from yt.analysis_modules.star_analysis.api import SpectrumBuilder - ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009") - spec = SpectrumBuilder(ds, bcdir="bc", model="chabrier") - -In order to analyze a set of stars, use the ``calculate_spectrum`` command. -It accepts either a ``data_source``, or a set of YTarrays with the star -information. Continuing from the above example: - -.. code-block:: python - - v, center = ds.find_max("density") - sp = ds.sphere(center, (50, "kpc")) - spec.calculate_spectrum(data_source=sp) - -If a subset of stars are desired, call it like this: - -.. code-block:: python - - from yt.data_objects.particle_filters import add_particle_filter - - def Stars(pfilter, data): - return data[("all", "particle_type")] == 2 - add_particle_filter("stars", function=Stars, filtered_type='all', - requires=["particle_type"]) - - # Pick out only old stars using Numpy array fancy indexing. - threshold = ds.quan(100.0, "Myr") - mass_old = sp[("stars", "age")][age > threshold] - metal_old = sp[("stars", "metallicity_fraction")][age > threshold] - ct_old = sp[("stars", "creation_time")][age > threshold] - - spec.calculate_spectrum(star_mass=mass_old, star_creation_time=ct_old, - star_metallicity_fraction=metal_old) - -For backward compatibility numpy arrays can be used instead for ``star_mass`` -(in units :math:`\mathrm{\rm{M}_\odot}`), ``star_creation_time`` and -``star_metallicity_fraction`` (in code units). -Alternatively, when using either a ``data_source`` or individual arrays, -the option ``star_metallicity_constant`` can be specified to force all the -stars to have the same metallicity. If arrays are being used, the -``star_metallicity_fraction`` array need not be specified. - -.. code-block:: python - - # Make all the stars have solar metallicity. - spec.calculate_spectrum(data_source=sp, star_metallicity_constant=0.02) - -Newly formed stars are often shrouded by thick gas. With the ``min_age`` option -of ``calculate_spectrum``, young stars can be excluded from the spectrum. -The units are in years. -The default is zero, which is equivalent to including all stars. - -.. code-block:: python - - spec.calculate_spectrum(data_source=sp, star_metallicity_constant=0.02, - min_age=ds.quan(1.0, "Myr")) - -There are two ways to write out the data once the spectrum has been calculated. -The command ``write_out`` outputs two columns of data: - - 1. Wavelength (:math:`\text{Angstroms}`) - 2. 
Flux (Luminosity per unit wavelength :math:`(\mathrm{\rm{L}_\odot} / \text{Angstrom})` , where - :math:`\mathrm{\rm{L}_\odot} = 3.826 \cdot 10^{33}\, \mathrm{ergs / s}` ). - -and can be called simply, specifying the output file: - -.. code-block:: python - - spec.write_out(name="spec.out") - -The other way is to output a spectral energy density plot. Along with the -``name`` parameter, this command can also take the ``flux_norm`` option, -which is the wavelength in Angstroms of the flux to normalize the -distribution to. The default is 5200 Angstroms. This command outputs the data -in two columns: - - 1. Wavelength :math:`(\text{Angstroms})` - 2. Relative flux normalized to the flux at *flux_norm*. - -.. code-block:: python - - spec.write_out_SED(name="SED.out", flux_norm=5200) - -Below is an example of an absurd SED for universe-old stars all with -solar metallicity at a redshift of zero. Note that even in this example, -a ``ds`` is required. - -.. code-block:: python - - import yt - import numpy as np - from yt.analysis_modules.star_analysis.api import SpectrumBuilder - - ds = yt.load("Enzo_64/DD0030/data0030") - spec = SpectrumBuilder(ds, bcdir="bc", model="chabrier") - sm = np.ones(100) - ct = np.zeros(100) - spec.calculate_spectrum(star_mass=sm, star_creation_time=ct, - star_metallicity_constant=0.02) - spec.write_out_SED('SED.out') - -And the plot: - -.. image:: _images/SED.png - :width: 640 - :height: 480 - -Iterate Over a Number of Halos -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In this example below, the halos for a dataset are found, and the SED is calculated -and written out for each. - -.. code-block:: python - - import yt - from yt.analysis_modules.star_analysis.api import SpectrumBuilder - from yt.data_objects.particle_filters import add_particle_filter - from yt.analysis_modules.halo_finding.api import HaloFinder - - def Stars(pfilter, data): - return data[("all", "particle_type")] == 2 - add_particle_filter("stars", function=Stars, filtered_type='all', - requires=["particle_type"]) - - ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009") - ds.add_particle_filter('stars') - halos = HaloFinder(ds, dm_only=False) - # Set up the spectrum builder. - spec = SpectrumBuilder(ds, bcdir="bc", model="salpeter") - - # Iterate over the halos. - for halo in halos: - sp = halo.get_sphere() - spec.calculate_spectrum( - star_mass=sp[("stars", "particle_mass")], - star_creation_time=sp[("stars", "creation_time")], - star_metallicity_fraction=sp[("stars", "metallicity_fraction")]) - # Write out the SED using the default flux normalization. - spec.write_out_SED(name="halo%05d.out" % halo.id) diff --git a/doc/source/analyzing/analysis_modules/sunrise_export.rst b/doc/source/analyzing/analysis_modules/sunrise_export.rst deleted file mode 100644 index de23bcf026c..00000000000 --- a/doc/source/analyzing/analysis_modules/sunrise_export.rst +++ /dev/null @@ -1,157 +0,0 @@ -.. _sunrise_export: - -.. note:: - - This module has been deprecated as it is unmaintained. The code has been - moved to the `yt attic `__. - If you'd like to take it over, please do! - -Exporting to Sunrise -==================== - -.. sectionauthor:: Christopher Moody -.. versionadded:: 1.8 - -.. note:: - - As of :code:`yt-3.0`, the sunrise exporter is not currently functional. - This functionality is still available in :code:`yt-2.x`. If you would like - to use these features in :code:`yt-3.x`, help is needed to port them over. - Contact the yt-users mailing list if you are interested in doing this. 
- -The yt-Sunrise exporter essentially takes grid cell data and translates it into a binary octree format, attaches star particles, and saves the output to a FITS file Sunrise can read. For every cell, the gas mass, metals mass (a fraction of which is later assumed to be in the form of dust), and the temperature are saved. Star particles are defined entirely by their mass, position, metallicity, and a 'radius.' This guide outlines the steps to exporting the data, troubleshoots common problems, and reviews recommended sanity checks. - -Simple Export -------------- - -The code outlined here is a barebones Sunrise export: - -.. code-block:: python - - from yt.mods import * - import numpy as na - - ds = ARTDataset(file_amr) - potential_value,center=ds.find_min('Potential_New') - root_cells = ds.domain_dimensions[0] - le = np.floor(root_cells*center) #left edge - re = np.ceil(root_cells*center) #right edge - bounds = [(le[0], re[0]-le[0]), (le[1], re[1]-le[1]), (le[2], re[2]-le[2])] - #bounds are left edge plus a span - bounds = numpy.array(bounds,dtype='int') - amods.sunrise_export.export_to_sunrise(ds, out_fits_file,subregion_bounds = bounds) - -To ensure that the camera is centered on the galaxy, we find the center by finding the minimum of the gravitational potential. The above code takes that center, and casts it in terms of which root cells should be extracted. At the moment, Sunrise accepts a strict octree, and you can only extract a 2x2x2 domain on the root grid, and not an arbitrary volume. See the optimization section later for workarounds. On my reasonably recent machine, the export process takes about 30 minutes. - -Some codes do not yet enjoy full yt support. As a result, export_to_sunrise() can manually include particles in the yt output fits file: - -.. code-block:: python - - import pyfits - - col_list = [] - col_list.append(pyfits.Column("ID", format="I", array=np.arange(mass_current.size))) - col_list.append(pyfits.Column("parent_ID", format="I", array=np.arange(mass_current.size))) - col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc")) - col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr")) - col_list.append(pyfits.Column("creation_mass", format="D", array=mass_initial, unit="Msun")) - col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr")) - col_list.append(pyfits.Column("radius", format="D", array=star_radius, unit="kpc")) - col_list.append(pyfits.Column("mass", format="D", array=mass_current, unit="Msun")) - col_list.append(pyfits.Column("age_m", format="D", array=age, unit="yr")) - col_list.append(pyfits.Column("age_l", format="D", array=age, unit="yr")) - col_list.append(pyfits.Column("metallicity", format="D",array=z)) - col_list.append(pyfits.Column("L_bol", format="D",array=np.zeros(mass_current.size))) - cols = pyfits.ColDefs(col_list) - - amods.sunrise_export.export_to_sunrise(ds, out_fits_file,write_particles=cols, - subregion_bounds = bounds) - -This code snippet takes the stars in a region outlined by the ``bounds`` variable, organizes them into pyfits columns which are then passed to export_to_sunrise. Note that yt units are in CGS, and Sunrise accepts units in (physical) kpc, kelvin, solar masses, and years. - -Remember that in Sunrise, photons are not spawned at the exact point of the star particle, but stochastically in a radius around it. 
Default to setting this radius to the resolution (or smoothing kernel) of your simulation - and then test that Sunrise is not sensitive to a doubling or halving of this number. - -Sanity Check: Young Stars -------------------------- - -Young stars are treated in a special way in Sunrise. Stars under 10 Myr do not emit in the normal fashion; instead they are replaced with MAPPINGS III particles that emulate the emission characteristics of star forming clusters. Among other things this involves a calculation of the local pressure, P/k, which Sunrise reports for debugging purposes and is something you should also check. - -The code snippet below finds the location of every star under 10 Myr and looks up the cell containing it: - -.. code-block:: python - - for x,a in enumerate(zip(pos,age)): #loop over stars - center = x*ds['kpc'] - grid,idx = find_cell(ds.index.grids[0],center) - pk[i] = grid['Pk'][idx] - -This code is how Sunrise calculates the pressure, so we can add our own derived field: - -.. code-block:: python - - def _Pk(field,data): - #calculate pressure over Boltzmann's constant: P/k=(n/V)T - #Local stellar ISM values are ~16500 Kcm^-3 - vol = data['cell_volume'].astype('float64')*data.ds['cm']**3.0 #volume in cm - m_g = data["cell_mass"]*1.988435e33 #mass of H in g - n_g = m_g*5.97e23 #number of H atoms - teff = data["temperature"] - val = (n_g/vol)*teff #should be of order 1e2-1e5 - return val - add_field("Pk", function=_Pk,units=r"Kcm^{-3}") - - -This snippet locates the cell containing a star and returns the grid and grid id. - -.. code-block:: python - - def find_cell(grid,position): - x=grid - #print(grid.LeftEdge) - for child in grid.Children: - if numpy.all(child.LeftEdge < position) and\ - numpy.all(child.RightEdge > position): - return find_cell(child,position) - - #if the point is not contained within any of the child grids - #find it within the extent of the current grid - le,re = x.LeftEdge,x.RightEdge - ad = x.ActiveDimensions - span = (re-le)/ad - idx = (position-le)/span - idx = numpy.floor(idx) - idx = numpy.int64(idx) - assert numpy.all(idx < ad) - return grid,idx - -Sanity Check: Gas & Stars Line Up ---------------------------------- - -If you add your star particles separately from the gas cell index, then it is worth checking that they still lined up once they've been loaded into Sunrise. This is fairly easy to do with a useful 'auxiliary' run. In Sunrise, set all of your rays to zero, (nrays_nonscatter, nrays_scatter,nrays_intensity,nrays_ir ) except for nrays_aux, and this will produce an mcrx FITS file with a gas map, a metals map, a temperature*gass_mass map and a stellar map for each camera. As long as you keep some cameras at theta,phi = 0,0 or 90,0, etc., then a standard yt projection down the code's xyz axes should look identical: - -.. code-block:: python - - pc.add_projection("density", 0, "density") - - -Convergence: High Resolution ----------------------------- - -At the moment, yt exports are the only grid data format Sunrise accepts. Otherwise, Sunrise typically inputs SPH particles or AREPO Voronoi grids. Among the many convergence checks you should perform is a high resolution check, which subdivides all leaves in the octree and copies the parent data into them, effectively increasing the resolution but otherwise not adding more information. Sunrise should yield similar results, and it is worth checking that indeed it does. Do so by just passing export_to_sunrise(...,dummy_subdivide=True). 
The resulting file should be slightly less than 8 times larger because of newly added cells. - -Other checks: -------------- - -Check that the width of your extracted region is at least the size of your camera's field of view. It should probably be significantly larger than your FOV, and cutting that short could throw out otherwise interesting objects. - -A good idea is to leverage yt to find the inertia tensor of the stars, find the rotation matrix that diagonalizes it, and use that to define cameras for Sunrise. Unless your code grid is aligned with your galaxy, this is required for getting edge-on or face-on shots. - -The final product: ------------------- - -.. image:: _images/mw3_0420.jpg - :width: 479 - :height: 479 - -Above is a false color image where RGB are assigned to IR, optical and UV broadband filters, respectively. - diff --git a/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst b/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst deleted file mode 100644 index 0a063a42e22..00000000000 --- a/doc/source/analyzing/analysis_modules/sunyaev_zeldovich.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. _sunyaev-zeldovich: - -Mock Observations of the Sunyaev-Zeldovich Effect -------------------------------------------------- - -.. notebook:: SZ_projections.ipynb diff --git a/doc/source/analyzing/analysis_modules/synthetic_observation.rst b/doc/source/analyzing/analysis_modules/synthetic_observation.rst deleted file mode 100644 index 3e4ca7ed018..00000000000 --- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _synthetic-observations: - -Synthetic Observation -===================== - -Methods for generating various types of synthetic observations -from simulation data. - -.. toctree:: - :maxdepth: 2 - - light_cone_generator - light_ray_generator - planning_cosmology_simulations - absorption_spectrum - star_analysis - xray_emission_fields - sunyaev_zeldovich - photon_simulator - ppv_cubes diff --git a/doc/source/analyzing/analysis_modules/two_point_functions.rst b/doc/source/analyzing/analysis_modules/two_point_functions.rst deleted file mode 100644 index b4ae754fb5d..00000000000 --- a/doc/source/analyzing/analysis_modules/two_point_functions.rst +++ /dev/null @@ -1,943 +0,0 @@ -.. note:: - - This module has been deprecated as it is unmaintained. The code has been - moved to the `yt attic `__. - If you'd like to take it over, please do! - -.. _two_point_functions: - -Two Point Functions -=================== -.. sectionauthor:: Stephen Skory -.. versionadded:: 1.7 - -.. note:: - - As of :code:`yt-3.0`, the two point function analysis module is not - currently functional. This functionality is still available in - :code:`yt-2.x`. If you would like to use these features in :code:`yt-3.x`, - help is needed to port them over. Contact the yt-users mailing list if you - are interested in doing this. - -The Two Point Functions framework (TPF) is capable of running several -multi-dimensional two point functions simultaneously on a dataset using -memory and workload parallelism. -Examples of two point functions are structure functions and two-point -correlation functions. -It can analyze the entire simulation, or a small rectangular subvolume. -The results can be output in convenient text format and in efficient -HDF5 files. - -Requirements ------------- - -The TPF relies on the Fortran kD-tree that is used -by the parallel HOP halo finder. The kD-tree is not built by default with yt -so it must be built by hand. 
- -Quick Example -------------- - -It is very simple to setup and run a structure point function on a dataset. -The script below will output the RMS velocity difference over the entire volume -for a range of distances. There are some brief comments given below for each -step. - -.. code-block:: python - - from yt.mods import * - from yt.analysis_modules.two_point_functions.api import * - - ds = load("data0005") - - # Calculate the S in RMS velocity difference between the two points. - # All functions have five inputs. The first two are containers - # for field values, and the second two are the raw point coordinates - # for the point pair. The fifth is the normal vector between the two points - # in r1 and r2. Not all the inputs must be used. - # The name of the function is used to name output files. - def rms_vel(a, b, r1, r2, vec): - vdiff = a - b - np.power(vdiff, 2.0, vdiff) - vdiff = np.sum(vdiff, axis=1) - return vdiff - - - # Initialize a function generator object. - # Set the input fields for the function(s), - # the number of pairs of points to calculate, how big a data queue to - # use, the range of pair separations and how many lengths to use, - # and how to divide that range (linear or log). - tpf = TwoPointFunctions(ds, ["velocity_x", "velocity_y", "velocity_z"], - total_values=1e5, comm_size=10000, - length_number=10, length_range=[1./128, .5], - length_type="log") - - # Adds the function to the generator. An output label is given, - # and whether or not to square-root the results in the text output is given. - # Note that the items below are being added as lists. - f1 = tpf.add_function(function=rms_vel, out_labels=['RMSvdiff'], sqrt=[True]) - - # Define the bins used to store the results of the function. - f1.set_pdf_params(bin_type='log', bin_range=[5e4, 5.5e13], bin_number=1000) - - # Runs the functions. - tpf.run_generator() - - # This calculates the M in RMS and writes out a text file with - # the RMS values and the lengths. The R happens because sqrt=True in - # add_function, above. - # If one is doing turbulence, the contents of this text file are what - # is wanted for plotting. - # The file is named 'rms_vel.txt'. - tpf.write_out_means() - # Writes out the raw PDF bins and bin edges to a HDF5 file. - # The file is named 'rms_vel.h5'. - tpf.write_out_arrays() - -As an aside, note that any analysis function in yt can be accessed directly -and imported automatically using the ``amods`` construct. -Here is an abbreviated example: - -.. code-block:: python - - from yt.mods import * - ... - tpf = amods.two_point_functions.TwoPointFunctions(ds, ...) - - -Probability Distribution Function ---------------------------------- - -For a given length of separation between points, the TPF stores the -Probability Distribution Function (PDF) of the output values. -The PDF allows more varied analysis of the TPF output than storing -the function itself. -The image below assists in how to think about this. -If the function is measuring the absolute difference in temperature -between two points, for each point separation length L, the measured -differences are binned by temperature difference (delta T). -Therefore in the figure below, for a length L, the x-axis is temperature difference -(delta T), and the y-axis is the probability of finding that temperature -difference. -To find the mean temperature difference for the length L, one just needs -to multiply the value of the temperature difference bin by its probability, -and add up over all the bins. - -.. 
image:: _images/PDF.png - :width: 538 - :height: 494 - -How It Works ------------- - -In order to use the TPF, one must understand how it works. -When run in parallel the defined analysis volume, whether it is the full -volume or a small region, is subdivided evenly and each task is assigned -a different subvolume. -The total number of point pairs to be created per pair separation length -is ``total_values``, and each -task is given an equal share of that total. -Each task will create its share of ``total_values`` by first making -a randomly placed point in its local volume. -The second point will be placed a distance away with location set by random -values of (phi, theta) in spherical coordinates and length by the length ranges. -If that second point is inside the tasks subvolume, the functions -are evaluated and their results binned. -However, if the second point lies outside the subvolume (as in a different -tasks subvolume), the point pair is stored in a point data queue, as well as the -field values for the first point in a companion data queue. -When a task makes its share of ``total_values``, or it fills up its data -queue with points it can't fully process, it passes its queues to its neighbor on -the right. -It then receives the data queues from its neighbor on the left, and processes -the queues. -If it can evaluate a point in the received data queues, meaning it can find the -field values for the second point, it computes the functions for -that point pair, and removes that entry from the queue. -If it still needs to fulfill ``total_values``, it can put its own point pair -into that entry in the queues. -Once the queues are full of points that a task cannot process, it passes them -on. -The data communication cycle ends when all tasks have made their share of -``total_values``, and all the data queues are cleared. -When all the cycles have run, the bins are added up globally to find the -global PDF. - -Below is a two-dimensional representation of how the full simulation is -subdivided into 16 smaller subvolumes. -Each subvolume is assigned to one of 16 tasks -labelled with an integer [0-15]. -Each task is responsible for only the field -values inside its subvolume - it is completely ignorant about all the other -subvolumes. -When point separation rulers are laid down, some like the ruler -labelled A, have both points completely inside a single subvolume. -In this case, -task 5 can evaluate the function(s) on its own. -In situations like -B or C, the points lie in different subvolumes, and no one task can evaluate -the functions independently. - -.. image:: _images/struct_fcn_subvolumes0.png - :width: 403 - :height: 403 - -This next figure shows how the data queues are passed from task to task. -Once task 0 is done with its points, or its queue is full, it passes the queue -to task 1. -Likewise, 1 passes to 2, and 15 passes back around to 0, completing the circle. -If a point pair lies in the subvolumes of 0 and 15, it can take up to 15 -communication cycles for that pair to be evaluated. - -.. image:: _images/struct_fcn_subvolumes1.png - :width: 526 - :height: 403 - -Sometimes the sizes of the data fields being computed on are not very large, -and the memory-parallelism of the TPF isn't crucial. -However, if one still wants to run with lots of processors to make large amounts of -random pairs, subdividing the volumes as above is not as efficient as it could -be due to communication overhead. 
-By using the ``vol_ratio`` setting of TPF (see :ref:`Create the -Function Generator Object `), the full -volume can be subdivided into larger subvolumes than above, -and tasks will own non-unique copies of the fields data. -In the figure below, the two-dimensional volume has been subdivided into -four subvolumes, and four tasks each own a copy of the data in each subvolume. -As shown, the queues are handed off in the same order as before. -But in this simple example, the maximum number of communication cycles for any -point to be evaluated is three. -This means that the communication overhead will be lower and runtimes -somewhat faster. - -.. image:: _images/struct_fcn_subvolumes2.png - :width: 526 - :height: 403 - -A Step By Step Overview ------------------------ - -In order to run the TPF, these steps must be taken: - - #. Load yt (of course), and any other Python modules that are needed. - #. Define any non-default fields in the standard yt manner. - #. :ref:`tpf_fcns`. - #. :ref:`tpf_tpf`. - #. :ref:`tpf_add_fcns`. - #. :ref:`tpf_pdf`. - #. :ref:`tpf_run`. - #. :ref:`tpf_output`. - -.. _tpf_fcns: - -Define Functions -^^^^^^^^^^^^^^^^ - -All functions must adhere to these specifications: - - * There must be five input variables. The first two are arrays for the - fields needed by the function, and the next two are the raw coordinate - values for the points. The fifth input is an array with the normal - vector between each of the points in r1 and r2. - * The output must be in array format. - * The names of the functions need to be unique. - -The first two variables of a function are arrays that contain the field values. -The order of the field values in the lists is set by the call to ``TwoPointFunctions`` -(that comes later). -In the example above, ``a`` and ``b`` -contain the field velocities for the two points, respectively, in an N by M -array, where N is equal to ``comm_size`` (set in ``TwoPointFunctions``), and M -is the total number of input fields used by functions. -``a[:,0]`` and ``b[:,0]`` are the ``x-velocity`` field values because that field -is the first field given in the ``TwoPointFunctions``. - -The second two variables ``r1`` and ``r2`` are the raw point coordinates for the two points. -The fifth input is an array containing the normal vector between each pair of points. -These arrays are all N by 3 arrays. -Note that they are not used in the example above because they are not needed. - -Functions need to output in array format, with dimensionality -N by R, where R is the dimensionality of the function. -Multi-dimensional functions can be written that output -several values simultaneously. - -The names of the functions must be unique because they are used to name -output files, and name collisions will result in over-written output. - -.. _tpf_tpf: - -Create the Two Point Function Generator Object -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Before any functions can be added, the ``TwoPointFunctions`` object needs -to be created. It has these inputs: - - * ``ds`` (the only required input and is always the first term). - * Field list, required, an ordered list of field names used by the - functions. The order in this list will need to be referenced when writing - functions. Derived fields may be used here if they are defined first. - * ``left_edge``, ``right_edge``, three-element lists of floats: - Used to define a sub-region of the full volume in which to run the TDS. - Default=None, which is equivalent to running on the full volume. 
Both must - be set to have any effect. - * ``total_values``, integer: The number of random points to generate globally - per point separation length. If run in parallel, each task generates its - fair share of this number. - Default=1000000. - * ``comm_size``, integer: How many pairs of points that are stored in the - data queue objects on each task. Too large wastes memory, and too small will - result in longer run times due to extra communication cycles. Each unit of - ``comm_size`` costs (6 + number_of_fields)*8 bytes, where number_of_fields - is the size of the set of unique data fields used by all the functions added to the - TPF. In the RMS velocity example above, number_of_fields=3, and a - ``comm_size`` of 10,000 means each queue costs 10,000*8*(6+3) = - 720 KB per task. - Default=10000. - * ``length_type``, string ("lin" or "log"): Sets how to evenly space the point - separation lengths, either linearly or logarithmic (log10). - Default="lin". - * ``length_number``, integer: How many point separations to run. - Default=10. - * ``length_range``, two-element list of floats: Two values that define - the minimum and maximum point separations to run over. The lengths that will - be used are divided into ``length_number`` pieces evenly separated according - to ``length_type``. - Default=None, which is equivalent to [sqrt(3)*dx, min_simulation_edge/2.], where - min_simulation_edge is the length of the smallest edge (1D) of the simulation, - and dx is the smallest cell size in the dataset. The sqrt(3) is there because - that is the distance between opposite corners of a unit cube, and that - guarantees that the point pairs will be in different cells for the most - refined regions. - If the first term of the list is -1, the minimum length will be automatically - set to sqrt(3)*dx, ex: ``length_range = [-1, 10/ds['kpc']]``. - * ``vol_ratio``, integer: How to multiply-assign subvolumes to the parallel - tasks. This number must be an integer factor of the total number of tasks or - very bad things will happen. The default value of 1 will assign one task - to each subvolume, and there will be an equal number of subvolumes as tasks. - A value of 2 will assign two tasks to each subvolume and there will be - one-half as many subvolumes as tasks. - A value equal to the number of parallel tasks will result in each task - owning a complete copy of all the fields data, meaning each task will be - operating on the identical full volume. - Setting this to -1 automatically adjusts ``vol_ratio`` such that all tasks - are given the full volume. - * ``salt``, integer: A number that will be added to the random number generator - seed. Use this if a different random series of numbers is desired when - keeping everything else constant from this set: (MPI task count, - number of ruler lengths, ruler min/max, number of functions, - number of point pairs per ruler length). Default: 0. - * ``theta``, float: For random pairs of points, the second point is found by - traversing a distance along a ray set by the angle (phi, theta) from the - first point. To keep this angle constant, set ``theta`` to a value in the - range [0, pi]. Default = None, which will randomize theta for every pair of - points. - * ``phi``, float: Similar to theta above, but the range of values is - [0, 2*pi). Default = None, which will randomize phi for every pair of - points. - -.. _tpf_add_fcns: - -Add Functions -^^^^^^^^^^^^^ - -Each function is added to the TPF using the ``add_function`` command. 
-Each call must have the following inputs: - - #. The function name as previously defined. - #. A list with label(s) for the output(s) of the function. - Even if the function outputs only one value, this must be a list. - These labels are used for output. - #. A list with bools of whether or not to sqrt the output, in the same order - as the output label list. E.g. ``[True, False]``. - -The call to ``add_function`` returns a ``FcnSet`` object. For convenience, -it is best to store the output in a variable (as in the example above) so -it can be referenced later. -The functions can also be referenced through the ``TwoPointFunctions`` object -in the order in which they were added. -So would ``tpf[0]`` refer to the same thing as ``f1`` in the quick example, -above. - -.. _tpf_pdf: - -Set PDF Parameters -^^^^^^^^^^^^^^^^^^ - -Once the function is added to the TPF, the probability distribution -bins need to be defined for each using the command ``set_pdf_params``. -It has these inputs: - - * ``bin_type``, string or list of strings ("lin" or "log"): - How to evenly subdivide the bins over the given range. If the - function has multiple outputs, the input needs to be a list with equal - elements. - * ``bin_range``, list or list of lists: - Define the min/max values for the bins for the output(s) of the - function. - If there are multiple outputs, there must be an equal number of lists. - * ``bin_number``, integer or list of integers: How many bins to create over - the min/max range defined by ``bin_range`` evenly spaced by the ``bin_type`` - parameter. - If there are multiple outputs, there must be an equal number of integers. - -The memory costs associated with the PDF bins must be considered when writing -an analysis script. -There is one set of PDF bins created per function, per point separation length. -Each PDF bin costs product(bin_number)*8 bytes, where product(bin_number) is -the product of the entries in the bin_number list, and this is duplicated -on every task. -For multidimensional PDFs, the memory costs can grow very quickly. -For example, for 3 functions, each with two outputs, with 1000 point -separation lengths set for the TPF, and with 5000 PDF bins per output dimension, -the PDF bins will cost: 3*1000*(5000)^2*8=600 GB of memory *per task*! - -Note: ``bin_number`` actually specifies the number of *bin edges* to make, -rather than the number of bins to make. The number of bins will actually be -``bin_number``-1 because values are dropped into bins between the two closest -bin edge values, -and values outside the min/max bin edges are thrown away. -If precisely ``bin_number`` bins are wanted, add 1 when setting the PDF -parameters. - -.. _tpf_run: - -Run the TPF -^^^^^^^^^^^ - -The command ``run_generator()`` pulls the trigger and runs the TPF. -There are no inputs. - -After the generator runs, it will print messages like this, one per -function:: - - yt INFO 2010-03-13 12:46:54,541 Function rms_vel had 1 values too high and 4960 too low that were not binned. - -Consider changing the range of the PDF bins to reduce or eliminate un-binned -values. - -.. _tpf_output: - -Output the Results -^^^^^^^^^^^^^^^^^^ - -There are two ways to output data from the TPF for structure functions. - - #. The command ``write_out_means`` writes out a text file per function - that contains the means for each dimension of the function output - for each point separation length. - The file is named "function_name.txt", so in the example the file is named - "rms_vel.txt". 
- In the example above, the ``sqrt=True`` option is turned on, which square-roots - the mean values. Here is some example output for the RMS velocity example:: - - # length count RMSvdiff - 7.81250e-03 95040 8.00152e+04 - 1.24016e-02 100000 1.07115e+05 - 1.96863e-02 100000 1.53741e+05 - 3.12500e-02 100000 2.15070e+05 - 4.96063e-02 100000 2.97069e+05 - 7.87451e-02 99999 4.02917e+05 - 1.25000e-01 100000 5.54454e+05 - 1.98425e-01 100000 7.53650e+05 - 3.14980e-01 100000 9.57470e+05 - 5.00000e-01 100000 1.12415e+06 - - The ``count`` column lists the number of pair points successfully binned - at that point separation length. - - If the output is multidimensional, pass a list of bools to control the - sqrt column by column (``sqrt=[False, True]``) to ``add_function``. - For multidimensional functions, the means are calculated by first - collapsing the values in the PDF matrix in the other - dimensions, before multiplying the result by the bin edges for that output - dimension. So in the extremely simple fabricated case of: - - .. code-block:: python - - # Temperature difference bin edges - # dimension 0 - Tdiff_bins = [10, 100, 1000] - # Density difference bin edges - # dimension 1 - Ddiff_bins = [50,500,5000] - - # 2-D PDF for a point pair length of 0.05 - PDF = [ [ 0.3, 0.1], - [ 0.4, 0.2] ] - - What the PDF is recording is that there is a 30% probability of getting a - temperature difference between [10, 100), at the same time of getting a - density difference between [50, 500). There is a 40% probability for Tdiff - in [10, 100) and Ddiff in [500, 5000). The text output of this PDF is - calculated like this: - - .. code-block:: python - - # Temperature - T_PDF = PDF.sum(axis=0) - # ... which gets ... - T_PDF = [0.7, 0.3] - # Then to get the mean, multiply by the centers of the temperature bins. - means = [0.7, 0.3] * [55, 550] - # ... which gets ... - means = [38.5, 165] - mean = sum(means) - # ... which gets ... - mean = 203.5 - - # Density - D_PDF = PDF.sum(axis=1) - # ... which gets ... - D_PDF = [0.4, 0.6] - # As above... - means = [0.4, 0.6] * [275, 2750] - mean = sum(means) - # ... which gets ... - mean = 1760 - - The text file would look something like this:: - - # length count Tdiff Ddiff - 0.05 980242 2.03500e+02 1.76000e+3 - - #. The command ``write_out_arrays()`` writes the raw PDF bins, as well as the - bin edges for each output dimension to a HDF5 file named - ``function_name.h5``. - Here is example content for the RMS velocity script above:: - - $ h5ls rms_vel.h5 - bin_edges_00_RMSvdiff Dataset {1000} - bin_edges_names Dataset {1} - counts Dataset {10} - lengths Dataset {10} - prob_bins_00000 Dataset {999} - prob_bins_00001 Dataset {999} - prob_bins_00002 Dataset {999} - prob_bins_00003 Dataset {999} - prob_bins_00004 Dataset {999} - prob_bins_00005 Dataset {999} - prob_bins_00006 Dataset {999} - prob_bins_00007 Dataset {999} - prob_bins_00008 Dataset {999} - prob_bins_00009 Dataset {999} - - Every HDF5 file produced will have the datasets ``lengths``, - ``bin_edges_names``, and ``counts``. - ``lengths`` contains the list of the pair separation - lengths used for the TPF, and is identical to the first column in the - text output file. - ``bin_edges_names`` lists the name(s) of the dataset(s) that contain the bin - edge values. - ``counts`` contains the number of successfully binned point pairs for each - point separation length, and is equivalent to the second column in the - text output file. 
- In the HDF5 file above, the ``lengths`` dataset looks like this:: - - $ h5dump -d lengths rms_vel.h5 - HDF5 "rms_vel.h5" { - DATASET "lengths" { - DATATYPE H5T_IEEE_F64LE - DATASPACE SIMPLE { ( 10 ) / ( 10 ) } - DATA { - (0): 0.0078125, 0.0124016, 0.0196863, 0.03125, 0.0496063, 0.0787451, - (6): 0.125, 0.198425, 0.31498, 0.5 - } - } - } - - There are ten length values. ``prob_bins_00000`` is the PDF for pairs of - points separated by the first length value given, which is 0.0078125. - Points separated by 0.0124016 are recorded in ``prob_bins_00001``, and so - on. - The entries in the ``prob_bins`` datasets are the raw PDF for that function - for that point separation length. - If the function has multiple outputs, the arrays stored in the datasets - are multidimensional. - - ``bin_edges_names`` looks like this:: - - $ h5dump -d bin_edges_names rms_vel.h5 - HDF5 "rms_vel.h5" { - DATASET "bin_edges_names" { - DATATYPE H5T_STRING { - STRSIZE 22; - STRPAD H5T_STR_NULLPAD; - CSET H5T_CSET_ASCII; - CTYPE H5T_C_S1; - } - DATASPACE SIMPLE { ( 1 ) / ( 1 ) } - DATA { - (0): "/bin_edges_00_RMSvdiff" - } - } - } - - This gives the names of the datasets that contain the bin edges, in the - same order as the function output the data. - If the function outputs several items, there will be more than one - dataset listed in ``bin_edges-names``. - ``bin_edges_00_RMSvdiff`` therefore contains the (dimension 0) bin edges - as specified when the PDF parameters were set. - If there were other output fields, they would be named - ``bin_edges_01_outfield1``, ``bin_edges_02_outfield2`` respectively. - -.. _tpf_strategies: - -Strategies for Computational Efficiency ---------------------------------------- - -Here are a few recommendations that will make the function generator -run as quickly as possible, in particular when running in parallel. - - * Calculate how much memory the data fields and PDFs will require, and - figure out what fraction can fit on a single compute node. For example - (ignoring the PDF memory costs), if four data fields are required, and each - takes up 8GB of memory (as in each field has 1e9 doubles), 32GB total is - needed. If the analysis is being run on a machine with 4GB per node, - at least eight nodes must be used (but in practice it is often just under - 4GB available to applications, so more than eight nodes are needed). - The number of nodes gives the minimal number of MPI tasks to use, which - corresponds to the minimal volume decomposition required. - Benchmark tests show that the function generator runs the quickest - when each MPI task owns as much of the full volume as possible. - If this number of MPI tasks calculated above is fewer than desired due to - the number of pairs to be generated, instead of further subdividing the volume, - use the ``vol_ratio`` parameter to multiply-assign tasks to the same subvolume. - The total number of compute nodes will have to be increased because field - data is being duplicated in memory, but tests have shown that things run - faster in this mode. The bottom line: pick a vol_ratio that is as large - as possible. - - * The ideal ``comm_size`` appears to be around 1e5 or 1e6 in size. - - * If possible, write the functions using only Numpy functions and methods. - The input and output must be in array format, but the logic inside the function - need not be. However, it will run much slower if optimized methods are not used. - - * Run a few test runs before doing a large run so that the PDF parameters can - be correctly set. 
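The first strategy above is simple arithmetic with the queue and PDF cost formulas quoted earlier. Below is a minimal, illustrative sketch of that estimate; the variable names are ours and the numbers are the example values used in this document, not defaults:

.. code-block:: python

    # Back-of-the-envelope per-task memory estimate for a TPF run,
    # using the cost formulas quoted above (example values, not defaults).
    comm_size = 10000          # point pairs held in each data queue
    n_fields = 3               # unique fields used by all added functions
    queue_bytes = comm_size * (6 + n_fields) * 8
    print("data queue: %.0f KB per task" % (queue_bytes / 1e3))   # ~720 KB

    n_functions = 3            # functions added to the TPF
    length_number = 1000       # point separation lengths
    bin_number = (5000, 5000)  # PDF bin edges per output dimension
    pdf_bins = 1
    for b in bin_number:
        pdf_bins *= b
    pdf_bytes = n_functions * length_number * pdf_bins * 8
    print("PDF bins: %.0f GB per task" % (pdf_bytes / 1e9))       # ~600 GB

Running this estimate first makes it clear whether ``vol_ratio`` needs to be raised or the PDF binning coarsened before submitting a large job.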
- - -Advanced Two Point Function Techniques --------------------------------------- - -Density Threshold -^^^^^^^^^^^^^^^^^ - -If points are to only be compared if they both are above some density threshold, -simply pass the density field to the function, and return a value -that lies outside the PDF min/max if the density is too low. -Here are the modifications to the RMS velocity example to do this that -requires a gas density of at least 1e-26 g cm^-3 at each point: - -.. code-block:: python - - def rms_vel(a, b, r1, r2, vec): - # Pick out points with only good densities - a_good = a[:,3] >= 1.e-26 - b_good = b[:,3] >= 1.e-26 - # Pick out the pairs with both good densities - both_good = np.bitwise_and(a_good, b_good) - # Operate only on the velocity columns - vdiff = a[:,0:3] - b[:,0:3] - np.power(vdiff, 2.0, vdiff) - vdiff = np.sum(vdiff, axis=1) - # Multiplying by a boolean array has the effect of multiplying by 1 for - # True, and 0 for False. This operation below will force pairs of not - # good points to zero, outside the PDF (see below), and leave good - # pairs unchanged. - vdiff *= both_good - return vdiff - - ... - tpf = TwoPointFunctions(ds, ["velocity_x", "velocity_y", "velocity_z", "density"], - total_values=1e5, comm_size=10000, - length_number=10, length_range=[1./128, .5], - length_type="log") - - tpf.add_function(rms_vel, ['RMSvdiff'], [False]) - tpf[0].set_pdf_params(bin_type='log', bin_range=[5e4, 5.5e13], bin_number=1000) - -Because 0 is outside of the ``bin_range``, a pair of points that don't satisfy -the density requirements do not contribute to the PDF. -If density cutoffs are to be done in this fashion, the fractional volume that is -above the density threshold should be calculated first, and ``total_values`` -multiplied by the square of the inverse of this (which should be a multiplicative factor -greater than one, meaning more point pairs will be generated to compensate -for trashed points). - -Multidimensional PDFs -^^^^^^^^^^^^^^^^^^^^^ - -It is easy to modify the example above to output in multiple dimensions. In -this example, the ratio of the densities of the two points is recorded at -the same time as the velocity differences. - -.. code-block:: python - - from yt.mods import * - from yt.analysis_modules.two_point_functions.api import * - - ds = load("data0005") - - # Calculate the S in RMS velocity difference between the two points. - # Also store the ratio of densities (keeping them >= 1). - # All functions have four inputs. The first two are containers - # for field values, and the second two are the raw point coordinates - # for the point pair. The name of the function is used to name - # output files. - def rms_vel_D(a, b, r1, r2, vec): - # Operate only on the velocity columns - vdiff = a[:,0:3] - b[:,0:3] - np.power(vdiff, 2.0, vdiff) - vdiff = np.sum(vdiff, axis=1) - # Density ratio - Dratio = np.max(a[:,3]/b[:,3], b[:,3]/a[:,3]) - return [vdiff, Dratio] - - # Initialize a function generator object. - # Set the number of pairs of points to calculate, how big a data queue to - # use, the range of pair separations and how many lengths to use, - # and how to divide that range (linear or log). - tpf = TwoPointFunctions(ds, ["velocity_x", "velocity_y", "velocity_z", "density"], - total_values=1e5, comm_size=10000, - length_number=10, length_range=[1./128, .5], - length_type="log") - - # Adds the function to the generator. 
- f1 = tpf.add_function(rms_vel, ['RMSvdiff', 'Dratio'], [True, False]) - - # Define the bins used to store the results of the function. - # Note that the bin edges can have different division, "lin" and "log". - # In particular, a bin edge of 0 doesn't play well with "log". - f1.set_pdf_params(bin_type=['log', 'lin'], - bin_range=[[5e4, 5.5e13], [1., 10000.]], - bin_number=[1000, 1000]) - - # Runs the functions. - tpf.run_generator() - - # This calculates the M in RMS and writes out a text file with - # the RMS values and the lengths. The R happens because sqrt=[True, False] - # in add_function. - # The file is named 'rms_vel_D.txt'. It will sqrt only the MS velocity column. - tpf.write_out_means() - # Writes out the raw PDF bins and bin edges to a HDF5 file. - # The file is named 'rms_vel_D.h5'. - tpf.write_out_arrays() - -Two-Point Correlation Functions -------------------------------- - -In a Gaussian random field of galaxies, the probability of finding a pair of -galaxies within the volumes :math:`dV_1` and :math:`dV_2` is - -.. math:: - - dP = n^2 dV_1 dV_2 - -where n is the average number density of galaxies. Real galaxies are not -distributed randomly, rather they tend to be clustered on a characteristic -length scale. -Therefore, the probability of two galaxies being paired is a function of -radius - -.. math:: - - dP = n^2 (1 + \xi(\mathbf{r}_{12})) dV_1 dV_2 - -where :math:`\xi(\mathbf{r}_{12})` gives the excess probability as a function of -:math:`\mathbf{r}_{12}`, -and is the two-point correlation function. -Values of :math:`\xi` greater than one mean galaxies are super-gaussian, -and visa-versa. -In order to use the TPF to calculate two point correlation functions, -the number of pairs of galaxies between the two dV volumes is measured. -A PDF is built that gives the probabilities of finding the number of pairs. -To find the excess probability, a function `write_out_correlation` does -something similar to `write_out_means` (above), but also normalizes by the -number density of galaxies and the dV volumes. -As an aside, a good rule of thumb is that -for galaxies, :math:`\xi(r) = (r_0/r)^{1.8}` where :math:`r_0=5` Mpc/h. - -.. image:: _images/2ptcorrelation.png - :width: 275 - :height: 192 - -It is possible to calculate the correlation function for galaxies using -the TPF using a script based on the example below. -Unlike the figure above, the volumes are spherical. -This script can be run in parallel. - -.. code-block:: python - - from yt.mods import * - from yt.utilities.kdtree import * - from yt.analysis_modules.two_point_functions.api import * - - # Specify the dataset on which we want to base our work. - ds = load('data0005') - - # Read in the halo centers of masses. - CoM = [] - data = file('HopAnalysis.out', 'r') - for line in data: - if '#' in line: continue - line = line.split() - xp = float(line[7]) - yp = float(line[8]) - zp = float(line[9]) - CoM.append(np.array([xp, yp, zp])) - data.close() - - # This is the same dV as in the formulation of the two-point correlation. - dV = 0.05 - radius = (3./4. * dV / np.pi)**(2./3.) - - # Instantiate our TPF object. - # For technical reasons (hopefully to be fixed someday) `vol_ratio` - # needs to be equal to the number of tasks used if this is run - # in parallel. A value of -1 automatically does this. - tpf = TwoPointFunctions(ds, ['x'], - total_values=1e7, comm_size=10000, - length_number=11, length_range=[2*radius, .5], - length_type="lin", vol_ratio=-1) - - # Build the kD tree of halos. 
This will be built on all - # tasks so it shouldn't be too large. - # All of these need to be set even if they're not used. - # Convert the data to fortran major/minor ordering - add_tree(1) - fKD.t1.pos = np.array(CoM).T - fKD.t1.nfound_many = np.empty(tpf.comm_size, dtype='int64') - fKD.t1.radius = radius - # These must be set because the function find_many_r_nearest - # does more than how we are using it, and it needs these. - fKD.t1.radius_n = 1 - fKD.t1.nn_dist = np.empty((fKD.t1.radius_n, tpf.comm_size), dtype='float64') - fKD.t1.nn_tags = np.empty((fKD.t1.radius_n, tpf.comm_size), dtype='int64') - # Makes the kD tree. - create_tree(1) - - # Remembering that two of the arguments for a function are the raw - # coordinates, we define a two-point correlation function as follows. - def tpcorr(a, b, r1, r2, vec): - # First, we will find out how many halos are within fKD.t1.radius of our - # first set of points, r1, which will be stored in fKD.t1.nfound_many. - fKD.t1.qv_many = r1.T - find_many_r_nearest(1) - nfirst = fKD.t1.nfound_many.copy() - # Second. - fKD.t1.qv_many = r2.T - find_many_r_nearest(1) - nsecond = fKD.t1.nfound_many.copy() - # Now we simply multiply these two arrays together. The rest comes later. - nn = nfirst * nsecond - return nn - - # Now we add the function to the TPF. - # ``corr_norm`` is used to normalize the correlation function. - tpf.add_function(function=tpcorr, out_labels=['tpcorr'], sqrt=[False], - corr_norm=dV**2 * len(CoM)**2) - - # And define how we want to bin things. - # It has to be linear bin_type because we want 0 to be in the range. - # The big end of bin_range should correspond to the square of the maximum - # number of halos expected inside dV in the volume. - tpf[0].set_pdf_params(bin_type='lin', bin_range=[0, 2500000], bin_number=1000) - - # Runs the functions. - tpf.run_generator() - - # Write out the data to "tpcorr_correlation.txt" - # The file has two columns, the first is radius, and the second is - # the value of \xi. - tpf.write_out_correlation() - - # Empty the kdtree - del fKD.t1.pos, fKD.t1.nfound_many, fKD.t1.nn_dist, fKD.t1.nn_tags - free_tree(1) - -If one wishes to operate on field values, rather than discrete objects like -halos, the situation is a bit simpler, but still a bit confusing. -In the example below, we find the two-point correlation of cells above -a particular density threshold. -Instead of constant-size spherical dVs, the dVs here are the sizes of the grid -cells at each end of the rulers. -Because there can be cells of different volumes when using AMR, -the number of pairs counted is actually the number of most-refined-cells -contained within the volume of the cell. -For one level of refinement, this means that a root-grid cell has the equivalent -of 8 refined grid cells in it. -Therefore, when the number of pairs are counted, it has to be normalized by -the volume of the cells. - -.. code-block:: python - - from yt.mods import * - from yt.utilities.kdtree import * - from yt.analysis_modules.two_point_functions.api import * - - # Specify the dataset on which we want to base our work. - ds = load('data0005') - - # We work in simulation's units, these are for conversion. - vol_conv = ds['cm'] ** 3 - sm = ds.index.get_smallest_dx()**3 - - # Our density limit, in gm/cm**3 - dens = 2e-31 - - # We need to find out how many cells (equivalent to the most refined level) - # are denser than our limit overall. 
- def _NumDens(data): - select = data["density"] >= dens - cv = data["cell_volume"][select] / vol_conv / sm - return (cv.sum(),) - def _combNumDens(data, d): - return d.sum() - add_quantity("TotalNumDens", function=_NumDens, - combine_function=_combNumDens, n_ret=1) - all = ds.all_data() - n = all.quantities["TotalNumDens"]() - - print(n,'n') - - # Instantiate our TPF object. - tpf = TwoPointFunctions(ds, ['density', 'cell_volume'], - total_values=1e5, comm_size=10000, - length_number=11, length_range=[-1, .5], - length_type="lin", vol_ratio=1) - - # Define the density threshold two point correlation function. - def dens_tpcorr(a, b, r1, r2, vec): - # We want to find out which pairs of Densities from a and b are both - # dense enough. The first column is density. - abig = (a[:,0] >= dens) - bbig = (b[:,0] >= dens) - both = np.bitwise_and(abig, bbig) - # We normalize by the volume of the most refined cells. - both = both.astype('float') - both *= a[:,1] * b[:,1] / vol_conv**2 / sm**2 - return both - - # Now we add the function to the TPF. - # ``corr_norm`` is used to normalize the correlation function. - tpf.add_function(function=dens_tpcorr, out_labels=['tpcorr'], sqrt=[False], - corr_norm=n**2 * sm**2) - - # And define how we want to bin things. - # It has to be linear bin_type because we want 0 to be in the range. - # The top end of bin_range should be 2^(2l)+1, where l is the number of - # levels, and bin_number=2^(2l)+2 - tpf[0].set_pdf_params(bin_type='lin', bin_range=[0, 2], bin_number=3) - - # Runs the functions. - tpf.run_generator() - - # Write out the data to "dens_tpcorr_correlation.txt" - # The file has two columns, the first is radius, and the second is - # the value of \xi. - tpf.write_out_correlation() diff --git a/doc/source/analyzing/analysis_modules/XrayEmissionFields.ipynb b/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb similarity index 82% rename from doc/source/analyzing/analysis_modules/XrayEmissionFields.ipynb rename to doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb index 002d9637f86..39a9429cb6f 100644 --- a/doc/source/analyzing/analysis_modules/XrayEmissionFields.ipynb +++ b/doc/source/analyzing/domain_analysis/XrayEmissionFields.ipynb @@ -5,7 +5,7 @@ "metadata": {}, "source": [ "> Note: If you came here trying to figure out how to create simulated X-ray photons and observations,\n", - " you should go [here](photon_simulator.html) instead." + " you should go [here](http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim/) instead." 
] }, { @@ -35,7 +35,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "scrolled": false }, "outputs": [], @@ -67,9 +66,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print (xray_fields)" @@ -85,13 +82,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "sp = ds.sphere(\"c\", (2.0, \"Mpc\"))\n", - "print (sp.quantities.total_quantity(\"xray_luminosity_0.5_7.0_keV\"))" + "print (sp.quantities.total_quantity((\"gas\",\"xray_luminosity_0.5_7.0_keV\")))" ] }, { @@ -105,12 +100,12 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "scrolled": false }, "outputs": [], "source": [ - "slc = yt.SlicePlot(ds, 'z', ['xray_emissivity_0.5_7.0_keV','xray_photon_emissivity_0.5_7.0_keV'],\n", + "slc = yt.SlicePlot(ds, 'z', [('gas', 'xray_emissivity_0.5_7.0_keV'),\n", + " ('gas', 'xray_photon_emissivity_0.5_7.0_keV')],\n", " width=(0.75, \"Mpc\"))\n", "slc.show()" ] @@ -128,7 +123,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "scrolled": false }, "outputs": [], @@ -151,9 +145,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print (xray_fields2)" @@ -163,22 +155,20 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note also that the energy range now corresponds to the *observer* frame, whereas in the source frame the energy range is between `emin*(1+redshift)` and `emax*(1+redshift)`. Let's zoom in on a galaxy and make a projection of the intensity fields:" + "Note also that the energy range now corresponds to the *observer* frame, whereas in the source frame the energy range is between `emin*(1+redshift)` and `emax*(1+redshift)`. Let's zoom in on a galaxy and make a projection of the energy intensity field:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, "scrolled": false }, "outputs": [], "source": [ - "prj = yt.ProjectionPlot(ds2, \"x\", [\"xray_intensity_0.5_2.0_keV\", \"xray_photon_intensity_0.5_2.0_keV\"],\n", + "prj = yt.ProjectionPlot(ds2, \"x\", (\"gas\",\"xray_intensity_0.5_2.0_keV\"),\n", " center=\"max\", width=(40, \"kpc\"))\n", "prj.set_zlim(\"xray_intensity_0.5_2.0_keV\", 1.0e-32, 5.0e-24)\n", - "prj.set_zlim(\"xray_photon_intensity_0.5_2.0_keV\", 1.0e-24, 5.0e-16)\n", "prj.show()" ] }, @@ -193,12 +183,43 @@ " abundance information from your dataset. Finally, if your dataset contains no abundance information,\n", " a primordial hydrogen mass fraction (X = 0.76) will be assumed." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, if you want to place the source at a local, non-cosmological distance, you can forego the `redshift` and `cosmology` arguments and supply a `dist` argument instead, which is either a `(value, unit)` tuple or a `YTQuantity`. Note that here the redshift is assumed to be zero. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "xray_fields3 = yt.add_xray_emissivity_field(ds2, 0.5, 2.0, dist=(1.0,\"Mpc\"), metallicity=(\"gas\", \"metallicity\"), \n", + " table_type='cloudy')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "prj = yt.ProjectionPlot(ds2, \"x\", (\"gas\", \"xray_photon_intensity_0.5_2.0_keV\"),\n", + " center=\"max\", width=(40, \"kpc\"))\n", + "prj.set_zlim(\"xray_photon_intensity_0.5_2.0_keV\", 1.0e-24, 5.0e-16)\n", + "prj.show()" + ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python [default]", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -212,7 +233,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.7.1" } }, "nbformat": 4, diff --git a/doc/source/analyzing/analysis_modules/clump_finding.rst b/doc/source/analyzing/domain_analysis/clump_finding.rst similarity index 100% rename from doc/source/analyzing/analysis_modules/clump_finding.rst rename to doc/source/analyzing/domain_analysis/clump_finding.rst diff --git a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst b/doc/source/analyzing/domain_analysis/cosmology_calculator.rst similarity index 100% rename from doc/source/analyzing/analysis_modules/cosmology_calculator.rst rename to doc/source/analyzing/domain_analysis/cosmology_calculator.rst diff --git a/doc/source/analyzing/domain_analysis/index.rst b/doc/source/analyzing/domain_analysis/index.rst new file mode 100644 index 00000000000..d844a6c1a1b --- /dev/null +++ b/doc/source/analyzing/domain_analysis/index.rst @@ -0,0 +1,76 @@ +.. _domain-analysis: + +Domain-Specific Analysis +======================== + +yt powers a number modules that provide specialized analysis tools +relevant to one or a few domains. Some of these are internal to yt, +but many exist as external packages, either maintained by the yt +project or independently. + +Internal Analysis Modules +------------------------- + +These modules exist within yt itself. + +.. note:: + + As of yt version 3.5, most of the astrophysical analysis tools + have been moved to the :ref:`yt-astro` and :ref:`attic` + packages. See below for more information. + +.. toctree:: + :maxdepth: 2 + + cosmology_calculator + clump_finding + xray_emission_fields + +External Analysis Modules +------------------------- + +These are external packages maintained by the yt project. + +.. _yt-astro: + +yt Astro Analysis +^^^^^^^^^^^^^^^^^ + +Source: https://github.com/yt-project/yt_astro_analysis + +Documentation: https://yt-astro-analysis.readthedocs.io/ + +The ``yt_astro_analysis`` package houses most of the astrophysical +analysis tools that were formerly in the ``yt.analysis_modules`` +import. These include halo finding, custom halo analysis, synthetic +observations, and exports to radiative transfer codes. See +:ref:`yt_astro_analysis:modules` for a list of available +functionality. + +.. _attic: + +yt Attic +^^^^^^^^ + +Source: https://github.com/yt-project/yt_attic + +Documentation: https://yt-attic.readthedocs.io/ + +The ``yt_attic`` contains former yt analysis modules that have +fallen by the wayside. These may have small bugs or were simply +not kept up to date as yt evolved. Tools in here are looking for +a new owner and a new home. 
If you find something in here that +you'd like to bring back to life, either by adding it to +:ref:`yt-astro` or as part of your own package, you are welcome +to it! If you'd like any help, let us know! See +:ref:`yt_attic:attic_modules` for a list of inventory of the +attic. + +Extensions +---------- + +There are a number of independent, yt-related packages for things +like visual effects, interactive widgets, synthetic absorption +spectra, X-ray observations, and merger-trees. See the +`yt Extensions ` page for +a list of available extension packages. diff --git a/doc/source/analyzing/analysis_modules/xray_data_README.rst b/doc/source/analyzing/domain_analysis/xray_data_README.rst similarity index 100% rename from doc/source/analyzing/analysis_modules/xray_data_README.rst rename to doc/source/analyzing/domain_analysis/xray_data_README.rst diff --git a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst b/doc/source/analyzing/domain_analysis/xray_emission_fields.rst similarity index 100% rename from doc/source/analyzing/analysis_modules/xray_emission_fields.rst rename to doc/source/analyzing/domain_analysis/xray_emission_fields.rst diff --git a/doc/source/analyzing/fields.rst b/doc/source/analyzing/fields.rst index 1595d350d77..2388c040fad 100644 --- a/doc/source/analyzing/fields.rst +++ b/doc/source/analyzing/fields.rst @@ -381,6 +381,65 @@ different magnetic field units in the different :ref:`unit systems `_, ``unyt``. + +For a detailed discussion of how to use ``unyt``, we suggest taking a look at +the unyt documentation available at https://unyt.readthedocs.io/, however yt +adds additional capabilities above and beyond what is provided by ``unyt`` +alone, we describe those capabilities below. + +Selecting data from a data object +--------------------------------- + +The data returned by yt will have units attached to it. For example, let's query +a data object for the ``('gas', 'density')`` field: + + >>> import yt + >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') + >>> dd = ds.all_data() + >>> dd['gas', 'density'] + unyt_array([4.92775113e-31, 4.94005233e-31, 4.93824694e-31, ..., + 1.12879234e-25, 1.59561490e-25, 1.09824903e-24], 'g/cm**3') + +We can see how we get back a ``unyt_array`` instance. A ``unyt_array`` is a +subclass of NumPy's NDarray type that has units attached to it: + + >>> dd['gas', 'density'].units + g/cm**3 + +It is straightforward to convert data to different units: + + >>> dd['gas', 'density'].to('Msun/kpc**3') + unyt_array([7.28103608e+00, 7.29921182e+00, 7.29654424e+00, ..., + 1.66785569e+06, 2.35761291e+06, 1.62272618e+07], 'Msun/kpc**3') + +For more details about working with ``unyt_array``, see the `the documentation +`__ for ``unyt``. + +Applying Units to Data +---------------------- + +A ``unyt_array`` can be created from a list, tuple, or NumPy array using +multiplication with a ``Unit`` object. For convenience, each yt dataset has a +``units`` attribute one can use to obtain unit objects for this purpose: + + >>> data = np.random.random((100, 100)) + >>> data_with_units = data * ds.units.gram + +All units known to the dataset will be available via ``ds.units``, including +code units and comoving units. + +Derived Field Units +------------------- + +Special care often needs to be taken to ensure the result of a derived field +will come out in the correct units. The yt unit system will double-check for you +to make sure you are not accidentally making a unit conversion mistake. 
To see +what that means in practice, let's define a derived field corresponding to the +square root of the gas density: + + >>> import yt + >>> import numpy as np + + >>> def root_density(field, data): + ... return np.sqrt(data['gas', 'density']) + + >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') + + >>> ds.add_field(("gas", "root_density"), units="(g/cm**3)**(1/2)", + ... function=root_density, sampling_type='cell') + + >>> ad = ds.all_data() + >>> ad['gas', 'root_density'] + unyt_array([7.01979425e-16, 7.02855059e-16, 7.02726614e-16, ..., + 3.35975050e-13, 3.99451486e-13, 1.04797377e-12], 'sqrt(g)/cm**(3/2)') + +No special unit logic needs to happen inside of the function: the result of +``np.sqrt`` will have the correct units: + + >>> np.sqrt(ad['gas', 'density']) + unyt_array([7.01979425e-16, 7.02855059e-16, 7.02726614e-16, ..., + 3.35975050e-13, 3.99451486e-13, 1.04797377e-12], 'sqrt(g)/cm**(3/2)') + +One could also specify any other units that have dimensions of square root of +density and yt would automatically convert the return value of the field +function to the specified units. An error would be raised if the units are not +dimensionally equivalent to the return value of the field function. + +Code Units +---------- + +All yt datasets are associated with a "code" unit system that corresponds to +whatever unit system the data is represented in on-disk. Let's take a look at +the data in an Enzo simulation, specifically the ``("enzo", "Density")`` field: + + >>> import yt + >>> ds = yt.load('Enzo_64/DD0043/data0043') + >>> ad = ds.all_data() + >>> ad["enzo", "Density"] + unyt_array([6.74992726e-02, 6.12111635e-02, 8.92988636e-02, ..., + 9.09875931e+01, 5.66932465e+01, 4.27780263e+01], 'code_mass/code_length**3') + +we see we get back data from yt in units of ``code_mass/code_length**3``. This +is the density unit formed out of the base units of mass and length in the +internal unit system in the simulation. We can see the values of these units by +looking at the ``length_unit`` and ``mass_unit`` attributes of the dataset +object: + + >>> ds.length_unit + unyt_quantity(128, 'Mpccm/h') + >>> ds.mass_unit + unyt_quantity(4.89045159e+50, 'g') + +And we can see that both of these have values of 1 in the code unit system. + + >>> ds.length_unit.to('code_length') + unyt_quantity(1., 'code_length') + >>> ds.mass_unit.to('code_mass') + unyt_quantity(1., 'code_mass') + +In addition to ``length_unit`` and ``mass_unit``, there are also ``time_unit``, +``velocity_unit``, and ``magnetic_unit`` attributes for this dataset. Some +frontends also define a ``density_unit``, ``pressure_unit``, +``temperature_unit``, and ``specific_energy`` attribute. If these are not defined +then the corresponding unit is calculated from the base length, mass, and time unit. +Each of these attributes corresponds to a unit in the code unit system: + + >>> [un for un in dir(ds.units) if un.startswith('code')] + ['code_density', + 'code_length', + 'code_magnetic', + 'code_mass', + 'code_metallicity', + 'code_pressure', + 'code_specific_energy', + 'code_temperature', + 'code_time', + 'code_velocity'] + +You can use these unit names to convert arbitrary data into a dataset's code +unit system: + + >>> u = ds.units + >>> data = 10**-30 * u.g / u.cm**3 + >>> data.to('code_density') + unyt_quantity(0.36217187, 'code_density') + +Note how in this example we used ``ds.units`` instead of the top-level ``unyt`` +namespace or ``yt.units``. 
This is because the units from ``ds.units`` know
+about the dataset's code unit system and can convert data into it. Unit objects
+from ``unyt`` or ``yt.units`` will not know about any particular dataset's unit
+system.
+
+Comoving units for Cosmological Simulations
+-------------------------------------------
+
+The length unit of the dataset we loaded above is a cosmological unit:
+
+ >>> print(ds.length_unit)
+ 128 Mpccm/h
+
+In English, this says that the length unit is 128 megaparsecs in the comoving
+frame, scaled as if the Hubble constant were 100 km/s/Mpc. Although :math:`h`
+isn't really a unit, yt treats it as one for the purposes of the unit system.
+
+As an aside, `Darren Croton's research note `_
+on the history, use, and interpretation of :math:`h` as it appears in the
+astronomical literature is pretty much required reading for anyone who has to
+deal with factors of :math:`h` every now and then.
+
+In yt, comoving length unit symbols are named following the pattern ``< length
+unit >cm``, i.e. ``pccm`` for a comoving parsec or ``mcm`` for a comoving
+meter. A comoving length unit is different from the normal length unit by a
+factor of :math:`(1+z)`:
+
+ >>> u = ds.units
+ >>> print((1*u.Mpccm)/(1*u.Mpc))
+ 0.9986088499304777 dimensionless
+ >>> 1 / (1 + ds.current_redshift)
+ 0.9986088499304776
+
+As we saw before, :math:`h` is treated like any other unit symbol. It has
+dimensionless units, just like a scalar:
+
+ >>> (1*u.Mpc)/(1*u.Mpc/u.h)
+ unyt_quantity(0.71, '(dimensionless)')
+ >>> ds.hubble_constant
+ 0.71
+
+Using parsec as an example,
+
+ * ``pc``
+   Proper parsecs, :math:`\rm{pc}`.
+
+ * ``pccm``
+   Comoving parsecs, :math:`\rm{pc}/(1+z)`.
+
+ * ``pccm/h``
+   Comoving parsecs normalized by the scaled Hubble constant, :math:`\rm{pc}/h/(1+z)`.
+
+ * ``pc/h``
+   Proper parsecs, normalized by the scaled Hubble constant, :math:`\rm{pc}/h`.
+
+Overriding Code Unit Definitions
+--------------------------------
+
+On occasion, you might have a dataset for a supported frontend that does not
+have the conversions to code units accessible or you may want to change them
+outright. ``yt`` provides a mechanism so that one may provide their own code
+unit definitions to ``yt.load``, which override the default rules for a given
+frontend for defining code units.
+
+This is provided through the ``units_override`` argument to ``yt.load``. We'll
+use an example of an Athena dataset. First, a call to ``yt.load`` without
+``units_override``:
+
+ >>> ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk")
+ >>> ds.length_unit
+ unyt_quantity(1., 'cm')
+ >>> ds.mass_unit
+ unyt_quantity(1., 'g')
+ >>> ds.time_unit
+ unyt_quantity(1., 's')
+ >>> sp1 = ds.sphere("c", (0.1, "unitary"))
+ >>> print(sp1["gas", "density"])
+ [0.05134981 0.05134912 0.05109047 ... 0.14608461 0.14489453 0.14385277] g/cm**3
+
+This particular simulation is of a galaxy cluster merger, so these density values
+are way, way too high. This is happening because Athena does not encode any
+information about the unit system used in the simulation or the output data, so
+yt cannot infer that information and must make an educated guess. In this case
+it incorrectly assumes the data are in CGS units.
+
+However, we know *a priori* what the unit system *should* be, and we can supply
+a ``units_override`` dictionary to ``yt.load`` to override the incorrect
+assumptions yt is making about this dataset. Let's define:
+
+ >>> units_override = {"length_unit": (1.0, "Mpc"),
+ ...                   "time_unit": (1.0, "Myr"),
+ ...                   "mass_unit": (1.0e14, "Msun")}
+
+The ``units_override`` dictionary can take the following keys:
+
+ * ``length_unit``
+ * ``time_unit``
+ * ``mass_unit``
+ * ``magnetic_unit``
+ * ``temperature_unit``
+
+and the associated values can be ``(value, "unit")`` tuples, ``unyt_quantity``
+instances, or floats (in the latter case they are assumed to have the
+corresponding cgs unit). Now let's reload the dataset using our
+``units_override`` dict:
+
+ >>> ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
+ ...              units_override=units_override)
+ >>> sp = ds.sphere("c",(0.1,"unitary"))
+ >>> print(sp["gas", "density"])
+ [3.47531683e-28 3.47527018e-28 3.45776515e-28 ... 9.88689766e-28
+ 9.80635384e-28 9.73584863e-28] g/cm**3
+
+and we see that the data now have much more sensible values for a galaxy cluster
+merger simulation.
+
+Comparing Units From Different Simulations
+------------------------------------------
+
+The code units from different simulations will have different conversions to
+physical coordinates. This can get confusing when working with data from more
+than one simulation or from a single simulation where the units change with
+time.
+
+As an example, let's load up two Enzo datasets from different redshifts in the
+same cosmology simulation, one from high redshift:
+
+ >>> ds1 = yt.load('Enzo_64/DD0002/data0002')
+ >>> ds1.current_redshift
+ 7.8843748886903
+ >>> ds1.length_unit
+ unyt_quantity(128, 'Mpccm/h')
+ >>> ds1.length_unit.in_cgs()
+ unyt_quantity(6.26145538e+25, 'cm')
+
+And another from low redshift:
+
+ >>> ds2 = yt.load('Enzo_64/DD0043/data0043')
+ >>> ds2.current_redshift
+ 0.0013930880640796
+ >>> ds2.length_unit
+ unyt_quantity(128, 'Mpccm/h')
+ >>> ds2.length_unit.in_cgs()
+ unyt_quantity(5.55517285e+26, 'cm')
+
+Now, despite the fact that ``'Mpccm/h'`` means different things for the two
+datasets, it's still a well-defined operation to take the ratio of the two
+length units:
+
+ >>> ds2.length_unit / ds1.length_unit
+ unyt_quantity(8.87201539, '(dimensionless)')
+
+Because code units and comoving units are defined relative to a physical unit
+system, ``unyt`` is able to give the correct answer here. So long as the result
+comes out dimensionless or in a physical unit, the answer will be
+well-defined. However, if we want the answer to come out in the internal units
+of one particular dataset, additional care must be taken. For an example where
+this might be an issue, let's try to compute the sum of two comoving distances,
+one from each simulation:
+
+ >>> d1 = 12 * ds1.units.Mpccm
+ >>> d2 = 12 * ds2.units.Mpccm
+ >>> d1 + d2
+ unyt_quantity(118.46418468, 'Mpccm')
+ >>> d2 + d1
+ unyt_quantity(13.35256754, 'Mpccm')
+
+So this is definitely weird - addition appears to no longer be commutative!
+However, both answers are correct; the confusion arises because ``"Mpccm"``
+is ambiguous in these expressions. In situations like this, ``unyt`` will use
+the definition for units from the leftmost term in an expression, so the first
+example returns data in high-redshift comoving megaparsecs, while the
+second example returns data in low-redshift comoving megaparsecs.
+
+Wherever possible it's best to do calculations in physical units when working
+with more than one dataset. If you need to use comoving units or code units,
+extra care must be taken in your code to avoid ambiguity.
+ diff --git a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb deleted file mode 100644 index 9cf99769b2f..00000000000 --- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb +++ /dev/null @@ -1,744 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Dimensional analysis" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The fastest way to get into the unit system is to explore the quantities that live in the `yt.units` namespace:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import meter, gram, kilogram, second, joule\n", - "print (kilogram*meter**2/second**2 == joule)\n", - "print (kilogram*meter**2/second**2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import m, kg, s, W\n", - "kg*m**2/s**3 == W" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import kilometer\n", - "three_kilometers = 3*kilometer\n", - "print (three_kilometers)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import gram, kilogram\n", - "print (gram+kilogram)\n", - "\n", - "print (kilogram+gram)\n", - "\n", - "print (kilogram/gram)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These unit symbols are all instances of a new class we've added to yt 3.0, `YTQuantity`. `YTQuantity` is useful for storing a single data point." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "type(kilogram)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We also provide `YTArray`, which can store arrays of quantities:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "arr = [3,4,5]*kilogram\n", - "\n", - "print (arr)\n", - "\n", - "print (type(arr))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Creating arrays and quantities" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Most people will interact with the new unit system using `YTArray` and `YTQuantity`. These are both subclasses of numpy's fast array type, `ndarray`, and can be used interchangeably with other NumPy arrays. These new classes make use of the unit system to append unit metadata to the underlying `ndarray`. `YTArray` is intended to store array data, while `YTQuantity` is intended to store scalars in a particular unit system.\n", - "\n", - "There are two ways to create arrays and quantities. The first is to explicitly create it by calling the class constructor and supplying a unit string:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units.yt_array import YTArray\n", - "\n", - "sample_array = YTArray([1,2,3], 'g/cm**3')\n", - "\n", - "print (sample_array)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The unit string can be an arbitrary combination of metric unit names. 
Just a few examples:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units.yt_array import YTQuantity\n", - "from yt.units import kboltz\n", - "from numpy.random import random\n", - "import numpy as np\n", - "\n", - "print (\"Length:\")\n", - "print (YTQuantity(random(), 'm'))\n", - "print (YTQuantity(random(), 'cm'))\n", - "print (YTQuantity(random(), 'Mpc'))\n", - "print (YTQuantity(random(), 'AU'))\n", - "print ('')\n", - "\n", - "print (\"Time:\")\n", - "print (YTQuantity(random(), 's'))\n", - "print (YTQuantity(random(), 'min'))\n", - "print (YTQuantity(random(), 'hr'))\n", - "print (YTQuantity(random(), 'day'))\n", - "print (YTQuantity(random(), 'yr'))\n", - "print ('')\n", - "\n", - "print (\"Mass:\")\n", - "print (YTQuantity(random(), 'g'))\n", - "print (YTQuantity(random(), 'kg'))\n", - "print (YTQuantity(random(), 'Msun'))\n", - "print ('')\n", - "\n", - "print (\"Energy:\")\n", - "print (YTQuantity(random(), 'erg'))\n", - "print (YTQuantity(random(), 'g*cm**2/s**2'))\n", - "print (YTQuantity(random(), 'eV'))\n", - "print (YTQuantity(random(), 'J'))\n", - "print ('')\n", - "\n", - "print (\"Temperature:\")\n", - "print (YTQuantity(random(), 'K'))\n", - "print ((YTQuantity(random(), 'eV')/kboltz).in_cgs())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Dimensional arrays and quantities can also be created by multiplication with another array or quantity:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import kilometer\n", - "print (kilometer)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "three_kilometers = 3*kilometer\n", - "print (three_kilometers)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When working with a YTArray with complicated units, you can use `unit_array` and `unit_quantity` to conveniently apply units to data:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "test_array = YTArray(np.random.random(20), 'erg/s')\n", - "\n", - "print (test_array)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`unit_quantity` returns a `YTQuantity` with a value of 1.0 and the same units as the array it is a attached to." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (test_array.unit_quantity)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`unit_array` returns a `YTArray` with the same units and shape as the array it is a attached to and with all values set to 1.0." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (test_array.unit_array)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These are useful when doing arithmetic:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (test_array + 1.0*test_array.unit_quantity)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (test_array + np.arange(20)*test_array.unit_array)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`. You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (test_array.uq)\n", - "\n", - "print (test_array.unit_quantity == test_array.uq)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from numpy import array_equal\n", - "\n", - "print (test_array.ua)\n", - "\n", - "print (array_equal(test_array.ua, test_array.unit_array))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Unit metadata is encoded in the `units` attribute that hangs off of `YTArray` or `YTQuantity` instances:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import kilometer, erg\n", - "\n", - "print (\"kilometer's units:\", kilometer.units)\n", - "print (\"kilometer's dimensions:\", kilometer.units.dimensions)\n", - "\n", - "print ('')\n", - "\n", - "print (\"erg's units:\", erg.units)\n", - "print (\"erg's dimensions: \", erg.units.dimensions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Arithmetic with `YTQuantity` and `YTArray`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Of course it wouldn't be very useful if all we could do is create data with units. 
The real power of the new unit system is that we can add, subtract, multiply, and divide using quantities and dimensional arrays:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "a = YTQuantity(3, 'cm')\n", - "b = YTQuantity(3, 'm')\n", - "\n", - "print (a+b)\n", - "print (b+a)\n", - "print ('')\n", - "\n", - "print ((a+b).in_units('ft'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "a = YTQuantity(42, 'mm')\n", - "b = YTQuantity(1, 's')\n", - "\n", - "print (a/b)\n", - "print ((a/b).in_cgs())\n", - "print ((a/b).in_mks())\n", - "print ((a/b).in_units('km/s'))\n", - "print ('')\n", - "\n", - "print (a*b)\n", - "print ((a*b).in_cgs())\n", - "print ((a*b).in_mks())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "m = YTQuantity(35, 'g')\n", - "a = YTQuantity(9.8, 'm/s**2')\n", - "\n", - "print (m*a)\n", - "print ((m*a).in_units('dyne'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import G, kboltz\n", - "\n", - "print (\"Newton's constant: \", G)\n", - "print (\"Newton's constant in MKS: \", G.in_mks(), \"\\n\")\n", - "\n", - "print (\"Boltzmann constant: \", kboltz)\n", - "print (\"Boltzmann constant in MKS: \", kboltz.in_mks())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "rho = YTQuantity(1, 'g/cm**3')\n", - "t_ff = (G*rho)**(-0.5)\n", - "\n", - "print (t_ff)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "An exception is raised if we try to do a unit operation that doesn't make any sense:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.utilities.exceptions import YTUnitOperationError\n", - "\n", - "a = YTQuantity(3, 'm')\n", - "b = YTQuantity(5, 'erg')\n", - "\n", - "try:\n", - " print (a+b)\n", - "except YTUnitOperationError as e:\n", - " print (e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A plain `ndarray` or a `YTArray` created with empty units is treated as a dimensionless quantity and can be used in situations where unit consistency allows it to be used: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "a = YTArray([1.,2.,3.], 'm')\n", - "b = np.array([2.,2.,2.])\n", - "\n", - "print (\"a: \", a)\n", - "print (\"b: \", b)\n", - "print (\"a*b: \", a*b)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "c = YTArray([2,2,2])\n", - "\n", - "print (\"c: \", c)\n", - "print (\"a*c: \", a*c)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Saving and Loading `YTArray`s to/from disk" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII. 
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### HDF5" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To write to HDF5, use `write_hdf5`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "my_dens = YTArray(np.random.random(10), 'Msun/kpc**3')\n", - "my_temp = YTArray(np.random.random(10), 'K')\n", - "my_dens.write_hdf5(\"my_data.h5\", dataset_name=\"density\")\n", - "my_temp.write_hdf5(\"my_data.h5\", dataset_name=\"temperature\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Where we used the `dataset_name` keyword argument to create a separate dataset for each array in the same file.\n", - "\n", - "We can use the `from_hdf5` classmethod to read the data back in:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "read_dens = YTArray.from_hdf5(\"my_data.h5\", dataset_name=\"density\")\n", - "print (read_dens)\n", - "print (my_dens)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can use the `info` keyword argument to `write_hdf5` to write some additional data to the file, which will be stored as attributes of the dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "my_vels = YTArray(np.random.normal(10), 'km/s')\n", - "info = {\"source\":\"galaxy cluster\",\"user\":\"jzuhone\"}\n", - "my_vels.write_hdf5(\"my_data.h5\", dataset_name=\"velocity\", info=info)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you want to read/write a dataset from/to a specific group within the HDF5 file, use the `group_name` keyword:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "my_vels.write_hdf5(\"data_in_group.h5\", dataset_name=\"velocity\", info=info, group_name=\"/data/fields\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where we have used the standard HDF5 slash notation for writing a group hierarchy (e.g., group within a group):" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### ASCII" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import yt\n", - "a = YTArray(np.random.random(size=10), \"cm\")\n", - "b = YTArray(np.random.random(size=10), \"g\")\n", - "c = YTArray(np.random.random(size=10), \"s\")\n", - "yt.savetxt(\"my_data.dat\", [a,b,c], header='My cool data', footer='Data is over', delimiter=\"\\t\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The file we wrote can then be easily used in other contexts, such as plotting in Gnuplot, or loading into a spreadsheet, or just for causal examination. 
We can quickly check it here:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%%bash \n", - "more my_data.dat" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see that the header comes first, and then right before the data we have a subheader marking the units of each column. The footer comes after the data. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`yt.loadtxt` can be used to read the same data with units back in, or read data that has been generated from some other source. Just make sure it's in the format above. `loadtxt` can also selectively read from particular columns in the file with the `usecols` keyword argument:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "bb, cc = yt.loadtxt(\"my_data.dat\", usecols=(1,2), delimiter=\"\\t\")\n", - "print (bb)\n", - "print (b)\n", - "print ('')\n", - "print (cc)\n", - "print (c)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb deleted file mode 100644 index d35be58a9bf..00000000000 --- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb +++ /dev/null @@ -1,697 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the past, querying a data object with a field name returned a NumPy `ndarray` . In the new unit system, data object queries will return a `YTArray`, a subclass of `ndarray` that preserves all of the nice properties of `ndarray`, including broadcasting, deep and shallow copies, and views. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Selecting data from an object" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`YTArray` is 'unit-aware'. 
Let's show how this works in practice using a sample Enzo dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import yt\n", - "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n", - "\n", - "dd = ds.all_data()\n", - "maxval, maxloc = ds.find_max('density')\n", - "\n", - "dens = dd['density']" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (maxval)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dens)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mass = dd['cell_mass']\n", - "\n", - "print (\"Cell Masses in CGS: \\n\", mass, \"\\n\")\n", - "print (\"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\")\n", - "print (\"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\")\n", - "print (\"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dx = dd['dx']\n", - "print (\"Cell dx in code units: \\n\", dx, \"\\n\")\n", - "print (\"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\")\n", - "print (\"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\")\n", - "print (\"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Unit conversions" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "YTArray defines several user-visible member functions that allow data to be converted from one unit system to another:\n", - "\n", - "* `in_units`\n", - "* `in_cgs`\n", - "* `in_mks`\n", - "* `in_base`\n", - "* `convert_to_units`\n", - "* `convert_to_cgs`\n", - "* `convert_to_mks`\n", - "* `convert_to_base`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The first method, `in_units`, returns a copy of the array in the units denoted by a string argument:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd['density'].in_units('Msun/pc**3'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`in_cgs` and `in_mks` return a copy of the array converted to CGS and MKS units, respectively:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd['pressure'])\n", - "print (dd['pressure'].in_cgs())\n", - "print (dd['pressure'].in_mks())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`in_cgs` and `in_mks` are just special cases of the more general `in_base`, which can convert a `YTArray` to a number of different unit systems:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd['pressure'].in_base('imperial')) # Imperial/English base units\n", - "print (dd['pressure'].in_base('galactic')) # Base units of kpc, Msun, Myr\n", - "print (dd['pressure'].in_base('planck')) # Base units in the Planck system\n", - "print (dd['pressure'].in_base()) # defaults to cgs if no argument given" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`in_base` also takes the `\"code\"` argument to convert the `YTArray` into the base units of the dataset:" - ] - }, - { - "cell_type": "code", 
- "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd['pressure'].in_base(\"code\")) # The IsolatedGalaxy dataset from above" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "yt defines a number of unit systems, and new unit systems may be added by the user, which can also be passed to `in_base`. To learn more about the unit systems, how to use them with datasets and other objects, and how to add new ones, see [Unit Systems](unit_systems.html)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The rest of the methods do in-place conversions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dens = dd['density']\n", - "print (dens)\n", - "\n", - "dens.convert_to_units('Msun/pc**3')\n", - "print (dens)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "One possibly confusing wrinkle when using in-place conversions is if you try to query `dd['density']` again, you'll find that it has been converted to solar masses per cubic parsec:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd['density'])\n", - "\n", - "dens.convert_to_units('g/cm**3')\n", - "\n", - "print (dens)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since the unit metadata is preserved and the array values are still correct in the new unit system, all numerical operations will still be correct.\n", - "\n", - "One of the nicest aspects of this new unit system is that the symbolic algebra for mathematical operations on data with units is performed automatically by sympy. This example shows how we can construct a field with density units from two other fields that have units of mass and volume:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd['cell_mass'])\n", - "print (dd['cell_volume'].in_units('cm**3'))\n", - "\n", - "print ((dd['cell_mass']/dd['cell_volume']).in_cgs())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Electrostatic/Electromagnetic Units" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Electromagnetic units can be a bit tricky, because the units for such quantities in different unit systems can have entirely different dimensions, even if they are meant to represent the same physical quantities. For example, in the SI system of units, current in Amperes is a fundamental unit of measure, so the unit of charge \"coulomb\" is equal to one ampere-second. On the other hand, in the Gaussian/CGS system, there is no equivalent base electromagnetic unit, and the electrostatic charge unit \"esu\" is equal to one $\\mathrm{cm^{3/2}g^{-1/2}s^{-1}}$ (which does not have any apparent physical significance). 
`yt` recognizes this difference:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "q1 = yt.YTArray(1.0,\"C\") # coulombs\n", - "q2 = yt.YTArray(1.0,\"esu\") # electrostatic units / statcoulomb\n", - "\n", - "print (\"units =\", q1.in_mks().units, \", dims =\", q1.units.dimensions)\n", - "print (\"units =\", q2.in_cgs().units, \", dims =\", q2.units.dimensions)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "B1 = yt.YTArray(1.0,\"T\") # tesla\n", - "B2 = yt.YTArray(1.0,\"gauss\") # gauss\n", - "\n", - "print (\"units =\", B1.in_mks().units, \", dims =\", B1.units.dimensions)\n", - "print (\"units =\", B2.in_cgs().units, \", dims =\", B2.units.dimensions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To convert between these two systems, use [Unit Equivalencies](unit_equivalencies.html)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Working with views and converting to ndarray" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are two ways to convert the data into a numpy array. The most straightforward and safe way to do this is to create a copy of the array data. The following cell demonstrates four equivalent ways of doing this, in increasing degree of terseness." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "dens = dd['cell_mass']\n", - "\n", - "print (dens.to_ndarray())\n", - "print (np.array(dens))\n", - "print (dens.value)\n", - "print (dens.v)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since we have a copy of the data, we can mess with it however we wish without disturbing the original data returned by the yt data object.\n", - "\n", - "There is yet another way to return a copy of the array data in a `YTArray` or the floating-point value of a `YTQuantity`, which also allows for the possibility to convert to different units. This is done using the `to_value` method:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(dens.to_value()) # Don't change units\n", - "print(dens.to_value(\"Msun\")) # Change units to solar masses\n", - "print(dens[0].to_value(\"lbm\")) # Pick the first value and change its units to pounds" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Another way to touch the raw array data is to get a _view_. A numpy view is a lightweight array interface to a memory buffer. There are four ways to create views of YTArray instances:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd['cell_mass'].ndarray_view())\n", - "print (dd['cell_mass'].view(np.ndarray))\n", - "print (dd['cell_mass'].ndview)\n", - "print (dd['cell_mass'].d)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When working with views, remember that you are touching the raw array data and no longer have any of the unit checking provided by the unit system. This can be useful where it might be more straightforward to treat the array as if it didn't have units but without copying the data." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "density_values = dd['density'].d\n", - "density_values[0:10] = 0\n", - "\n", - "# The original array was updated\n", - "print (dd['density'])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Round-Trip Conversions to and from Other Unit Systems" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](https://astropy.readthedocs.io/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Some examples of converting from AstroPy units to yt:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "from astropy import units as u\n", - "\n", - "x = 42.0 * u.meter\n", - "y = yt.YTQuantity.from_astropy(x)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (x, type(x))\n", - "print (y, type(y))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "a = np.random.random(size=10) * u.km/u.s\n", - "b = yt.YTArray.from_astropy(a)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (a, type(a))\n", - "print (b, type(b))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. 
For arrays:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "temp = dd[\"temperature\"]\n", - "atemp = temp.to_astropy()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (temp, type(temp))\n", - "print (atemp, type(atemp))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "and quantities:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "from yt.units import kboltz\n", - "kb = kboltz.to_astropy()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (kboltz, type(kboltz))\n", - "print (kb, type(kb))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As a sanity check, you can show that it works round-trip:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "k1 = kboltz.to_astropy()\n", - "k2 = yt.YTQuantity.from_astropy(kb)\n", - "print(k1)\n", - "print(k2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "c = yt.YTArray.from_astropy(a)\n", - "d = c.to_astropy()\n", - "print (a == d)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also do the same thing with unitful quantities from the [Pint package](https://pint.readthedocs.org), using essentially the same procedure:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "from pint import UnitRegistry\n", - "ureg = UnitRegistry()\n", - "v = 1000.*ureg.km/ureg.s\n", - "w = yt.YTQuantity.from_pint(v)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (v, type(v))\n", - "print (w, type(w))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "ptemp = temp.to_pint()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (temp, type(temp))\n", - "print (ptemp, type(ptemp))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Defining New Units" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "yt also provides a way to define your own units. Suppose you wanted to define a new unit for \"miles per hour\", the familiar \"mph\", which is not already in yt. 
One can do this by calling `yt.define_unit()`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "yt.define_unit(\"mph\", (1.0, \"mile/hr\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once this unit is defined, it can be used in the same way as any other unit:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from yt.units import clight\n", - "print (clight.to('mph'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you want to define a new unit which is prefixable (like SI units), you can set `prefixable=True` when defining the unit:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from yt import YTQuantity\n", - "yt.define_unit(\"L\", (1000.0, \"cm**3\"), prefixable=True)\n", - "print (YTQuantity(1.0, \"mL\").to(\"cm**3\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`yt.define_unit()` defines new units for all yt operations. However, new units can be defined for particular datasets only as well using `ds.define_unit()`, which has the same signature:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "ds.define_unit(\"M_star\", (2.0e13, \"Msun\"))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dd = ds.all_data()\n", - "print(dd.quantities.total_mass().to(\"M_star\"))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [default]", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.1" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb deleted file mode 100644 index 8f16e1261a0..00000000000 --- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb +++ /dev/null @@ -1,433 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In yt 3.0, we want to make it easier to access \"raw\" simulation data that a code writes directly to disk. The new unit system makes it much simpler to convert back and forth between physical coordinates and the unscaled \"raw\" coordinate system used internally in the simulation code. In some cases, this conversion involves transforming to comoving coordinates, so that is also covered here." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Code units" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import yt\n", - "ds = yt.load('Enzo_64/DD0043/data0043')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object. Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "reg = ds.unit_registry\n", - "\n", - "for un in reg.keys():\n", - " if un.startswith('code_'):\n", - " fmt_tup = (un, reg.lut[un][0], str(reg.lut[un][1]))\n", - " print (\"Unit name: {:<15}\\nCGS conversion: {:<15}\\nDimensions: {:<15}\\n\".format(*fmt_tup))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "fmt_tup" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Most of the time you will not have to deal with the unit registry. For example, the conversion factors to code units are stored as attributes of the dataset object:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (\"Length unit: \", ds.length_unit)\n", - "print (\"Time unit: \", ds.time_unit)\n", - "print (\"Mass unit: \", ds.mass_unit)\n", - "print (\"Velocity unit: \", ds.velocity_unit)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Conversion factors will be supplied in CGS by default. We can also ask what the conversion factors are in code units." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (\"Length unit: \", ds.length_unit.in_units('code_length'))\n", - "print (\"Time unit: \", ds.time_unit.in_units('code_time'))\n", - "print (\"Mass unit: \", ds.mass_unit.in_units('code_mass'))\n", - "print (\"Velocity unit: \", ds.velocity_unit.in_units('code_velocity'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "as expected, all the conversion factors are unity in code units." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also play with unit conversions on `ds.domain_width`. 
First, we see for enzo how code length units are defined relative to the domain width:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds.domain_width" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds.domain_width.in_cgs()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds.domain_width.in_units('Mpccm/h')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Comoving units" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This last example uses a cosmological unit. In English, I asked for the domain width in comoving megaparsecs, scaled as if the hubble constant were 100 km/s/Mpc. Although $h$ isn't really a unit, yt treats it as one for the purposes of the unit system. \n", - "\n", - "As an aside, Darren Croton's [research note](https://arxiv.org/abs/1308.4150) on the history, use, and interpretation of $h$ as it appears in the astronomical literature is pretty much required reading for anyone who has to deal with factors of $h$ every now and then.\n", - "\n", - "In yt, comoving length unit symbols are named following the pattern `(length symbol)cm`, i.e. `pccm` for comoving parsec or `mcm` for a comoving meter. A comoving length unit is different from the normal length unit by a factor of $(1+z)$:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "z = ds.current_redshift\n", - " \n", - "print (ds.quan(1, 'Mpc')/ds.quan(1, 'Mpccm'))\n", - "print (1+z)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we saw before, $h$ is treated like any other unit symbol. It has `dimensionless` units, just like a scalar:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (ds.quan(1, 'Mpc')/ds.quan(1, 'Mpc/h'))\n", - "print (ds.hubble_constant)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These units can be used in readily used in plots and anywhere a length unit is appropriate in yt." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "slc = yt.SlicePlot(ds, 0, 'density', width=(128, 'Mpccm/h'))\n", - "slc.set_figure_size(6)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### The unit registry" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When you create a `YTArray` without referring to a unit registry, yt uses the default unit registry, which does not include code units or comoving units." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt import YTQuantity\n", - "\n", - "a = YTQuantity(3, 'cm')\n", - "\n", - "print (a.units.registry.keys())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When a dataset is loaded, yt infers conversion factors from the internal simulation unit system to the CGS unit system. These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols. 
For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n", - "\n", - "* `ds.arr()`\n", - "* `ds.quan()`\n", - "\n", - "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units. For example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "a = ds.quan(3, 'code_length')\n", - "\n", - "print (a)\n", - "print (a.in_cgs())\n", - "print (a.in_units('Mpccm/h'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "b = ds.arr([3, 4, 5], 'Mpccm/h')\n", - "print (b)\n", - "print (b.in_cgs())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Overriding Code Unit Definitions" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "On occasion, you might have a dataset for a supported frontend that does not have the conversions to code units accessible (for example, Athena data) or you may want to change them outright. `yt` provides a mechanism so that one may provide their own code unit definitions to `load`, which override the default rules for a given frontend for defining code units. This is provided through the `units_override` dictionary. We'll use an example of an Athena dataset. First, a call to `load` without `units_override`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds1 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\")\n", - "print (ds1.length_unit)\n", - "print (ds1.mass_unit)\n", - "print (ds1.time_unit)\n", - "sp1 = ds1.sphere(\"c\",(0.1,\"unitary\"))\n", - "print (sp1[\"density\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This is a galaxy cluster dataset, so it is not likely that the units of density are correct. We happen to know that the unit definitions are different, so we can override the units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "units_override = {\"length_unit\":(1.0,\"Mpc\"),\n", - " \"time_unit\":(1.0,\"Myr\"),\n", - " \"mass_unit\":(1.0e14,\"Msun\")}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`units_override` can take the following keys:\n", - "\n", - "* `length_unit`\n", - "* `time_unit`\n", - "* `mass_unit`\n", - "* `magnetic_unit`\n", - "* `temperature_unit`\n", - "\n", - "and the associated values can be (value, unit) tuples, `YTQuantities`, or floats (in the latter case they are assumed to have the corresponding cgs unit). 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds2 = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)\n", - "print (ds2.length_unit)\n", - "print (ds2.mass_unit)\n", - "print (ds2.time_unit)\n", - "sp2 = ds2.sphere(\"c\",(0.1,\"unitary\"))\n", - "print (sp2[\"density\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This option should be used very carefully, and *only* if you know that the dataset does not provide units or that the unit definitions generated are incorrect for some reason. " - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb b/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb deleted file mode 100644 index f24b98057b3..00000000000 --- a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb +++ /dev/null @@ -1,125 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets. Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional array instances produced by different unit systems. Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n", - "\n", - "As an example, let's load up two enzo datasets from different redshifts in the same cosmology simulation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# A high redshift output from z ~ 8\n", - "import yt\n", - "\n", - "ds1 = yt.load('Enzo_64/DD0002/data0002')\n", - "print (\"z = %s\" % ds1.current_redshift)\n", - "print (\"Internal length units = %s\" % ds1.length_unit)\n", - "print (\"Internal length units in cgs = %s\" % ds1.length_unit.in_cgs())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# A low redshift output from z ~ 0\n", - "ds2 = yt.load('Enzo_64/DD0043/data0043')\n", - "print (\"z = %s\" % ds2.current_redshift)\n", - "print (\"Internal length units = %s\" % ds2.length_unit)\n", - "print (\"Internal length units in cgs = %s\" % ds2.length_unit.in_cgs())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Given that these are from the same simulation in comoving units, the CGS length units are different by a factor of $(1+z_1)/(1+z_2)$:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (ds2.length_unit.in_cgs()/ds1.length_unit.in_cgs() == (1+ds1.current_redshift)/(1+ds2.current_redshift))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It's not necessary to convert to CGS units either. 
yt will automatically account for the fact that a comoving megaparsec in the first output is physically different compared to a comoving megaparsec in the second output." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (ds2.length_unit/ds1.length_unit)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Time series analysis is also straightforward. Since dimensional arrays and quantities carry around the conversion factors to CGS with them, we can safely pickle them, share them with other processors, or combine them without worrying about differences in unit definitions.\n", - "\n", - "The following snippet, which iterates over a time series and saves the `length_unit` quantity to a storage dictionary. This should work correctly on one core or in a script run in parallel." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import yt\n", - "yt.enable_parallelism()\n", - "\n", - "ts = yt.load(\"Enzo_64/DD????/data????\")\n", - "\n", - "storage = {}\n", - "\n", - "for sto, ds in ts.piter(storage=storage):\n", - " sto.result_id = float(ds.current_time.in_units('Gyr'))\n", - " sto.result = ds.length_unit\n", - "\n", - "if yt.is_root():\n", - " for t in sorted(storage.keys()):\n", - " print (t, storage[t].in_units('Mpc'))" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3.0 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/doc/source/analyzing/units/5)_Units_and_plotting.ipynb b/doc/source/analyzing/units/5)_Units_and_plotting.ipynb deleted file mode 100644 index f4fe9b49479..00000000000 --- a/doc/source/analyzing/units/5)_Units_and_plotting.ipynb +++ /dev/null @@ -1,185 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It's now easy to adjust the units of a field you are plotting.\n", - "\n", - "> Note: the following examples use `SlicePlot`, but the same thing should work for `ProjectionPlot`, `OffAxisSlicePlot`, and `OffAxisProjectionPlot`." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, let's create a new `SlicePlot`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import yt\n", - "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n", - "slc = yt.SlicePlot(ds, 2, 'density', center=[0.5, 0.5, 0.5], width=(15, 'kpc'))\n", - "slc.set_figure_size(6)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The units used to scale the colorbar can be adjusted by calling the `set_unit` function that is attached to the plot object. 
This example creates a plot of density in code units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "slc.set_unit('density', 'code_mass/code_length**3')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example creates a plot of gas density in solar masses per cubic parsec:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "slc.set_unit('density', 'Msun/pc**3')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `set_unit` function will accept any unit string that is dimensionally equivalent to the plotted field. If it is supplied a unit that is not dimensionally equivalent, it will raise an error:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.utilities.exceptions import YTUnitConversionError\n", - "\n", - "try:\n", - " slc.set_unit('density', 'Msun')\n", - "except YTUnitConversionError as e:\n", - " print (e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similarly, set_unit is defined for `ProfilePlot` and `PhasePlot` instances as well.\n", - "\n", - "To illustrate this point, let's first create a new `ProfilePlot`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "dd = ds.all_data()\n", - "plot = yt.ProfilePlot(dd, 'density', 'temperature', weight_field='cell_mass')\n", - "plot.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And adjust the unit of the y-axis:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plot.set_unit('density', 'Msun/pc**3')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Similarly for PhasePlot:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plot = yt.PhasePlot(dd, 'density', 'temperature', 'cell_mass')\n", - "plot.set_figure_size(6)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "plot.set_unit('cell_mass', 'Msun')\n", - "plot.set_unit('density', 'Msun/pc**3')" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb deleted file mode 100644 index 62a0617daae..00000000000 --- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb +++ /dev/null @@ -1,318 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Some physical quantities are directly related to other unitful quantities by a constant, but otherwise do not have the same units. 
To facilitate conversions between these quantities, `yt` implements a system of unit equivalencies (inspired by the [AstroPy implementation](http://docs.astropy.org/en/latest/units/equivalencies.html)). The possible unit equivalencies are:\n", - "\n", - "* `\"thermal\"`: conversions between temperature and energy ($E = k_BT$)\n", - "* `\"spectral\"`: conversions between wavelength, frequency, and energy for photons ($E = h\\nu = hc/\\lambda, c = \\lambda\\nu$)\n", - "* `\"mass_energy\"`: conversions between mass and energy ($E = mc^2$)\n", - "* `\"lorentz\"`: conversions between velocity and Lorentz factor ($\\gamma = 1/\\sqrt{1-(v/c)^2}$)\n", - "* `\"schwarzschild\"`: conversions between mass and Schwarzschild radius ($R_S = 2GM/c^2$)\n", - "* `\"compton\"`: conversions between mass and Compton wavelength ($\\lambda = h/mc$)\n", - "\n", - "The following unit equivalencies only apply under conditions applicable for an ideal gas with a constant mean molecular weight $\\mu$ and ratio of specific heats $\\gamma$:\n", - "\n", - "* `\"number_density\"`: conversions between density and number density ($n = \\rho/\\mu{m_p}$)\n", - "* `\"sound_speed\"`: conversions between temperature and sound speed for an ideal gas ($c_s^2 = \\gamma{k_BT}/\\mu{m_p}$)\n", - "\n", - "A `YTArray` or `YTQuantity` can be converted to an equivalent using `in_units` (previously described in [Fields and Unit Conversion](fields_and_unit_conversion.html)), where the unit and the equivalence name are provided as additional arguments:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import yt\n", - "from yt import YTQuantity\n", - "import numpy as np\n", - "\n", - "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n", - "\n", - "dd = ds.all_data()\n", - "\n", - "print (dd[\"temperature\"].in_units(\"erg\", equivalence=\"thermal\"))\n", - "print (dd[\"temperature\"].in_units(\"eV\", equivalence=\"thermal\"))\n", - "\n", - "# Rest energy of the proton\n", - "from yt.units import mp\n", - "E_p = mp.in_units(\"GeV\", equivalence=\"mass_energy\")\n", - "print (E_p)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Most equivalencies can go in both directions, without any information required other than the unit you want to convert to (this is not the case for the electromagnetic equivalencies, which we'll discuss later):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from yt.units import clight\n", - "v = 0.1*clight\n", - "g = v.in_units(\"dimensionless\", equivalence=\"lorentz\")\n", - "print (g)\n", - "print (g.in_units(\"c\", equivalence=\"lorentz\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The previously described `to_value` method, which works like `in_units` except that it returns a bare NumPy array or floating-point number, also accepts equivalencies:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd[\"temperature\"].to_value(\"erg\", equivalence=\"thermal\"))\n", - "print (mp.to_value(\"GeV\", equivalence=\"mass_energy\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Special Equivalencies" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Some equivalencies can take supplemental information. 
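As a standalone illustration (a minimal sketch, not part of the original notebook; the density value and mean molecular weight below are made up), a supplemental keyword is passed straight through `in_units`, and the result can be cross-checked against the defining relation $n = \rho/(\mu m_p)$:

```python
from yt import YTQuantity
from yt.units import mp  # proton mass

rho = YTQuantity(1.0e-24, "g/cm**3")  # a made-up gas density
mu = 1.22                             # e.g. a neutral primordial gas

# Supplemental keywords (here mu) ride along with the equivalence ...
n_equiv = rho.in_units("cm**-3", equivalence="number_density", mu=mu)
# ... and should agree with doing the conversion by hand, n = rho / (mu * m_p)
n_manual = (rho / (mu * mp)).in_units("cm**-3")

print(n_equiv, n_manual)
```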
The `\"number_density\"` equivalence can take a custom mean molecular weight (default is $\\mu = 0.6$):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd[\"density\"].max())\n", - "print (dd[\"density\"].in_units(\"cm**-3\", equivalence=\"number_density\").max())\n", - "print (dd[\"density\"].in_units(\"cm**-3\", equivalence=\"number_density\", mu=0.75).max())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `\"sound_speed\"` equivalence optionally takes the ratio of specific heats $\\gamma$ and the mean molecular weight $\\mu$ (defaults are $\\gamma$ = 5/3, $\\mu = 0.6$):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (dd[\"temperature\"].in_units(\"km/s\", equivalence=\"sound_speed\").mean())\n", - "print (dd[\"temperature\"].in_units(\"km/s\", equivalence=\"sound_speed\", gamma=4./3., mu=0.5).mean())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These options must be used with caution, and only if you know the underlying data adheres to these assumptions!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Electromagnetic Equivalencies" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Special, one-way equivalencies exist for converting between electromagnetic units in the cgs and SI unit systems. These exist since in the cgs system, electromagnetic units are comprised of the base units of seconds, grams and centimeters, whereas in the SI system Ampere is a base unit. For example, the dimensions of charge are completely different in the two systems:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "Q1 = YTQuantity(1.0,\"C\")\n", - "Q2 = YTQuantity(1.0,\"esu\")\n", - "print (\"Q1 dims =\", Q1.units.dimensions)\n", - "print (\"Q2 dims =\", Q2.units.dimensions)\n", - "print (\"Q1 base units =\", Q1.in_mks())\n", - "print (\"Q2 base units =\", Q2.in_cgs())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To convert from a cgs unit to an SI unit, use the \"SI\" equivalency:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from yt.units import qp # the elementary charge in esu\n", - "qp_SI = qp.in_units(\"C\", equivalence=\"SI\") # convert to Coulombs\n", - "print (qp)\n", - "print (qp_SI)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To convert from an SI unit to a cgs unit, use the \"CGS\" equivalency:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "B = YTQuantity(1.0,\"T\") # magnetic field in Tesla\n", - "print (B, B.in_units(\"gauss\", equivalence=\"CGS\")) # convert to Gauss" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Equivalencies exist between the SI and cgs dimensions of charge, current, magnetic field, electric potential, and resistance. 
As a neat example, we can convert current in Amperes and resistance in Ohms to their cgs equivalents, and then use them to calculate the \"Joule heating\" of a conductor with resistance $R$ and current $I$:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "I = YTQuantity(1.0,\"A\")\n", - "I_cgs = I.in_units(\"statA\", equivalence=\"CGS\")\n", - "R = YTQuantity(1.0,\"ohm\")\n", - "R_cgs = R.in_units(\"statohm\", equivalence=\"CGS\")\n", - "P = I**2*R\n", - "P_cgs = I_cgs**2*R_cgs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The dimensions of current and resistance in the two systems are completely different, but the formula gives us the power dissipated dimensions of energy per time, so the dimensions and the result should be the same, which we can check:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (P_cgs.units.dimensions == P.units.dimensions)\n", - "print (P.in_units(\"W\"), P_cgs.in_units(\"W\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Determining Valid Equivalencies" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If a certain equivalence does not exist for a particular unit, then an error will be thrown:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from yt.utilities.exceptions import YTInvalidUnitEquivalence\n", - "\n", - "try:\n", - " x = v.in_units(\"angstrom\", equivalence=\"spectral\")\n", - "except YTInvalidUnitEquivalence as e:\n", - " print (e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can check if a `YTArray` has a given equivalence with `has_equivalent`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print (mp.has_equivalent(\"compton\"))\n", - "print (mp.has_equivalent(\"thermal\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To list the equivalencies available for a given `YTArray` or `YTQuantity`, use the `list_equivalencies` method:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "E_p.list_equivalencies()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [default]", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.1" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/doc/source/analyzing/units/7)_Unit_Systems.ipynb b/doc/source/analyzing/units/7)_Unit_Systems.ipynb deleted file mode 100644 index c1fbcd4dfa2..00000000000 --- a/doc/source/analyzing/units/7)_Unit_Systems.ipynb +++ /dev/null @@ -1,491 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "By default, the results of most calculations in yt are expressed in a \"centimeters-grams-seconds\" (CGS) set of units. This includes the values of derived fields and aliased fields.\n", - "\n", - "However, this system of units may not be the most natural for a given dataset or an entire class of datasets. 
For this reason, yt provides the ability to define new unit systems and use them in a way that is highly configurable by the end-user. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Unit Systems Available in yt" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Several unit systems are already supplied for use within yt. They are:\n", - "\n", - "* `\"cgs\"`: Centimeters-grams-seconds unit system, with base of `(cm, g, s, K, radian)`. Uses the Gaussian normalization for electromagnetic units. \n", - "* `\"mks\"`: Meters-kilograms-seconds unit system, with base of `(m, kg, s, K, radian, A)`.\n", - "* `\"imperial\"`: Imperial unit system, with base of `(mile, lbm, s, R, radian)`.\n", - "* `\"galactic\"`: \"Galactic\" unit system, with base of `(kpc, Msun, Myr, K, radian)`.\n", - "* `\"solar\"`: \"Solar\" unit system, with base of `(AU, Mearth, yr, K, radian)`. \n", - "* `\"planck\"`: Planck natural units $(\\hbar = c = G = k_B = 1)$, with base of `(l_pl, m_pl, t_pl, T_pl, radian)`. \n", - "* `\"geometrized\"`: Geometrized natural units $(c = G = 1)$, with base of `(l_geom, m_geom, t_geom, K, radian)`. \n", - "\n", - "We can examine these unit systems by querying them from the `unit_system_registry`. For example, we can look at the default CGS system:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import yt\n", - "yt.unit_system_registry[\"cgs\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that we have two sets of units that this system defines: \"base\" and \"other\" units. The \"base\" units are the set of units from which all other units in the system are composed of, such as centimeters, grams, and seconds. The \"other\" units are compound units which fields with specific dimensionalities are converted to, such as ergs, dynes, gauss, and electrostatic units (esu). \n", - "\n", - "We see a similar setup for the MKS system, except that in this case, there is a base unit of current, the Ampere:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "yt.unit_system_registry[\"mks\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also look at the imperial system:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "yt.unit_system_registry[\"imperial\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "and the \"galactic\" system as well:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "yt.unit_system_registry[\"galactic\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Converting `YTArrays` to the Different Unit Systems" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Choosing a Unit System When Loading a Dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When a dataset is `load`ed, a unit system may be specified. When this happens, all aliased and derived fields will be converted to the units of the given system. 
The default is `\"cgs\"`.\n", - "\n", - "For example, we can specify that the fields from a FLASH dataset can be expressed in MKS units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds_flash = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100\", unit_system=\"mks\")\n", - "sp = ds_flash.sphere(\"c\", (100.,\"kpc\"))\n", - "print (sp[\"density\"]) # This is an alias for (\"flash\",\"dens\")\n", - "print (sp[\"pressure\"]) # This is an alias for (\"flash\",\"pres\")\n", - "print (sp[\"angular_momentum_x\"]) # This is a derived field\n", - "print (sp[\"kinetic_energy\"]) # This is also a derived field" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Aliased fields are converted to the requested unit system, but the on-disk fields that they correspond to remain in their original (code) units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "print (sp[\"flash\",\"dens\"]) # This is aliased to (\"gas\", \"density\")\n", - "print (sp[\"flash\",\"pres\"]) # This is aliased to (\"gas\", \"pressure\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can take an `Enzo` dataset and express it in `\"galactic\"` units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds_enzo = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\", unit_system=\"galactic\")\n", - "sp = ds_enzo.sphere(\"c\", (20.,\"kpc\"))\n", - "print (sp[\"density\"])\n", - "print (sp[\"pressure\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also express all of the fields associated with a dataset in that dataset's system of \"code\" units. Though the on-disk fields are already in these units, this means that we can express even derived fields in code units as well:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "ds_chombo = yt.load(\"KelvinHelmholtz/data.0004.hdf5\", unit_system=\"code\")\n", - "dd = ds_chombo.all_data()\n", - "print (dd[\"density\"])\n", - "print (dd[\"kinetic_energy\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Defining Fields So That They Can Use the Different Unit Systems" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you define a new derived field for use in yt and wish to make the different unit systems available to it, you will need to specify this when calling `add_field`. Suppose I defined a new field called `\"momentum_x\"` and wanted it to have general units. I would have to set it up in this fashion, using the `unit_system` attribute of the dataset and querying it for the appropriate dimensions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "mom_units = ds_flash.unit_system[\"velocity\"]*ds_flash.unit_system[\"density\"]\n", - "def _momentum_x(field, data):\n", - " return data[\"density\"]*data[\"velocity_x\"]\n", - "ds_flash.add_field((\"gas\",\"momentum_x\"), function=_momentum_x, units=mom_units)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, the field will automatically be expressed in whatever units the dataset was called with. 
In this case, it was MKS:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "slc = yt.SlicePlot(ds_flash, \"z\", [\"momentum_x\"], width=(300.,\"kpc\"))\n", - "slc.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that the momentum density has been plotted with the correct MKS units of $\\mathrm{kg/(m^2\\cdot{s})}$." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you don't create a derived field from a dataset but instead use `yt.add_field`, and still want to use the unit system of that dataset for the units, the only option at present is to set `units=\"auto\"` in the call to `yt.add_field` and the `dimensions` keyword to the correct dimensions for the field:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import clight\n", - "\n", - "def _rest_energy(field, data):\n", - " return data[\"cell_mass\"]*clight*clight\n", - "yt.add_field((\"gas\",\"rest_energy\"), function=_rest_energy, units=\"auto\", dimensions=\"energy\")\n", - "\n", - "ds_flash2 = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\", unit_system=\"galactic\")\n", - "\n", - "sp = ds_flash2.sphere(\"c\", (100.,\"kpc\"))\n", - "sp[\"rest_energy\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Obtaining Physical Constants in a Specific Unit System" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Each unit system provides the ability to obtain any physical constant in yt's physical constants database in the base units of that system via the `constants` attribute of the unit system. For example, to obtain the value of Newton's universal constant of gravitation in different base units:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "for name in [\"cgs\", \"mks\", \"imperial\", \"planck\", \"geometrized\"]:\n", - " unit_system = yt.unit_system_registry[name]\n", - " print (name, unit_system.constants.G)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Equivalently, one could import a physical constant from the main database and convert it using `in_base`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "from yt.units import G\n", - "print (G.in_base(\"mks\"))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Defining Your Own Unit System" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You are not limited to using the unit systems already defined by yt. A new unit system can be defined by creating a new `UnitSystem` instance. For example, to create a unit system where the default units are in millimeters, centigrams, and microseconds:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "small_unit_system = yt.UnitSystem(\"small\", \"mm\", \"cg\", \"us\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where the required arguments are a `name` for the unit system, and the `length_unit`, `mass_unit`, and `time_unit` for the unit system, which serve as the \"base\" units to convert everything else to. 
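As a small follow-up sketch (not from the original notebook; it assumes the unit-system lookup used by `in_base` also resolves user-defined system names), the newly defined system can be exercised directly on quantities:

```python
import yt
from yt import YTQuantity

# Re-create the "small" system and express a couple of quantities in its
# base units of millimeters, centigrams, and microseconds.
yt.UnitSystem("small", "mm", "cg", "us")

print(YTQuantity(1.0, "km").in_base("small"))  # length in mm
print(YTQuantity(1.5, "g").in_base("small"))   # mass in cg
```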
Once a unit system instance is created, it is automatically added to the `unit_system_registry` so that it may be used throughout yt:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "yt.unit_system_registry[\"small\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that the base units for the dimensions of angle and temperature have been automatically set to radians and Kelvin, respectively. If desired, these can be specified using optional arguments when creating the `UnitSystem` object:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "wacky_unit_system = yt.UnitSystem(\"wacky\", \"mile\", \"kg\", \"day\", temperature_unit=\"R\", angle_unit=\"deg\")\n", - "wacky_unit_system" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Though it will rarely be necessary, an MKS-style system of units where a unit of current can be specified as a base unit can also be created using the `current_mks` optional argument:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "mksish_unit_system = yt.UnitSystem(\"mksish\", \"dm\", \"ug\", \"ks\", current_mks_unit=\"mA\")\n", - "mksish_unit_system" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Initializing a `UnitSystem` object only sets up the base units. In this case, all fields will be converted to combinations of these base units based on their dimensionality. However, you may want to specify that fields of a given dimensionality use a compound unit by default instead. For example, you might prefer that in the `\"small\"` unit system that pressures be represented in microdynes per millimeter squared. 
To do this, set these to be the units of the `\"pressure\"` dimension explicitly:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "small_unit_system[\"pressure\"] = \"udyne/mm**2\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now look at the `small_unit_system` object and see that these units are now defined for pressure in the \"Other Units\" category:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "small_unit_system" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can do the same for a few other dimensionalities:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "small_unit_system[\"magnetic_field_cgs\"] = \"mG\"\n", - "small_unit_system[\"specific_energy\"] = \"cerg/ug\"\n", - "small_unit_system[\"velocity\"] = \"cm/s\"\n", - "small_unit_system" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/doc/source/analyzing/units/comoving_units_and_code_units.rst b/doc/source/analyzing/units/comoving_units_and_code_units.rst deleted file mode 100644 index aef984b7e6c..00000000000 --- a/doc/source/analyzing/units/comoving_units_and_code_units.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. _comoving_units_and_code_units: - -Comoving units and code units -============================= - -.. notebook:: 3)_Comoving_units_and_code_units.ipynb - -.. _cosmological-units: - -Units for Cosmological Datasets -------------------------------- - -yt has additional capabilities to handle the comoving coordinate system used -internally in cosmological simulations. Simulations that use comoving -coordinates, all length units have three other counterparts corresponding to -comoving units, scaled comoving units, and scaled proper units. In all cases -'scaled' units refer to scaling by the reduced Hubble parameter - i.e. the length -unit is what it would be in a universe where Hubble's parameter is 100 km/s/Mpc. - -To access these different units, yt has a common naming system. Scaled units are denoted by -dividing by the scaled Hubble parameter ``h`` (which is in itself a unit). Comoving -units are denoted by appending ``cm`` to the end of the unit name. - -Using the parsec as an example, - -``pc`` - Proper parsecs, :math:`\rm{pc}`. - -``pccm`` - Comoving parsecs, :math:`\rm{pc}/(1+z)`. - -``pccm/h`` - Comoving parsecs normalized by the scaled Hubble constant, :math:`\rm{pc}/h/(1+z)`. - -``pc/h`` - Proper parsecs, normalized by the scaled Hubble constant, :math:`\rm{pc}/h`. diff --git a/doc/source/analyzing/units/comparing_units_from_different_datasets.rst b/doc/source/analyzing/units/comparing_units_from_different_datasets.rst deleted file mode 100644 index 279e1c7a1a2..00000000000 --- a/doc/source/analyzing/units/comparing_units_from_different_datasets.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. 
_comparing_units_from_different_datasets: - -Comparing units from different datasets -======================================= - -.. notebook:: 4)_Comparing_units_from_different_datasets.ipynb diff --git a/doc/source/analyzing/units/fields_and_unit_conversion.rst b/doc/source/analyzing/units/fields_and_unit_conversion.rst deleted file mode 100644 index 96515e9f37e..00000000000 --- a/doc/source/analyzing/units/fields_and_unit_conversion.rst +++ /dev/null @@ -1,74 +0,0 @@ -.. _fields_and_unit_conversion: - -Fields and Unit Conversion -========================== - -.. notebook:: 2)_Fields_and_unit_conversion.ipynb - -Derived Fields --------------- - -.. This needs to be added outside the notebook since user-defined derived fields - require a 'fresh' kernel. - -The following example creates a derived field for the square root of the cell -volume. - -.. notebook-cell:: - - import yt - import numpy as np - - # Function defining the derived field - def root_cell_volume(field, data): - return np.sqrt(data['cell_volume']) - - # Load the dataset - ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044') - - # Add the field to the dataset, linking to the derived field function and - # units of the field - ds.add_field(("gas", "root_cell_volume"), units="cm**(3/2)", function=root_cell_volume) - - # Access the derived field like any other field - ad = ds.all_data() - ad['root_cell_volume'] - -No special unit logic needs to happen inside of the function - `np.sqrt` will -convert the units of the `density` field appropriately: - -.. notebook-cell:: - :skip_exceptions: - - import yt - import numpy as np - - ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044') - ad = ds.all_data() - - print(ad['cell_volume'].in_cgs()) - print(np.sqrt(ad['cell_volume'].in_cgs())) - -That said, it is necessary to specify the units in the call to the -:code:`add_field` function. Not only does this ensure the returned units -will be exactly what you expect, it also allows an in-place conversion of units, -just in case the function returns a field with dimensionally equivalent units. - -For example, let's redo the above example but ask for units of -:code:`Mpc**(3/2)`: - -.. notebook-cell:: - - import yt - import numpy as np - - def root_cell_volume(field, data): - return np.sqrt(data['cell_volume']) - - ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044') - - # Here we set the default units to Mpc^(3/2) - ds.add_field(("gas", "root_cell_volume"), units="Mpc**(3/2)", function=root_cell_volume) - - ad = ds.all_data() - ad['root_cell_volume'] diff --git a/doc/source/analyzing/units/index.rst b/doc/source/analyzing/units/index.rst deleted file mode 100644 index 3e1dc9258fc..00000000000 --- a/doc/source/analyzing/units/index.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _units: - -Symbolic Units -============== - -This section describes yt's symbolic unit capabilities. This is provided as -quick introduction for those who are already familiar with yt but want to learn -more about the unit system. Please see :ref:`analyzing` and :ref:`visualizing` -for more detail about querying, analyzing, and visualizing data in yt. - -Each subsection is a notebook. To open these notebooks in a "live" IPython session -and execute the documentation interactively, you need to download the repository -and start the IPython notebook. - -You will then need to navigate to :code:`$YT_GIT/doc/source/units` (where $YT_GIT -is the location of a clone of the yt git repository), and then start an -IPython notebook server: - -.. code:: bash - - $ ipython notebook - -.. 
warning:: The pre-filled out notebooks are *far* less fun than running them - yourself! - -Here are the notebooks, which have been filled in for inspection: - -.. toctree:: - :maxdepth: 1 - - symbolic_units - fields_and_unit_conversion - comoving_units_and_code_units - comparing_units_from_different_datasets - units_and_plotting - unit_equivalencies - unit_systems - -.. note:: - - The notebooks use sample datasets that are available for download at - https://yt-project.org/data. See :ref:`quickstart-introduction` for more - details. - -Let us know if you would like to contribute other example notebooks, or have -any suggestions for how these can be improved. diff --git a/doc/source/analyzing/units/symbolic_units.rst b/doc/source/analyzing/units/symbolic_units.rst deleted file mode 100644 index ad94edf1bcf..00000000000 --- a/doc/source/analyzing/units/symbolic_units.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. _symbolic_units: - -Symbolic units: :code:`yt.units` -================================ - -.. notebook:: 1)_Symbolic_Units.ipynb - :skip_exceptions: diff --git a/doc/source/analyzing/units/unit_equivalencies.rst b/doc/source/analyzing/units/unit_equivalencies.rst deleted file mode 100644 index b09fc2a30b6..00000000000 --- a/doc/source/analyzing/units/unit_equivalencies.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. _unit_equivalencies: - -Unit Equivalencies -================== - -.. notebook:: 6)_Unit_Equivalencies.ipynb - :skip_exceptions: diff --git a/doc/source/analyzing/units/unit_systems.rst b/doc/source/analyzing/units/unit_systems.rst deleted file mode 100644 index 18c23a37ab7..00000000000 --- a/doc/source/analyzing/units/unit_systems.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. _unit_systems: - -Unit Systems -============ - -.. notebook:: 7)_Unit_Systems.ipynb -:skip_exceptions: diff --git a/doc/source/analyzing/units/units_and_plotting.rst b/doc/source/analyzing/units/units_and_plotting.rst deleted file mode 100644 index f2916c99e42..00000000000 --- a/doc/source/analyzing/units/units_and_plotting.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. _units_and_plotting: - -Units and Plotting -================== - -.. notebook:: 5)_Units_and_plotting.ipynb - :skip_exceptions: diff --git a/doc/source/conf.py b/doc/source/conf.py index a7397afa81e..71f2c500727 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -68,9 +68,9 @@ # built documents. # # The short X.Y version. -version = '3.7-dev' +version = '4.0-dev' # The full version, including alpha/beta/rc tags. -release = '3.7-dev' +release = '4.0-dev' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
@@ -252,6 +252,9 @@ 'https://matplotlib.org/': None, 'https://docs.astropy.org/en/stable': None, 'https://pandas.pydata.org/pandas-docs/stable': None, + 'trident': ('https://trident.readthedocs.io/en/latest/', None), + 'yt_astro_analysis': ('https://yt-astro-analysis.readthedocs.io/en/latest/', None), + 'yt_attic': ('https://yt-attic.readthedocs.io/en/latest/', None), } if not on_rtd: diff --git a/doc/source/cookbook/Halo_Analysis.ipynb b/doc/source/cookbook/Halo_Analysis.ipynb deleted file mode 100644 index e5abf17ceec..00000000000 --- a/doc/source/cookbook/Halo_Analysis.ipynb +++ /dev/null @@ -1,434 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Full Halo Analysis" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Creating a Catalog" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we put everything together to perform some realistic analysis. First we load a full simulation dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import yt\n", - "from yt.analysis_modules.halo_analysis.api import *\n", - "import tempfile\n", - "import shutil\n", - "import os\n", - "\n", - "# Create temporary directory for storing files\n", - "tmpdir = tempfile.mkdtemp()\n", - "\n", - "# Load the data set with the full simulation information\n", - "data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we load a rockstar halos binary file. This is the output from running the rockstar halo finder on the dataset loaded above. It is also possible to require the HaloCatalog to find the halos in the full simulation dataset at runtime by specifying a `finder_method` keyword." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Load the rockstar data files\n", - "halos_ds = yt.load('rockstar_halos/halos_0.0.bin')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "From these two loaded datasets we create a halo catalog object. No analysis is done at this point, we are simply defining an object we can add analysis tasks to. These analysis tasks will be run in the order they are added to the halo catalog object." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Instantiate a catalog using those two parameter files\n", - "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n", - " output_dir=os.path.join(tmpdir, 'halo_catalog'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The first analysis task we add is a filter for the most massive halos; those with masses great than $10^{14}~M_\\odot$. Note that all following analysis will only be performed on these massive halos and we will not waste computational time calculating quantities for halos we are not interested in. This is a result of adding this filter first. If we had called `add_filter` after some other `add_quantity` or `add_callback` to the halo catalog, the quantity and callback calculations would have been performed for all halos, not just those which pass the filter." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Filter out less massive halos\n", - "hc.add_filter(\"quantity_value\", \"particle_mass\", \">\", 1e14, \"Msun\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Finding Radial Profiles" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our first analysis goal is going to be constructing radial profiles for our halos. We would like these profiles to be in terms of the virial radius. Unfortunately we have no guarantee that values of center and virial radius recorded by the halo finder are actually physical. Therefore we should recalculate these quantities ourselves using the values recorded by the halo finder as a starting point." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The first step is going to be creating a sphere object that we will create radial profiles along. This attaches a sphere data object to every halo left in the catalog." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# attach a sphere object to each halo whose radius extends to twice the radius of the halo\n", - "hc.add_callback(\"sphere\", factor=2.0)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cumulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# use the sphere to calculate radial profiles of gas density weighted by cell volume in terms of the virial radius\n", - "hc.add_callback(\"profile\", [\"radius\"],\n", - " [(\"gas\", \"overdensity\")],\n", - " weight_field=\"cell_volume\", \n", - " accumulation=True,\n", - " storage=\"virial_quantities_profiles\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we calculate the virial radius of halo using the sphere object. As this is a callback, not a quantity, the virial radius will not be written out with the rest of the halo properties in the final halo catalog. This also has a `profile_storage` keyword to specify where the radial profiles are stored that will allow the callback to calculate the relevant virial quantities. We supply this keyword with the same string we gave to `storage` in the last `profile` callback." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Define a virial radius for the halo.\n", - "hc.add_callback(\"virial_quantities\", [\"radius\"], \n", - " profile_storage = \"virial_quantities_profiles\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have calculated the virial radius, we delete the profiles we used to find it." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "hc.add_callback('delete_attribute','virial_quantities_profiles')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have calculated virial quantities we can add a new sphere that is aware of the virial radius we calculated above." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "hc.add_callback('sphere', radius_field='radius_200', factor=5,\n", - " field_parameters=dict(virial_radius=('quantity', 'radius_200')))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Using this new sphere, we calculate a gas temperature profile along the virial radius, weighted by the cell mass." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "hc.add_callback('profile', 'virial_radius_fraction', [('gas','temperature')],\n", - " storage='virial_profiles',\n", - " weight_field='cell_mass', \n", - " accumulation=False, output_dir='profiles')\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As profiles are not quantities they will not automatically be written out in the halo catalog; thus in order to be reloadable we must write them out explicitly through a callback of `save_profiles`. This makes sense because they have an extra dimension for each halo along the profile axis. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "# Save the profiles\n", - "hc.add_callback(\"save_profiles\", storage=\"virial_profiles\", output_dir=\"profiles\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We then create the halo catalog. Remember, no analysis is done before this call to create. By adding callbacks and filters we are simply queuing up the actions we want to take that will all run now." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "hc.create()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Reloading HaloCatalogs" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally we load these profiles back in and make a pretty plot. It is not strictly necessary to reload the profiles in this notebook, but we show this process here to illustrate that this step may be performed completely separately from the rest of the script. This workflow allows you to create a single script that will allow you to perform all of the analysis that requires the full dataset. The output can then be saved in a compact form where only the necessarily halo quantities are stored. 
You can then download this smaller dataset to a local computer and run any further non-computationally intense analysis and design the appropriate plots." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can load a previously saved halo catalog by using the `load` command. We then create a `HaloCatalog` object from just this dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "halos_ds = yt.load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n", - "\n", - "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n", - " output_dir=os.path.join(tmpdir, 'halo_catalog'))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " Just as profiles are saved separately through the `save_profiles` callback they also must be loaded separately using the `load_profiles` callback." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "hc_reloaded.add_callback('load_profiles', storage='virial_profiles',\n", - " output_dir='profiles')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Calling `load` is the equivalent of calling `create` earlier, but defaults to not saving new information. This means that the callback to `load_profiles` is not run until we call `load` here." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "hc_reloaded.load()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Plotting Radial Profiles" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the future ProfilePlot will be able to properly interpret the loaded profiles of `Halo` and `HaloCatalog` objects, but this functionality is not yet implemented. In the meantime, we show a quick method of viewing a profile for a single halo." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The individual `Halo` objects contained in the `HaloCatalog` can be accessed through the `halo_list` attribute. This gives us access to the dictionary attached to each halo where we stored the radial profiles." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "halo = hc_reloaded.halo_list[0]\n", - "\n", - "radius = halo.virial_profiles[u\"('index', 'virial_radius_fraction')\"]\n", - "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]\n", - "\n", - "# Remove output files, that are no longer needed\n", - "shutil.rmtree(tmpdir)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here we quickly use matplotlib to create a basic plot of the radial profile of this halo. When `ProfilePlot` is properly configured to accept Halos and HaloCatalogs the full range of yt plotting tools will be accessible." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "plt.plot(np.array(radius), np.array(temperature))\n", - "\n", - "plt.semilogy()\n", - "plt.xlabel(r'$\\rm{R/R_{vir}}$')\n", - "plt.ylabel(r'$\\rm{Temperature\\/\\/(K)}$')\n", - "\n", - "plt.show()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.5.1" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/doc/source/cookbook/calculating_information.rst b/doc/source/cookbook/calculating_information.rst index a9e3804b64d..0872b814f93 100644 --- a/doc/source/cookbook/calculating_information.rst +++ b/doc/source/cookbook/calculating_information.rst @@ -56,16 +56,6 @@ information. .. yt_cookbook:: simulation_analysis.py -Smoothed Fields -~~~~~~~~~~~~~~~ - -This recipe demonstrates how to create a smoothed field, -corresponding to a user-created derived field, using the -:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method. -See :ref:`gadget-notebook` for how to work with Gadget data. - -.. yt_cookbook:: smoothed_field.py - .. _cookbook-time-series-analysis: diff --git a/doc/source/cookbook/complex_plots.rst b/doc/source/cookbook/complex_plots.rst index 4646200a871..11a1d157c6d 100644 --- a/doc/source/cookbook/complex_plots.rst +++ b/doc/source/cookbook/complex_plots.rst @@ -15,6 +15,73 @@ See :ref:`slice-plots` for more information. .. yt_cookbook:: multi_width_image.py +.. _image-resolution-primer: + +Varying the resolution of an image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This illustrates the various parameters that control the resolution +of an image, including the (deprecated) refinement level, the size of +the :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`, +and the number of pixels in the output image. + +In brief, there are three parameters that control the final resolution, +with a fourth entering for particle data that is deposited onto a mesh +(i.e. pre-4.0). Those are: + +1. `buff_size`, which can be altered with +:meth:`~yt.visualization.plot_window.PlotWindow.set_buff_size`, which +is inherited by +:class:`~yt.visualization.plot_window.AxisAlignedSlicePlot`, +:class:`~yt.visualization.plot_window.OffAxisSlicePlot`, +:class:`~yt.visualization.plot_window.ProjectionPlot`, and +:class:`~yt.visualization.plot_window.OffAxisProjectionPlot`. This +controls the number of resolution elements in the +:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`, +which can be thought of as the number of individually colored +squares (on a side) in a 2D image. `buff_size` can be set +after creating the image with +:meth:`~yt.visualization.plot_window.PlotWindow.set_buff_size`, +or during image creation with the `buff_size` argument to any +of the four preceding classes. + +2. 
`figure_size`, which can be altered with either
+:meth:`~yt.visualization.plot_container.PlotContainer.set_figure_size`
+or with :meth:`~yt.visualization.plot_container.PlotWindow.set_window_size`
+(the latter simply calls
+:meth:`~yt.visualization.plot_container.PlotContainer.set_figure_size`),
+or can be set during image creation with the `window_size` argument.
+This sets the size of the final image (including the visualization and,
+if applicable, the axes and colorbar as well) in inches.
+
+3. `dpi`, i.e. the dots-per-inch in your final file, which can also
+be thought of as the actual resolution of your image. This can
+only be set on save via the `mpl_kwargs` parameter to
+:meth:`~yt.visualization.plot_container.PlotContainer.save`. The
+`dpi` and `figure_size` together set the true resolution of your
+image (final image will be `dpi` :math:`*` `figure_size` pixels on a
+side), so if these are set too low, then your `buff_size` will not
+matter. On the other hand, increasing these without increasing
+`buff_size` accordingly will simply blow up your resolution
+elements to fill several real pixels.
+
+4. (only for meshed particle data) `n_ref`, the maximum number of
+particles in a cell in the oct-tree allowed before it is refined
+(removed in yt-4.0 as particle data is no longer deposited onto
+an oct-tree). For particle data, `n_ref` effectively sets the
+underlying resolution of your simulation. Regardless, for either
+grid data or deposited particle data, your image will never be
+higher resolution than your simulation data. In other words,
+if you are visualizing a region 50 kpc across that includes
+data that reaches a resolution of 100 pc, then there's no reason
+to set a `buff_size` (or a `dpi` :math:`*` `figure_size`) above
+50 kpc / 100 pc = 500.
+
+The below script demonstrates how each of these can be varied.
+
+.. yt_cookbook:: image_resolution.py
+
+
 Multipanel with Axes Labels
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/cookbook/cosmological_analysis.rst b/doc/source/cookbook/cosmological_analysis.rst
deleted file mode 100644
index a56b692e12a..00000000000
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-Cosmological Analysis
----------------------
-
-These scripts demonstrate some basic and more advanced analysis that can be
-performed on cosmological simulation datasets. Most of the following
-recipes are derived from functionality in yt's :ref:`analysis-modules`.
-
-Plotting Halos
-~~~~~~~~~~~~~~
-
-This is a mechanism for plotting circles representing identified particle halos
-on an image.
-See :ref:`halo-analysis` and :ref:`annotate-halos` for more information.
-
-.. yt_cookbook:: halo_plotting.py
-
-.. _cookbook-rockstar-nested-grid:
-
-Running Rockstar to Find Halos on Multi-Resolution-Particle Datasets
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The version of Rockstar installed with yt does not have the capability
-to work on datasets with particles of different masses. Unfortunately,
-many simulations possess particles of different masses, notably cosmological
-zoom datasets. This recipe uses Rockstar in two different ways to generate a
-HaloCatalog from the highest resolution dark matter particles (the ones
-inside the zoom region). It then overlays some of those halos on a projection
-as a demonstration. See :ref:`rockstar` and :ref:`annotate-halos` for
-more information.
-
-.. yt_cookbook:: rockstar_nest.py
-
-.. 
_cookbook-halo_finding: - -Halo Profiling and Custom Analysis -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This script demonstrates the use of the halo catalog to create radial -profiles for each halo in a cosmological dataset. -See :ref:`halo_catalog` for more information. - -.. yt_cookbook:: halo_profiler.py - -.. _cookbook-light_cone: - -Light Cone Projection -~~~~~~~~~~~~~~~~~~~~~ - -This script creates a light cone projection, a synthetic observation -that stacks together projections from multiple datasets to extend over -a given redshift interval. -See :ref:`light-cone-generator` for more information. - -.. yt_cookbook:: light_cone_projection.py - -.. _cookbook-light_ray: - -Light Ray -~~~~~~~~~ - -This script demonstrates how to make a synthetic quasar sight line that -extends over multiple datasets and can be used to generate a synthetic -absorption spectrum. -See :ref:`light-ray-generator` and :ref:`absorption_spectrum` for more information. - -.. yt_cookbook:: light_ray.py - -.. _cookbook-single-dataset-light-ray: - -Single Dataset Light Ray -~~~~~~~~~~~~~~~~~~~~~~~~ - -This script demonstrates how to make a light ray from a single dataset. - -.. yt_cookbook:: single_dataset_light_ray.py - -Creating and Fitting Absorption Spectra -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This script demonstrates how to use light rays to create corresponding -absorption spectra and then fit the spectra to find absorbing -structures. -See :ref:`light-ray-generator` and :ref:`absorption_spectrum` for more information. - -.. yt_cookbook:: fit_spectrum.py diff --git a/doc/source/cookbook/fit_spectrum.py b/doc/source/cookbook/fit_spectrum.py deleted file mode 100644 index 3fc11c44851..00000000000 --- a/doc/source/cookbook/fit_spectrum.py +++ /dev/null @@ -1,104 +0,0 @@ -import yt -from yt.analysis_modules.cosmological_observation.light_ray.api import LightRay -from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum -from yt.analysis_modules.absorption_spectrum.api import generate_total_fit - -# Define a field to simulate OVI based on a constant relationship to HI -# Do *NOT* use this for science, because this is not how OVI actually behaves; -# it is just an example. - -def _OVI_number_density(field, data): - return data['H_number_density']*2.0 - -# Define a function that will accept a ds and add the new field -# defined above. This will be given to the LightRay below. -def setup_ds(ds): - ds.add_field(("gas","O_p5_number_density"), - function=_OVI_number_density, - units="cm**-3", sampling_type="cell") - -# Define species and associated parameters to add to continuum -# Parameters used for both adding the transition to the spectrum -# and for fitting -# Note that for single species that produce multiple lines -# (as in the OVI doublet), 'numLines' will be equal to the number -# of lines, and f,gamma, and wavelength will have multiple values. 
- -HI_parameters = {'name': 'HI', - 'field': 'H_number_density', - 'f': [.4164], - 'Gamma': [6.265E8], - 'wavelength': [1215.67], - 'mass': 1.00794, - 'numLines': 1, - 'maxN': 1E22, 'minN': 1E11, - 'maxb': 300, 'minb': 1, - 'maxz': 6, 'minz': 0, - 'init_b': 30, - 'init_N': 1E14} - -OVI_parameters = {'name': 'OVI', - 'field': 'O_p5_number_density', - 'f': [.1325, .06580], - 'Gamma': [4.148E8, 4.076E8], - 'wavelength': [1031.9261, 1037.6167], - 'mass': 15.9994, - 'numLines': 2, - 'maxN': 1E17, 'minN': 1E11, - 'maxb': 300, 'minb': 1, - 'maxz': 6, 'minz': 0, - 'init_b': 20, - 'init_N': 1E12} - -species_dicts = {'HI': HI_parameters, 'OVI': OVI_parameters} - -# Create a LightRay object extending from z = 0 to z = 0.1 -# and use only the redshift dumps. -lr = LightRay('enzo_cosmology_plus/AMRCosmology.enzo', - 'Enzo', 0.0, 0.1, - use_minimum_datasets=True, - time_data=False - ) - -# Get all fields that need to be added to the light ray -fields = ['temperature'] -for s, params in species_dicts.items(): - fields.append(params['field']) - -# Make a light ray, and set njobs to -1 to use one core -# per dataset. -lr.make_light_ray(seed=123456780, - solution_filename='lightraysolution.txt', - data_filename='lightray.h5', - fields=fields, setup_function=setup_ds, - njobs=-1) - -# Create an AbsorptionSpectrum object extending from -# lambda = 900 to lambda = 1800, with 10000 pixels -sp = AbsorptionSpectrum(900.0, 1400.0, 50000) - -# Iterate over species -for s, params in species_dicts.items(): - # Iterate over transitions for a single species - for i in range(params['numLines']): - # Add the lines to the spectrum - sp.add_line(s, params['field'], - params['wavelength'][i], params['f'][i], - params['Gamma'][i], params['mass'], - label_threshold=1.e10) - - -# Make and save spectrum -wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.h5', - line_list_file='lines.txt', - use_peculiar_velocity=True) - - -# Define order to fit species in -order_fits = ['OVI', 'HI'] - -# Fit spectrum and save fit -fitted_lines, fitted_flux = generate_total_fit(wavelength, - flux, order_fits, species_dicts, - output_file='spectrum_fit.h5') diff --git a/doc/source/cookbook/halo_analysis_example.rst b/doc/source/cookbook/halo_analysis_example.rst deleted file mode 100644 index 08e9c978624..00000000000 --- a/doc/source/cookbook/halo_analysis_example.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. _halo-analysis-example: - -Worked Example of Halo Analysis -------------------------------- - -.. 
notebook:: Halo_Analysis.ipynb diff --git a/doc/source/cookbook/halo_plotting.py b/doc/source/cookbook/halo_plotting.py index 2aceafcc557..c4ee65d7243 100644 --- a/doc/source/cookbook/halo_plotting.py +++ b/doc/source/cookbook/halo_plotting.py @@ -1,5 +1,4 @@ import yt -from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog # Load the dataset ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") @@ -7,11 +6,7 @@ # Load the halo list from a rockstar output for this dataset halos = yt.load('rockstar_halos/halos_0.0.bin') -# Create the halo catalog from this halo list -hc = HaloCatalog(halos_ds=halos) -hc.load() - # Create a projection with the halos overplot on top p = yt.ProjectionPlot(ds, "x", "density") -p.annotate_halos(hc) +p.annotate_halos(halos) p.save() diff --git a/doc/source/cookbook/halo_profiler.py b/doc/source/cookbook/halo_profiler.py deleted file mode 100644 index 0a8421f892b..00000000000 --- a/doc/source/cookbook/halo_profiler.py +++ /dev/null @@ -1,35 +0,0 @@ -import yt -from yt.analysis_modules.halo_analysis.api import HaloCatalog - -# Load the data set with the full simulation information -# and rockstar halos -data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') -halos_ds = yt.load('rockstar_halos/halos_0.0.bin') - -# Instantiate a catalog using those two parameter files -hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds) - -# Filter out less massive halos -hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun") - -# This recipe creates a spherical data container, computes -# radial profiles, and calculates r_200 and M_200. -hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"]) - -# Create a sphere container with radius 5x r_200. -field_params = dict(virial_radius=('quantity', 'radius_200')) -hc.add_callback('sphere', radius_field='radius_200', factor=5, - field_parameters=field_params) - -# Compute profiles of T vs. r/r_200 -hc.add_callback('profile', ['virial_radius_fraction'], - [('gas', 'temperature')], - storage='virial_profiles', - weight_field='cell_mass', - accumulation=False, output_dir='profiles') - -# Save the profiles -hc.add_callback("save_profiles", storage="virial_profiles", - output_dir="profiles") - -hc.create() diff --git a/doc/source/cookbook/image_resolution.py b/doc/source/cookbook/image_resolution.py new file mode 100644 index 00000000000..f3a02c367c5 --- /dev/null +++ b/doc/source/cookbook/image_resolution.py @@ -0,0 +1,64 @@ +import yt +import numpy as np + +# Load the dataset. We'll work with some Gadget data to illustrate all +# the different ways in which the effective resolution can vary. Specifically, +# we'll use the GadgetDiskGalaxy dataset available at +# http://yt-project.org/data/GadgetDiskGalaxy.tar.gz + +# load the data with a refinement criterion of 16 particles per cell +# n.b. -- in yt-4.0, n_ref no longer exists as the data is no longer +# deposited onto a grid. 
At present (03/15/2019), there is no way to +# handle non-gas data in Gadget snapshots, though that is work in progress +if int(yt.__version__[0]) < 4: + # increasing n_ref will result in a "lower resolution" (but faster) image, + # while decreasing it will go the opposite way + ds = yt.load("GadgetDiskGalaxy/snapshot_200.hdf5", n_ref=16) +else: + ds = yt.load("GadgetDiskGalaxy/snapshot_200.hdf5") + +# Create projections of the density (max value in each resolution element in the image): +prj = yt.ProjectionPlot(ds, "x", ("gas", "density"), method='mip', center='max', width=(100, 'kpc')) + +# nicen up the plot by using a better interpolation: +plot = prj.plots[list(prj.plots)[0]] +ax = plot.axes +img = ax.images[0] +img.set_interpolation('bicubic') + +# nicen up the plot by setting the background color to the minimum of the colorbar +prj.set_background_color(('gas', 'density')) + +# vary the buff_size -- the number of resolution elements in the actual visualization +# set it to 2000x2000 +buff_size = 2000 +prj.set_buff_size(buff_size) + +# set the figure size in inches +figure_size = 10 +prj.set_figure_size(figure_size) + +# if the image does not fill the plot (as is default, since the axes and +# colorbar contribute as well), then figuring out the proper dpi for a given +# buff_size and figure_size is non-trivial -- it requires finding the bbox +# for the actual image: +bounding_box = ax.get_position() +# we're going to scale to the larger of the two sides +image_size = figure_size * max([bounding_box.width, bounding_box.height]) +# now save with a dpi that's scaled to the buff_size: +dpi = np.rint(np.ceil(buff_size / image_size)) +prj.save('with_axes_colorbar.png', mpl_kwargs=dict(dpi=dpi)) + +# in the case where the image fills the entire plot (i.e. if the axes and colorbar +# are turned off), it's trivial to figure out the correct dpi from the buff_size and +# figure_size (or vice versa): + +# hide the colorbar: +prj.hide_colorbar() + +# hide the axes, while still keeping the background color correct: +prj.hide_axes(draw_frame=True) + +# save with a dpi that makes sense: +dpi = np.rint(np.ceil(buff_size / figure_size)) +prj.save('no_axes_colorbar.png', mpl_kwargs=dict(dpi=dpi)) \ No newline at end of file diff --git a/doc/source/cookbook/index.rst b/doc/source/cookbook/index.rst index 1908dfd4ac0..39cdae2f2d2 100644 --- a/doc/source/cookbook/index.rst +++ b/doc/source/cookbook/index.rst @@ -29,7 +29,6 @@ Example Scripts simple_plots calculating_information complex_plots - cosmological_analysis constructing_data_objects .. _example-notebooks: @@ -44,7 +43,6 @@ Example Notebooks gadget_notebook owls_notebook ../visualizing/transfer_function_helper - ../analyzing/analysis_modules/sunyaev_zeldovich fits_radio_cubes fits_xray_images geographic_projections diff --git a/doc/source/cookbook/light_cone_projection.py b/doc/source/cookbook/light_cone_projection.py deleted file mode 100644 index 50760d07ee8..00000000000 --- a/doc/source/cookbook/light_cone_projection.py +++ /dev/null @@ -1,37 +0,0 @@ -import yt -from yt.analysis_modules.cosmological_observation.api import \ - LightCone - -# Create a LightCone object extending from z = 0 to z = 0.1. - -# We have already set up the redshift dumps to be -# used for this, so we will not use any of the time -# data dumps. -lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo', - 'Enzo', 0., 0.1, - observer_redshift=0.0, - time_data=False) - -# Calculate a randomization of the solution. 
-lc.calculate_light_cone_solution(seed=123456789, filename="LC/solution.txt") - -# Choose the field to be projected. -field = 'szy' - -# Use the LightCone object to make a projection with a 600 arcminute -# field of view and a resolution of 60 arcseconds. -# Set njobs to -1 to have one core work on each projection -# in parallel. -lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), field, - weight_field=None, - save_stack=True, - save_final_image=True, - save_slice_images=True, - njobs=-1) - -# By default, the light cone projections are kept in the LC directory, -# but this moves them back to the current directory so that they're rendered -# in our cookbook. -import shutil, glob -for file in glob.glob('LC/*png'): - shutil.move(file, '.') diff --git a/doc/source/cookbook/light_ray.py b/doc/source/cookbook/light_ray.py deleted file mode 100644 index 3ef99de5f62..00000000000 --- a/doc/source/cookbook/light_ray.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import yt -from yt.analysis_modules.cosmological_observation.api import \ - LightRay - -# Create a directory for the light rays -if not os.path.isdir("LR"): - os.mkdir('LR') - -# Create a LightRay object extending from z = 0 to z = 0.1 -# and use only the redshift dumps. -lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", - 'Enzo', 0.0, 0.1, - use_minimum_datasets=True, - time_data=False) - -# Make a light ray, and set njobs to -1 to use one core -# per dataset. -lr.make_light_ray(seed=123456789, - solution_filename='LR/lightraysolution.txt', - data_filename='LR/lightray.h5', - fields=['temperature', 'density'], - njobs=-1) - -# Optionally, we can now overplot the part of this ray that intersects -# one output from the source dataset in a ProjectionPlot -ds = yt.load('enzo_tiny_cosmology/RD0004/RD0004') -p = yt.ProjectionPlot(ds, 'z', 'density') -p.annotate_ray(lr) -p.save() diff --git a/doc/source/cookbook/particle_filter.py b/doc/source/cookbook/particle_filter.py index c55fff9558a..415a3946d22 100644 --- a/doc/source/cookbook/particle_filter.py +++ b/doc/source/cookbook/particle_filter.py @@ -48,8 +48,11 @@ def stars_old(pfilter, data): print("Mass of old stars = %g Msun" % mass_old) # Generate 4 projections: gas density, young stars, medium stars, old stars -fields = [('gas', 'density'), ('deposit', 'stars_young_cic'), - ('deposit', 'stars_medium_cic'), ('deposit', 'stars_old_cic')] - -prj = yt.ProjectionPlot(ds, 'z', fields, center="max", width=(100, 'kpc')) -prj.save() +fields = [('stars_young', 'particle_mass'), + ('stars_medium', 'particle_mass'), + ('stars_old', 'particle_mass')] + +prj1 = yt.ProjectionPlot(ds, 'z', ("gas", "density"), center="max", width=(100, "kpc")) +prj1.save() +prj2 = yt.ParticleProjectionPlot(ds, 'z', fields, center="max", width=(100, 'kpc')) +prj2.save() diff --git a/doc/source/cookbook/rockstar_nest.py b/doc/source/cookbook/rockstar_nest.py deleted file mode 100644 index 47293aaa094..00000000000 --- a/doc/source/cookbook/rockstar_nest.py +++ /dev/null @@ -1,76 +0,0 @@ -# You must run this job in parallel. -# There are several mpi flags which can be useful in order for it to work OK. -# It requires at least 3 processors in order to run because of the way in which -# rockstar divides up the work. 
Make sure you have mpi4py installed as per -# http://yt-project.org/docs/dev/analyzing/parallel_computation.html#setting-up-parallel-yt - -# Usage: mpirun -np --mca btl ^openib python this_script.py - -import yt -from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog -from yt.data_objects.particle_filters import add_particle_filter -from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder -yt.enable_parallelism() # rockstar halofinding requires parallelism - -# Create a dark matter particle filter -# This will be code dependent, but this function here is true for enzo - -def DarkMatter(pfilter, data): - filter = data[("all", "particle_type")] == 1 # DM = 1, Stars = 2 - return filter - -add_particle_filter("dark_matter", function=DarkMatter, filtered_type='all', \ - requires=["particle_type"]) - -# First, we make sure that this script is being run using mpirun with -# at least 3 processors as indicated in the comments above. -assert(yt.communication_system.communicators[-1].size >= 3) - -# Load the dataset and apply dark matter filter -fn = "Enzo_64/DD0043/data0043" -ds = yt.load(fn) -ds.add_particle_filter('dark_matter') - -# Determine highest resolution DM particle mass in sim by looking -# at the extrema of the dark_matter particle_mass field. -ad = ds.all_data() -min_dm_mass = ad.quantities.extrema(('dark_matter','particle_mass'))[0] - -# Define a new particle filter to isolate all highest resolution DM particles -# and apply it to dataset -def MaxResDarkMatter(pfilter, data): - return data["particle_mass"] <= 1.01 * min_dm_mass - -add_particle_filter("max_res_dark_matter", function=MaxResDarkMatter, \ - filtered_type='dark_matter', requires=["particle_mass"]) -ds.add_particle_filter('max_res_dark_matter') - -# If desired, we can see the total number of DM and High-res DM particles -#if yt.is_root(): -# print("Simulation has %d DM particles." % -# ad['dark_matter','particle_type'].shape) -# print("Simulation has %d Highest Res DM particles." % -# ad['max_res_dark_matter', 'particle_type'].shape) - -# Run the halo catalog on the dataset only on the highest resolution dark matter -# particles -hc = HaloCatalog(data_ds=ds, finder_method='rockstar', \ - finder_kwargs={'dm_only':True, 'particle_type':'max_res_dark_matter'}) -hc.create() - -# Or alternatively, just run the RockstarHaloFinder and later import the -# output file as necessary. You can skip this step if you've already run it -# once, but be careful since subsequent halo finds will overwrite this data. -#rhf = RockstarHaloFinder(ds, particle_type="max_res_dark_matter") -#rhf.run() -# Load the halo list from a rockstar output for this dataset -# Create a projection with the halos overplot on top -#halos = yt.load('rockstar_halos/halos_0.0.bin') -#hc = HaloCatalog(halos_ds=halos) -#hc.load() - -# Regardless of your method of creating the halo catalog, use it to overplot the -# halos on a projection. 
-p = yt.ProjectionPlot(ds, "x", "density") -p.annotate_halos(hc, annotate_field = 'particle_identifier', width=(10,'Mpc'), factor=2) -p.save() diff --git a/doc/source/cookbook/simple_1d_line_plot.py b/doc/source/cookbook/simple_1d_line_plot.py index 9663df74483..572b7584fe0 100644 --- a/doc/source/cookbook/simple_1d_line_plot.py +++ b/doc/source/cookbook/simple_1d_line_plot.py @@ -5,7 +5,7 @@ # Create a line plot of the variables 'u' and 'v' with 1000 sampling points evenly spaced # between the coordinates (0, 0, 0) and (0, 1, 0) -plot = yt.LinePlot(ds, [('all', 'v'), ('all', 'u')], (0, 0, 0), (0, 1, 0), 1000) +plot = yt.LinePlot(ds, [('all', 'v'), ('all', 'u')], (0., 0., 0.), (0., 1., 0.), 1000) # Add a legend plot.annotate_legend(('all', 'v')) diff --git a/doc/source/cookbook/single_dataset_light_ray.py b/doc/source/cookbook/single_dataset_light_ray.py deleted file mode 100644 index 2fb979b839c..00000000000 --- a/doc/source/cookbook/single_dataset_light_ray.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import yt -from yt.analysis_modules.cosmological_observation.api import \ - LightRay - -ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") -lr = LightRay(ds) - -# With a single dataset, a start_position and -# end_position or trajectory must be given. -# These positions can be defined as xyz coordinates, -# but here we just use the two opposite corners of the -# simulation box. Alternatively, trajectory should -# be given as (r, theta, phi) -lr.make_light_ray(start_position=ds.domain_left_edge, - end_position=ds.domain_right_edge, - solution_filename='lightraysolution.txt', - data_filename='lightray.h5', - fields=['temperature', 'density']) - -# Optionally, we can now overplot this ray on a projection of the source -# dataset -p = yt.ProjectionPlot(ds, 'z', 'density') -p.annotate_ray(lr) -p.save() diff --git a/doc/source/cookbook/smoothed_field.py b/doc/source/cookbook/smoothed_field.py deleted file mode 100644 index 5885eead6a7..00000000000 --- a/doc/source/cookbook/smoothed_field.py +++ /dev/null @@ -1,48 +0,0 @@ -import yt - -# Load a Gadget dataset following the demonstration notebook. -fname = 'GadgetDiskGalaxy/snapshot_200.hdf5' - -unit_base = {'UnitLength_in_cm' : 3.08568e+21, - 'UnitMass_in_g' : 1.989e+43, - 'UnitVelocity_in_cm_per_s' : 100000} - -bbox_lim = 1e5 # kpc - -bbox = [[-bbox_lim, bbox_lim], - [-bbox_lim, bbox_lim], - [-bbox_lim, bbox_lim]] - -ds = yt.load(fname, unit_base=unit_base, bounding_box=bbox) - -# Create a derived field, the metal density. -def _metal_density(field, data): - density = data['PartType0', 'Density'] - Z = data['PartType0', 'metallicity'] - return density * Z - -# Add it to the dataset. -ds.add_field(('PartType0', 'metal_density'), function=_metal_density, - units="g/cm**3", particle_type=True) - - -# Add the corresponding smoothed field to the dataset. -from yt.fields.particle_fields import add_volume_weighted_smoothed_field - -add_volume_weighted_smoothed_field('PartType0', 'Coordinates', 'Masses', - 'SmoothingLength', 'Density', - 'metal_density', ds.field_info) - -# Define the region where the disk galaxy is. (See the Gadget notebook for -# details. Here I make the box a little larger than needed to eliminate the -# margin effect.) -center = ds.arr([31996, 31474, 28970], "code_length") -box_size = ds.quan(250, "code_length") -left_edge = center - box_size/2*1.1 -right_edge = center + box_size/2*1.1 -box = ds.box(left_edge=left_edge, right_edge=right_edge) - -# And make a projection plot! 
-yt.ProjectionPlot(ds, 'z', - ('deposit', 'PartType0_smoothed_metal_density'), - center=center, width=box_size, data_source=box).save() diff --git a/doc/source/developing/building_the_docs.rst b/doc/source/developing/building_the_docs.rst index 9bfc3df69f3..163629af8e3 100644 --- a/doc/source/developing/building_the_docs.rst +++ b/doc/source/developing/building_the_docs.rst @@ -52,7 +52,7 @@ functionality and pare it down to its minimum. Add some comment lines to describe what it is that you're doing along the way. Place this ``.py`` file in the ``source/cookbook/`` directory, and then link to it explicitly in one of the relevant ``.rst`` files in that directory (e.g. ``complex_plots.rst``, -``cosmological_analysis.rst``, etc.), and add some description of what the script +etc.), and add some description of what the script actually does. We recommend that you use one of the `sample data sets `_ in your recipe. When the full docs are built, each of the cookbook recipes is executed dynamically on diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst index 184c2ff9291..fb0224a8607 100644 --- a/doc/source/examining/loading_data.rst +++ b/doc/source/examining/loading_data.rst @@ -6,6 +6,35 @@ Loading Data This section contains information on how to load data into yt, as well as some important caveats about different data formats. +.. _loading-sample-data: + +Sample Data +----------- + +The ``yt`` community has provided a large number of sample datasets, which are +accessible from https://yt-project.org/data/ . ``yt`` also provides a helper +function, ``yt.load_sample``, that can load from a set of sample datasets. The +quickstart notebooks in this documentation make use of this helper. + +The files are, in general, named identically to their listings on the data +catalog page. For instance, you can load ``IsolatedGalaxy`` by executing: + +.. code-block:: python + + import yt + + ds = yt.load_sample("IsolatedGalaxy") + +To find out which datasets are available, you can call ``load_sample`` without any arguments: + +.. code-block:: python + + import yt + + yt.load_sample() + +This will return a list of the dataset names that can be supplied; more information about each dataset can be found on the data catalog page. + .. _loading-amrvac-data: AMRVAC Data @@ -118,8 +147,6 @@ Appropriate errors are thrown for other combinations. .. note Ghost cells exist in .dat files but never read by yt. - - .. _loading-art-data: ART Data @@ -300,11 +327,14 @@ larger than this. Alternative values for the following simulation parameters may be specified using a ``parameters`` dict, accepting the following keys: -* ``Gamma``: ratio of specific heats, Type: Float +* ``gamma``: ratio of specific heats, Type: Float. If not specified, + :math:`\gamma = 5/3` is assumed. * ``geometry``: Geometry type, currently accepts ``"cartesian"`` or - ``"cylindrical"`` + ``"cylindrical"``. Default is ``"cartesian"``. * ``periodicity``: Is the domain periodic? Type: Tuple of boolean values - corresponding to each dimension + corresponding to each dimension. Defaults to ``True`` in all directions. +* ``mu``: mean molecular weight, Type: Float. If not specified, :math:`\mu = 0.6` + (for a fully ionized primordial plasma) is assumed. .. code-block:: python @@ -373,6 +403,18 @@ This means that the yt fields, e.g. ``("gas","density")``, ``("athena_pp","density")``, ``("athena_pp","vel1")``, ``("athena_pp","Bcc1")``, will be in code units. 
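+
+The Athena++ frontend, like the Athena frontend above, accepts a ``parameters``
+dict at load time; the accepted keys and their defaults are described just
+below. A minimal, hypothetical sketch (the dataset path and parameter values
+are placeholders only):
+
+.. code-block:: python
+
+    import yt
+
+    # Override the adiabatic index and the domain periodicity; any keys that
+    # are omitted keep the defaults listed in the following section.
+    ds = yt.load("my_athena_pp_output.athdf",
+                 parameters={"gamma": 4.0/3.0,
+                             "periodicity": (True, False, False)})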
+Alternative values for the following simulation parameters may be specified +using a ``parameters`` dict, accepting the following keys: + +* ``gamma``: ratio of specific heats, Type: Float. If not specified, + :math:`\gamma = 5/3` is assumed. +* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or + ``"cylindrical"``. Default is ``"cartesian"``. +* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values + corresponding to each dimension. Defaults to ``True`` in all directions. +* ``mu``: mean molecular weight, Type: Float. If not specified, :math:`\mu = 0.6` + (for a fully ionized primordial plasma) is assumed. + .. rubric:: Caveats * yt primarily works with primitive variables. If the Athena++ dataset contains @@ -812,46 +854,91 @@ can read FITS image files that have the following (case-insensitive) suffixes: * fts.gz yt can currently read two kinds of FITS files: FITS image files and FITS -binary table files containing positions, times, and energies of X-ray events. +binary table files containing positions, times, and energies of X-ray +events. These are described in more detail below. -Though a FITS image is composed of a single array in the FITS file, -upon being loaded into yt it is automatically decomposed into grids: +Types of FITS Datasets Supported by yt +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. code-block:: python +yt FITS Data Standard +""""""""""""""""""""" - import yt - ds = yt.load("m33_hi.fits") - ds.print_stats() +yt has facilities for creating 2 and 3-dimensional FITS images from derived, +fixed-resolution data products from other datasets. These include images +produced from slices, projections, and 3D covering grids. The resulting +FITS images are fully-describing in that unit, parameter, and coordinate +information is passed from the original dataset. These can be created via the +:class:`~yt.visualization.fits_image.FITSImageData` class and its subclasses. +For information about how to use these special classes, see +:ref:`writing_fits_images`. -.. parsed-literal:: +Once you have produced a FITS file in this fashion, you can load it using +yt and it will be detected as a ``YTFITSDataset`` object, and it can be analyzed +in the same way as any other dataset in yt. - level # grids # cells # cells^3 - ---------------------------------------------- - 0 512 981940800 994 - ---------------------------------------------- - 512 981940800 +Astronomical Image Data +""""""""""""""""""""""" -yt will generate its own domain decomposition, but the number of grids can be -set manually by passing the ``nprocs`` parameter to the ``load`` call: +These files are one of three types: + +* Generic two-dimensional FITS images in sky coordinates +* Three or four-dimensional "spectral cubes" +* *Chandra* event files + +These FITS images typically are in celestial or galactic coordinates, and +for 3D spectral cubes the third axis is typically in velocity, wavelength, +or frequency units. For these datasets, since yt does not yet recognize +non-spatial axes, the coordinates are in units of the image pixels. The +coordinates of these pixels in the WCS coordinate systems will be available +in separate fields. + +Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt +sets the pixel scale as the ``code_length``, certain visualizations (such as +volume renderings) may look extended or distended in ways that are +undesirable. 
To adjust the width in ``code_length`` of the spectral axis, set +``spectral_factor`` equal to a constant which gives the desired scaling, or set +it to ``"auto"`` to make the width the same as the largest axis in the sky +plane: .. code-block:: python - ds = load("m33_hi.fits", nprocs=1024) + ds = yt.load("m33_hi.fits.gz", spectral_factor=0.1) + +For 4D spectral cubes, the fourth axis is assumed to be composed of different +fields altogether (e.g., Stokes parameters for radio data). + +*Chandra* X-ray event data, which is in tabular form, will be loaded as +particle fields in yt, but a grid will be constructed from the WCS +information in the FITS header. There is a helper function, +``setup_counts_fields``, which may be used to make deposited image fields +from the event data for different energy bands (for an example see +:ref:`xray_fits`). + +Generic FITS Images +""""""""""""""""""" + +If the FITS file contains images but does not have adequate header information +to fall into one of the above categories, yt will still load the data, but +the resulting field and/or coordinate information will necessarily be +incomplete. Field names may not be descriptive, and units may be incorrect. To +get the full use out of yt for FITS files, make sure that the file is sufficiently +self-describing to fall into one of the above categories. Making the Most of yt for FITS Data ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -yt will load data without WCS information and/or some missing header keywords, but the resulting -field information will necessarily be incomplete. For example, field names may not be descriptive, -and units will not be correct. To get the full use out of yt for FITS files, make sure that for -each image the following header keywords have sensible values: +yt will load data without WCS information and/or some missing header keywords, +but the resulting field and/or coordinate information will necessarily be +incomplete. For example, field names may not be descriptive, and units will not +be correct. To get the full use out of yt for FITS files, make sure that for +each image HDU the following standard header keywords have sensible values: * ``CDELTx``: The pixel width in along axis ``x`` * ``CRVALx``: The coordinate value at the reference position along axis ``x`` * ``CRPIXx``: The reference pixel along axis ``x`` * ``CTYPEx``: The projection type of axis ``x`` * ``CUNITx``: The units of the coordinate along axis ``x`` -* ``BTYPE``: The type of the image +* ``BTYPE``: The type of the image; this will be used as the field name * ``BUNIT``: The units of the image FITS header keywords can easily be updated using AstroPy. For example, @@ -859,43 +946,13 @@ to set the ``BTYPE`` and ``BUNIT`` keywords: .. code-block:: python - import astropy.io.fits as pyfits - f = pyfits.open("xray_flux_image.fits", mode="update") + from astropy.io import fits + f = fits.open("xray_flux_image.fits", mode="update") f[0].header["BUNIT"] = "cts/s/pixel" f[0].header["BTYPE"] = "flux" f.flush() f.close() -FITS Coordinates -^^^^^^^^^^^^^^^^ - -For FITS datasets, the unit of ``code_length`` is always the width of one -pixel. yt will attempt to use the WCS information in the FITS header to -construct information about the coordinate system, and provides support for -the following dataset types: - -1. Rectilinear 2D/3D images with length units (e.g., Mpc, AU, - etc.) defined in the ``CUNITx`` keywords -2. 
2D images in some celestial coordinate systems (RA/Dec, - galactic latitude/longitude, defined in the ``CTYPEx`` - keywords), and X-ray binary table event files -3. 3D images with celestial coordinates and a third axis for another - quantity, such as velocity, frequency, wavelength, etc. -4. 4D images with the first three axes like Case 3, where the slices - along the 4th axis are interpreted as different fields. - -If your data is of the first case, yt will determine the length units based -on the information in the header. If your data is of the second or third -cases, no length units will be assigned, but the world coordinate information -about the axes will be stored in separate fields. If your data is of the -fourth type, the coordinates of the first three axes will be determined -according to cases 1-3. - -.. note:: - - Linear length-based coordinates (Case 1 above) are only supported if all - dimensions have the same value for ``CUNITx``. WCS coordinates are only - supported for Cases 2-4. FITS Data Decomposition ^^^^^^^^^^^^^^^^^^^^^^^ @@ -926,8 +983,7 @@ set manually by passing the ``nprocs`` parameter to the ``load`` call: .. code-block:: python - ds = load("m33_hi.fits", nprocs=64) - + ds = yt.load("m33_hi.fits", nprocs=64) Fields in FITS Datasets ^^^^^^^^^^^^^^^^^^^^^^^ @@ -947,7 +1003,7 @@ The third way is if auxiliary files are included along with the main file, like .. code-block:: python - ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"]) + ds = yt.load("flux.fits", auxiliary_files=["temp.fits","metal.fits"]) The image blocks in each of these files will be loaded as a separate field, provided they have the same dimensions as the image blocks in the main file. @@ -957,12 +1013,6 @@ based on the corresponding ``CTYPEx`` keywords. When queried, these fields will be generated from the pixel coordinates in the file using the WCS transformations provided by AstroPy. -X-ray event data will be loaded as particle fields in yt, but a grid will be -constructed from the WCS information in the FITS header. There is a helper -function, ``setup_counts_fields``, which may be used to make deposited image -fields from the event data for different energy bands (for an example see -:ref:`xray_fits`). - .. note:: Each FITS image from a single dataset, whether from one file or from one of @@ -988,11 +1038,11 @@ containing different mask values for different fields: .. code-block:: python - # passing a single float - ds = load("m33_hi.fits", nan_mask=0.0) + # passing a single float for all images + ds = yt.load("m33_hi.fits", nan_mask=0.0) # passing a dict - ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0}) + ds = yt.load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0}) ``suppress_astropy_warnings`` """"""""""""""""""""""""""""" @@ -1001,17 +1051,6 @@ Generally, AstroPy may generate a lot of warnings about individual FITS files, many of which you may want to ignore. If you want to see these warnings, set ``suppress_astropy_warnings = False``. -``spectral_factor`` -""""""""""""""""""" - -Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt -sets the pixel scale as the ``code_length``, certain visualizations (such as -volume renderings) may look extended or distended in ways that are -undesirable. 
To adjust the width in ``code_length`` of the spectral axis, set -``spectral_factor`` equal to a constant which gives the desired scaling, or set -it to ``"auto"`` to make the width the same as the largest axis in the sky -plane. - Miscellaneous Tools for Use with FITS Data ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1064,7 +1103,7 @@ version of AstroPy >= 1.3 must be installed. .. code-block:: python wcs_slc = PlotWindowWCS(slc) - wcs_slc.show() # for the IPython notebook + wcs_slc.show() # for Jupyter notebooks wcs_slc.save() ``WCSAxes`` is still in an experimental state, but as its functionality @@ -1092,8 +1131,8 @@ individual lines from an intensity cube: 'CH3NH2': (218.40956, 'GHz')} slab_width = (0.05, "GHz") ds = create_spectral_slabs("intensity_cube.fits", - slab_centers, slab_width, - nan_mask=0.0) + slab_centers, slab_width, + nan_mask=0.0) All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when creating the dataset (see :ref:`additional_fits_options` above). In the @@ -1106,11 +1145,12 @@ zero, and the left and right edges of the domain along this axis are Examples of Using FITS Data ^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The following IPython notebooks show examples of working with FITS data in yt, +The following Jupyter notebooks show examples of working with FITS data in yt, which we recommend you look at in the following order: * :ref:`radio_cubes` * :ref:`xray_fits` +* :ref:`writing_fits_images` .. _loading-flash-data: @@ -1148,7 +1188,31 @@ grid structure and are at the same simulation time, the particle data may be loa However, if you don't have a corresponding plotfile for a particle file, but would still like to load the particle data, you can still call ``yt.load`` on the file. However, the grid information will not be available, and the particle data will be loaded in a fashion -similar to SPH data. +similar to other particle-based datasets in yt. + +Mean Molecular Weight and Number Density Fields +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The way the mean molecular weight and number density fields are defined depends on +what type of simulation you are running. If you are running a simulation without +species and a :math:`\gamma`-law equation of state, then the mean molecular weight +is defined using the ``eos_singleSpeciesA`` parameter in the FLASH dataset. If you +have multiple species and your dataset contains the FLASH field ``"abar"``, then +this is used as the mean molecular weight. In either case, the number density field +is calculated using this weight. + +If you are running a FLASH simulation where the fields ``"sumy"`` and ``"ye"`` are +present, Then the mean molecular weight is the inverse of ``"sumy"``, and the fields +``"El_number_density"``, ``"ion_number_density"``, and ``"number_density"`` are +defined using the following mathematical definitions: + +* ``"El_number_density"`` :math:`n_e = N_AY_e\rho` +* ``"ion_number_density"`` :math:`n_i = N_A\rho/\bar{A}` +* ``"number_density"`` :math:`n = n_e + n_i` + +where :math:`n_e` and :math:`n_i` are the electron and ion number densities, +:math:`\rho` is the mass density, :math:`Y_e` is the electron number per baryon, +:math:`\bar{A}` is the mean molecular weight, and :math:`N_A` is Avogadro's number. .. rubric:: Caveats @@ -1170,9 +1234,11 @@ from the OWLS project can be found at :ref:`owls-notebook`. .. note:: - If you are loading a multi-file dataset with Gadget, supply the *zeroth* - file to the ``load`` command. For instance, - ``yt.load("snapshot_061.0.hdf5")`` . 
+ If you are loading a multi-file dataset with Gadget, you can either supply the *zeroth* + file to the ``load`` command or the directory containing all of the files. + For instance, to load the *zeroth* file: ``yt.load("snapshot_061.0.hdf5")``. To + give just the directory, if you have all of your ``snapshot_000.*`` files in a directory + called ``snapshot_000``, do: ``yt.load("/path/to/snapshot_000")``. Gadget data in HDF5 format can be loaded with the ``load`` command: @@ -1255,21 +1321,6 @@ The number of cells in an oct is defined by the expression It's recommended that if you want higher-resolution, try reducing the value of ``n_ref`` to 32 or 16. -Also yt can be set to generate the global mesh index according to a specific -type of particles instead of all the particles through the parameter -``index_ptype``. For example, to build the octree only according to the -``"PartType0"`` particles, you can do: - -.. code-block:: python - - ds = yt.load("snapshot_061.hdf5", index_ptype="PartType0") - -By default, ``index_ptype`` is set to ``"all"``, which means all the particles. -For Gadget binary outputs, ``index_ptype`` should be set using the particle type -names yt uses internally (e.g. ``'Gas'``, ``'Halo'``, ``'Disk'``, etc). For -Gadget HDF5 outputs the particle type names come from the HDF5 output and so -should be referred to using names like ``'PartType0'``. - .. _gadget-field-spec: Field Specifications @@ -1422,20 +1473,70 @@ argument of this form: yt will utilize length, mass and time to set up all other units. +.. _loading-swift-data: + +SWIFT Data +---------- + +yt has support for reading in SWIFT data from the HDF5 file format. It is able +to access all particles and fields which are stored on-disk, and it is also able +to generate derived fields, e.g., linear momentum from on-disk fields. + +It is also possible to smooth the data onto a grid or an octree. This +interpolation can be done using an SPH kernel with either the scatter or gather +approach. The SWIFT frontend is supported and cared for by Ashley Kelly. + +SWIFT data in HDF5 format can be loaded with the ``load`` command: + +.. code-block:: python + + import yt + ds = yt.load("EAGLE_6/eagle_0005.hdf5") + +.. _arepo-data: + +Arepo Data +---------- + +Arepo data is currently treated as SPH data. The gas cells have smoothing lengths +assigned using the following prescription for a given gas cell :math:`i`: + +.. math:: + + h_{\rm sml} = \alpha\left(\frac{3}{4\pi}\frac{m_i}{\rho_i}\right)^{1/3} + +where :math:`\alpha` is a constant factor. By default, :math:`\alpha = 2`. In +practice, smoothing lengths are only used for creating slices and projections, +and this value of :math:`\alpha` works well for this purpose. However, this +value can be changed when loading an Arepo dataset by setting the +``smoothing_factor`` parameter: + +.. code-block:: python + + import yt + ds = yt.load("snapshot_100.hdf5", smoothing_factor=1.5) + +Currently, only Arepo HDF5 snapshots are supported. If the "GFM" metal fields are +present in your dataset, they will be loaded in from the on-disk ``"GFM_Metals"`` +field and aliased to the appropriate species fields. For more information, see +the `Illustris TNG documentation `_. + .. _loading-gamer-data: GAMER Data ---------- -GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the data like this: +GAMER HDF5 data is supported and cared for by Hsi-Yu Schive. You can load the +data like this: .. 
code-block:: python import yt ds = yt.load("InteractingJets/jet_000002") -For simulations without units (i.e., OPT__UNIT = 0), you can supply conversions for -length, time, and mass to ``load`` using the ``units_override`` functionality: +For simulations without units (i.e., OPT__UNIT = 0), you can supply conversions +for length, time, and mass to ``load`` using the ``units_override`` +functionality: .. code-block:: python @@ -1445,14 +1546,16 @@ length, time, and mass to ``load`` using the ``units_override`` functionality: "mass_unit" :(1.4690033e+36,"g") } ds = yt.load("InteractingJets/jet_000002", units_override=code_units) -This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields, -e.g., ``("gamer","Dens")``, will be in code units. +This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, +but the GAMER fields, e.g., ``("gamer","Dens")``, will be in code units. -Particle data are supported and are always stored in the same file as the grid data. +Particle data are supported and are always stored in the same file as the grid +data. .. rubric:: Caveats -* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported. +* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not +  supported. .. _loading-amr-data: @@ -1768,6 +1871,41 @@ The ``load_particles`` function also accepts the following keyword parameters: ``bbox`` The bounding box for the particle positions. +A novel use of the ``load_particles`` function is to facilitate SPH +visualization of non-SPH particles. See the example below: + +.. code-block:: python + + import yt + + # Load dataset and center on the dense region + ds = yt.load('FIRE_M12i_ref11/snapshot_600.hdf5') + _, center = ds.find_max(('PartType0', 'density')) + + # Reload DM particles into a stream dataset + ad = ds.all_data() + pt = 'PartType1' + fields = ['particle_mass'] + [f'particle_position_{ax}' for ax in 'xyz'] + data = {field: ad[pt, field] for field in fields} + ds_dm = yt.load_particles(data, data_source=ad) + + # Generate the missing SPH fields + ds_dm.add_sph_fields() + + # Make the SPH projection plot + p = yt.ProjectionPlot(ds_dm, 'z', ('io', 'density'), + center=center, width=(1, 'Mpc')) + p.set_unit('density', 'Msun/kpc**2') + p.show() + +Here we see two new things. First, ``load_particles`` accepts a ``data_source`` +argument to infer parameters like code units, which could be tedious to provide +otherwise. Second, the returned +:class:`~yt.frontends.stream.data_structures.StreamParticleDataset` has an +:meth:`~yt.frontends.stream.data_structures.StreamParticleDataset.add_sph_fields` +method to create the ``smoothing_length`` and ``density`` fields required for +SPH visualization to work. + .. _loading-gizmo-data: Gizmo Data diff --git a/doc/source/index.rst b/doc/source/index.rst index 3339c93afca..ece8a940efa 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -103,11 +103,11 @@ Table of Contents

- Topic-Specific Analysis Modules + Domain-Specific Analysis

-

Track halos, make synthetic observations, find clumps, and more

+

Astrophysical analysis, clump finding, cosmology calculations, and more

diff --git a/doc/source/installing.rst b/doc/source/installing.rst index cdd5379c17e..63f535640f0 100644 --- a/doc/source/installing.rst +++ b/doc/source/installing.rst @@ -374,60 +374,6 @@ most up-to-date source code. Alternatively, you can replace ``pip install -e .`` with ``conda develop -b .``. - -Installing Support for the Rockstar Halo Finder -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The easiest way to set rockstar up in a conda-based python environment is to run -the install script with ``INST_ROCKSTAR=1``. - -If you want to do this manually, you will need to follow these -instructions. First, clone Matt Turk's fork of rockstar and compile it: - -.. code-block:: bash - - $ git clone https://github.com/yt-project/rockstar - $ cd rockstar - $ make lib - -Next, copy `librockstar.so` into the `lib` folder of your anaconda installation: - -.. code-block:: bash - - $ cp librockstar.so /path/to/anaconda/lib - -Finally, you will need to recompile yt to enable the rockstar interface. Clone a -copy of the yt git repository (see :ref:`conda-source-build`), or navigate -to a clone that you have already made, and do the following: - -.. code-block:: bash - - $ cd /path/to/yt-git - $ ./clean.sh - $ echo /path/to/rockstar > rockstar.cfg - $ pip install -e . - -Here ``/path/to/yt-git`` is the path to your clone of the yt git repository -and ``/path/to/rockstar`` is the path to your clone of Matt Turk's fork of -rockstar. - -Finally, to actually use rockstar, you will need to ensure the folder containing -`librockstar.so` is in your LD_LIBRARY_PATH: - -.. code-block:: bash - - $ export LD_LIBRARY_PATH=/path/to/anaconda/lib - -You should now be able to enter a python session and import the rockstar -interface: - -.. code-block:: python - - >>> from yt.analysis_modules.halo_finding.rockstar import rockstar_interface - -If this python import fails, then you have not installed rockstar and yt's -rockstar interface correctly. - .. _windows-installation: Installing yt on Windows diff --git a/doc/source/quickstart/1)_Introduction.ipynb b/doc/source/quickstart/1)_Introduction.ipynb index 7fe228c9f2f..cf9615fa362 100644 --- a/doc/source/quickstart/1)_Introduction.ipynb +++ b/doc/source/quickstart/1)_Introduction.ipynb @@ -8,55 +8,53 @@ "\n", "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n", "\n", - "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook. The documentation exists at https://yt-project.org/doc/. If you encounter problems, look for help here: https://yt-project.org/doc/help/.\n", - "\n", + "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook. The documentation exists at https://yt-project.org/doc/. If you encounter problems, look for help here: https://yt-project.org/doc/help/index.html." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "## Acquiring the datasets for this tutorial\n", "\n", - "If you are executing these tutorials interactively, you need some sample datasets on which to run the code. You can download these datasets at https://yt-project.org/data/. 
The datasets necessary for each lesson are noted next to the corresponding tutorial.\n", + "If you are executing these tutorials interactively, you need some sample datasets on which to run the code. You can download these datasets at https://yt-project.org/data/, or you can use the built-in yt sample data loader (using [pooch](https://www.fatiando.org/pooch/latest/api/index.html) under the hood) to automatically download the data for you.\n", + "\n", + "The datasets necessary for each lesson are noted next to the corresponding tutorial, and by default it will use the pooch-based dataset downloader. If you would like to supply your own paths, you can choose to do so.\n", "\n", + "## Using the Automatic Downloader\n", + "\n", + "For the purposes of this tutorial, or whenever you want to use sample data, you can use the `load_sample` command to utilize the pooch auto-downloader. For instance:\n", + "\n", + "```python\n", + "ds = yt.load_sample(\"IsolatedGalaxy\")\n", + "```\n", + "\n", + "## Using manual loading\n", + "\n", + "The way you will *most frequently* interact with `yt` is using the standard `load` command. This accepts a path and optional arguments. For instance:\n", + "\n", + "```python\n", + "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n", + "```\n", + "\n", + "would load the `IsolatedGalaxy` dataset by supplying the full path to the parameter file." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "## What's Next?\n", "\n", "The Notebooks are meant to be explored in this order:\n", "\n", - "1. Introduction\n", + "1. Introduction (this file!)\n", "2. Data Inspection (IsolatedGalaxy dataset)\n", "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n", "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n", "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n", "6. Volume Rendering (IsolatedGalaxy dataset)" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "download_datasets = False\n", - "if download_datasets:\n", - " !curl -sSO https://yt-project.org/data/enzo_tiny_cosmology.tar.gz\n", - " print (\"Got enzo_tiny_cosmology\")\n", - " !tar xzf enzo_tiny_cosmology.tar.gz\n", - " \n", - " !curl -sSO https://yt-project.org/data/Enzo_64.tar.gz\n", - " print (\"Got Enzo_64\")\n", - " !tar xzf Enzo_64.tar.gz\n", - " \n", - " !curl -sSO https://yt-project.org/data/IsolatedGalaxy.tar.gz\n", - " print (\"Got IsolatedGalaxy\")\n", - " !tar xzf IsolatedGalaxy.tar.gz\n", - " \n", - " print (\"All done!\")" - ] } ], "metadata": { @@ -75,7 +73,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.4.3" + "version": "3.7.3" } }, "nbformat": 4, diff --git a/doc/source/quickstart/2)_Data_Inspection.ipynb b/doc/source/quickstart/2)_Data_Inspection.ipynb index 1118b256040..baf00f0c513 100644 --- a/doc/source/quickstart/2)_Data_Inspection.ipynb +++ b/doc/source/quickstart/2)_Data_Inspection.ipynb @@ -13,7 +13,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -31,11 +34,14 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ - "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")" + "ds = yt.load_sample(\"IsolatedGalaxy\")" ] }, { @@ -51,7 +57,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -69,7 +78,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -87,7 +99,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -105,7 +120,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -123,7 +141,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -141,7 +162,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -161,7 +185,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -192,7 +219,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -210,7 +240,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -221,7 +254,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + 
"outputs_hidden": false + } }, "outputs": [], "source": [ @@ -240,7 +276,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -251,7 +290,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -262,7 +304,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -273,7 +318,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -295,7 +343,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -306,7 +357,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -320,7 +374,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -331,7 +388,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -343,7 +403,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -368,7 +431,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -379,7 +445,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -397,7 +466,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -415,7 +487,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -439,9 +514,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.4.3" + "version": "3.7.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/doc/source/quickstart/3)_Simple_Visualization.ipynb b/doc/source/quickstart/3)_Simple_Visualization.ipynb index 945047a1280..0a08ce3b800 100644 --- a/doc/source/quickstart/3)_Simple_Visualization.ipynb +++ b/doc/source/quickstart/3)_Simple_Visualization.ipynb @@ -13,7 +13,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -31,11 +34,14 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ - "ds = 
yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n", + "ds = yt.load_sample(\"enzo_tiny_cosmology\", \"DD0046/DD0046\")\n", "print (\"Redshift =\", ds.current_redshift)" ] }, @@ -52,7 +58,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -73,7 +82,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -84,7 +96,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -95,7 +110,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -106,7 +124,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -117,7 +138,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -135,7 +159,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -154,7 +181,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -172,7 +202,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -192,11 +225,14 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ - "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n", + "ds = yt.load_sample(\"Enzo_64\", \"DD0043/data0043\")\n", "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n", "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n", "s.zoom(10.0)" @@ -213,7 +249,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -231,7 +270,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -249,7 +291,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -269,7 +314,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -293,9 +341,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.4.3" + "version": "3.7.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb 
b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb index eaac3c15bca..86ccb0fd6e4 100644 --- a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb +++ b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb @@ -13,7 +13,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -21,7 +24,7 @@ "import yt\n", "import numpy as np\n", "from matplotlib import pylab\n", - "from yt.analysis_modules.halo_finding.api import HaloFinder" + "from yt.extensions.astro_analysis.halo_finding.api import HaloFinder" ] }, { @@ -39,11 +42,14 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ - "ts = yt.load(\"enzo_tiny_cosmology/DD????/DD????\")" + "ts = yt.load_sample(\"enzo_tiny_cosmology\", \"DD????/DD????\")" ] }, { @@ -59,7 +65,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -84,7 +93,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -105,14 +117,19 @@ "\n", "Let's do something a bit different. Let's calculate the total mass inside halos and outside halos.\n", "\n", - "This actually touches a lot of different pieces of machinery in yt. For every dataset, we will run the halo finder HOP. Then, we calculate the total mass in the domain. Then, for each halo, we calculate the sum of the baryon mass in that halo. We'll keep running tallies of these two things." + "This actually touches a lot of different pieces of machinery in yt. For every dataset, we will run the halo finder HOP. Then, we calculate the total mass in the domain. Then, for each halo, we calculate the sum of the baryon mass in that halo. We'll keep running tallies of these two things.\n", + "\n", + "Note, that the halo finding machinery requires the additional [yt_astro_analysis](https://github.com/yt-project/yt_astro_analysis) package. Installation instructions can be found at https://yt-astro-analysis.readthedocs.io/." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -143,7 +160,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -175,7 +195,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -187,7 +210,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -198,7 +224,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -209,7 +238,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -229,11 +261,14 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ - "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n", + "ds = yt.load_sample(\"IsolatedGalaxy\")\n", "v, c = ds.find_max(\"density\")\n", "sl = ds.slice(2, c[0])\n", "print (sl[\"index\", \"x\"])\n", @@ -253,7 +288,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -272,7 +310,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -296,7 +337,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -315,7 +359,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -326,7 +373,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -344,7 +394,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -370,7 +423,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -389,7 +445,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -414,9 +473,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.4.3" + "version": "3.7.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb b/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb index 363fcd0b3b8..51021789c2e 
100644 --- a/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb +++ b/doc/source/quickstart/5)_Derived_Fields_and_Profiles.ipynb @@ -13,7 +13,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -37,7 +40,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -57,11 +63,14 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ - "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n", + "ds = yt.load_sample(\"IsolatedGalaxy\")\n", "dd = ds.all_data()\n", "print (list(dd.quantities.keys()))" ] @@ -77,7 +86,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -95,7 +107,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -115,7 +130,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -145,7 +163,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -167,7 +188,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -187,7 +211,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -209,7 +236,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -231,7 +261,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -267,9 +300,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.4.3" + "version": "3.7.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/doc/source/quickstart/6)_Volume_Rendering.ipynb b/doc/source/quickstart/6)_Volume_Rendering.ipynb index 069b431123c..581b8358a13 100644 --- a/doc/source/quickstart/6)_Volume_Rendering.ipynb +++ b/doc/source/quickstart/6)_Volume_Rendering.ipynb @@ -13,12 +13,15 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ "import yt\n", - "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")" + "ds = yt.load_sample(\"IsolatedGalaxy\")" ] }, { @@ -36,7 +39,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -65,7 +71,10 @@ "cell_type": "code", "execution_count": null, "metadata": { 
- "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -83,7 +92,10 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } }, "outputs": [], "source": [ @@ -120,9 +132,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.1" + "version": "3.7.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 4 } diff --git a/doc/source/reference/api/api.rst b/doc/source/reference/api/api.rst index c4537d1b11c..2c1aca5a65a 100644 --- a/doc/source/reference/api/api.rst +++ b/doc/source/reference/api/api.rst @@ -494,62 +494,16 @@ of topologically disconnected structures, i.e., clump finding. ~yt.data_objects.level_sets.clump_info_items.add_clump_info ~yt.data_objects.level_sets.clump_validators.add_validator -.. _halo_analysis_ref: - -Halo Analysis -^^^^^^^^^^^^^ - -The ``HaloCatalog`` object is the primary means for performing custom analysis -on cosmological halos. It is also the primary interface for halo finding. - -.. autosummary:: - - ~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog - ~yt.analysis_modules.halo_analysis.halo_finding_methods.HaloFindingMethod - ~yt.analysis_modules.halo_analysis.halo_callbacks.HaloCallback - ~yt.analysis_modules.halo_analysis.halo_callbacks.delete_attribute - ~yt.analysis_modules.halo_analysis.halo_callbacks.halo_sphere - ~yt.analysis_modules.halo_analysis.halo_callbacks.iterative_center_of_mass - ~yt.analysis_modules.halo_analysis.halo_callbacks.load_profiles - ~yt.analysis_modules.halo_analysis.halo_callbacks.phase_plot - ~yt.analysis_modules.halo_analysis.halo_callbacks.profile - ~yt.analysis_modules.halo_analysis.halo_callbacks.save_profiles - ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_bulk_velocity - ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_field_max_recenter - ~yt.analysis_modules.halo_analysis.halo_callbacks.virial_quantities - ~yt.analysis_modules.halo_analysis.halo_filters.HaloFilter - ~yt.analysis_modules.halo_analysis.halo_filters.not_subhalo - ~yt.analysis_modules.halo_analysis.halo_filters.quantity_value - ~yt.analysis_modules.halo_analysis.halo_quantities.HaloQuantity - ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity - ~yt.analysis_modules.halo_analysis.halo_quantities.center_of_mass - ~yt.analysis_modules.halo_analysis.halo_recipes.HaloRecipe - ~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities - -Halo Finding -^^^^^^^^^^^^ - -These provide direct access to the halo finders. However, it is strongly recommended -to use the ``HaloCatalog``. - -.. autosummary:: - - ~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder - ~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder - ~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder - -Two Point Functions -^^^^^^^^^^^^^^^^^^^ - -These functions are designed to create correlations or other results of -operations acting on two spatially-distinct points in a data source. See also -:ref:`two_point_functions`. +X-ray Emission Fields +^^^^^^^^^^^^^^^^^^^^^ +This can be used to create derived fields of X-ray emission in +different energy bands. .. 
autosummary:: - ~yt.analysis_modules.two_point_functions.two_point_functions.TwoPointFunctions - ~yt.analysis_modules.two_point_functions.two_point_functions.FcnSet + ~yt.fields.xray_emission_fields.XrayEmissivityIntegrator + ~yt.fields.xray_emission_fields.add_xray_emissivity_field Field Types ----------- @@ -592,57 +546,6 @@ writing to bitmaps. ~yt.data_objects.image_array.ImageArray -Extension Types ---------------- - -Cosmology, Star Particle Analysis, and Simulated Observations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For the generation of stellar SEDs. (See also :ref:`star_analysis`.) - - -.. autosummary:: - - ~yt.analysis_modules.star_analysis.sfr_spectrum.StarFormationRate - ~yt.analysis_modules.star_analysis.sfr_spectrum.SpectrumBuilder - -Light cone generation and simulation analysis. (See also -:ref:`light-cone-generator`.) - - -.. autosummary:: - - ~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone - ~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay - -Absorption and X-ray spectra and spectral lines: - -.. autosummary:: - - ~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum - ~yt.fields.xray_emission_fields.XrayEmissivityIntegrator - ~yt.fields.xray_emission_fields.add_xray_emissivity_field - -Absorption spectra fitting: - -.. autosummary:: - - ~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit - -Sunrise exporting: - -.. autosummary:: - - ~yt.analysis_modules.sunrise_export.sunrise_exporter.export_to_sunrise - ~yt.analysis_modules.sunrise_export.sunrise_exporter.export_to_sunrise_from_halolist - -RADMC-3D exporting: - -.. autosummary:: - - ~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DLayer - ~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter - Volume Rendering ^^^^^^^^^^^^^^^^ diff --git a/doc/source/visualizing/FITSImageData.ipynb b/doc/source/visualizing/FITSImageData.ipynb index 2d676a74275..b0a2912397d 100644 --- a/doc/source/visualizing/FITSImageData.ipynb +++ b/doc/source/visualizing/FITSImageData.ipynb @@ -10,9 +10,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "import yt" @@ -21,14 +19,13 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override={\"length_unit\":(1.0,\"Mpc\"),\n", - " \"mass_unit\":(1.0e14,\"Msun\"),\n", - " \"time_unit\":(1.0,\"Myr\")})" + "units_override = {\"length_unit\": (1.0, \"Mpc\"),\n", + " \"mass_unit\": (1.0e14, \"Msun\"),\n", + " \"time_unit\": (1.0, \"Myr\")}\n", + "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override=units_override)" ] }, { @@ -48,12 +45,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500., \"kpc\"))\n", + "prj = yt.ProjectionPlot(ds, \"z\", (\"gas\", \"temperature\"), \n", + " weight_field=(\"gas\", \"density\"), width=(500., \"kpc\"))\n", "prj.show()" ] }, @@ -67,12 +63,10 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "prj_fits = yt.FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")" + "prj_fits = 
yt.FITSProjection(ds, \"z\", (\"gas\", \"temperature\"), weight_field=(\"gas\", \"density\"))" ] }, { @@ -92,12 +86,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "prj_fits = yt.FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500., \"kpc\"))" + "prj_fits = yt.FITSProjection(ds, \"z\", (\"gas\", \"temperature\"), \n", + " weight_field=(\"gas\", \"density\"), width=(500., \"kpc\"))" ] }, { @@ -110,9 +103,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "prj_fits.info()" @@ -128,9 +119,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "prj_fits[\"temperature\"].header" @@ -140,20 +129,32 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. If we want the raw image data with units, we can use the `data` attribute of this field:" + "where we can see that the units of the temperature field are Kelvin and the cell widths are in kiloparsecs. Note that the length, time, mass, velocity, and magnetic field units of the dataset have been copied into the header " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " If we want the raw image data with units, we can use the `data` attribute of this field:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "prj_fits[\"temperature\"].data" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Changing Aspects of the Images" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -164,15 +165,54 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "prj_fits.set_unit(\"temperature\",\"R\")\n", + "prj_fits.set_unit(\"temperature\", \"R\")\n", "prj_fits[\"temperature\"].data" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The length units of the image (and its coordinate system), as well as the resolution of the image, can be adjusted when creating it using the `length_unit` and `image_res` keyword arguments, respectively:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# length_unit defaults to that from the dataset\n", + "# image_res defaults to 512\n", + "slc_fits = yt.FITSSlice(ds, \"z\", (\"gas\", \"density\"), width=(500,\"kpc\"), length_unit=\"ly\", image_res=256)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now check that this worked by looking at the header, notice in particular the `NAXIS[12]` and `CUNIT[12]` keywords (the `CDELT[12]` and `CRPIX[12]` values also change):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "slc_fits[\"density\"].header" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Saving and Loading Images" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -183,27 +223,23 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "prj_fits.writeto(\"sloshing.fits\", clobber=True)" + 
"prj_fits.writeto(\"sloshing.fits\", overwrite=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Since yt can read FITS image files, it can be loaded up just like any other dataset:" + "Since yt can read FITS image files, it can be loaded up just like any other dataset. Since we created this FITS file with `FITSImageData`, the image will contain information about the units and the current time of the dataset:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "ds2 = yt.load(\"sloshing.fits\")" @@ -219,12 +255,10 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n", + "slc2 = yt.SlicePlot(ds2, \"z\", (\"gas\", \"temperature\"), width=(500.,\"kpc\"))\n", "slc2.set_log(\"temperature\", True)\n", "slc2.show()" ] @@ -233,7 +267,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Using `FITSImageData` directly" + "## Creating `FITSImageData` Instances Directly from FRBs, PlotWindow instances, and 3D Grids" ] }, { @@ -246,33 +280,48 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "slc3 = ds.slice(0, 0.0)\n", "frb = slc3.to_frb((500.,\"kpc\"), 800)\n", - "fid_frb = yt.FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")" + "fid_frb = frb.to_fits_data(fields=[(\"gas\", \"density\"), (\"gas\", \"temperature\")], length_unit=\"pc\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "A 3D FITS cube can also be created from a covering grid:" + "If one creates a `PlotWindow` instance, e.g. `SlicePlot`, `ProjectionPlot`, etc., you can also call this same method there:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, + "outputs": [], + "source": [ + "fid_pw = prj.to_fits_data(fields=[(\"gas\", \"density\"), (\"gas\", \"temperature\")], \n", + " length_unit=\"pc\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A 3D FITS cube can also be created from regularly gridded 3D data. In yt, there are covering grids and \"arbitrary grids\". 
The easiest way to make an arbitrary grid object is using `ds.r`, where we can index the dataset like a NumPy array, creating a grid of 1.0 Mpc on a side, centered on the origin, with 64 cells on a side:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], "source": [ - "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n", - "fid_cvg = yt.FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")" + "grid = ds.r[(-0.5, \"Mpc\"):(0.5, \"Mpc\"):64j,\n", + " (-0.5, \"Mpc\"):(0.5, \"Mpc\"):64j,\n", + " (-0.5, \"Mpc\"):(0.5, \"Mpc\"):64j]\n", + "fid_grid = grid.to_fits_data(fields=[(\"gas\", \"density\"), (\"gas\", \"temperature\")], length_unit=\"Mpc\")" ] }, { @@ -282,6 +331,13 @@ "## Other `FITSImageData` Methods" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Creating Images from Others" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -292,9 +348,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "fid = yt.FITSImageData.from_file(\"sloshing.fits\")\n", @@ -311,12 +365,10 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "prj_fits2 = yt.FITSProjection(ds, \"z\", [\"density\"])\n", + "prj_fits2 = yt.FITSProjection(ds, \"z\", (\"gas\", \"density\"), width=(500.0, \"kpc\"))\n", "prj_fits3 = yt.FITSImageData.from_images([prj_fits, prj_fits2])\n", "prj_fits3.info()" ] @@ -331,9 +383,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "dens_fits = prj_fits3.pop(\"density\")" @@ -349,9 +399,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "dens_fits.info()" @@ -367,14 +415,19 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "prj_fits3.info()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Adding Sky Coordinates to Images" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -385,9 +438,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "prj_fits[\"temperature\"].header" @@ -405,9 +456,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "sky_center = [30.,45.] 
# in degrees\n", @@ -425,9 +474,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "prj_fits[\"temperature\"].header" @@ -461,9 +508,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "prj_fits3[\"temperature\"].header" @@ -480,15 +525,20 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Finally, we can add header keywords to a single field or for all fields in the FITS image using `update_header`:" + "### Updating Header Parameters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also add header keywords to a single field or for all fields in the FITS image using `update_header`:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "fid_frb.update_header(\"all\", \"time\", 0.1) # Update all the fields\n", @@ -498,24 +548,123 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print (fid_frb[\"density\"].header[\"time\"])\n", "print (fid_frb[\"temperature\"].header[\"scale\"])" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Changing Image Names" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can use the `change_image_name` method to change the name of an image in a `FITSImageData` instance:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fid_frb.change_image_name(\"density\", \"mass_per_volume\")\n", + "fid_frb.info() # now \"density\" should be gone and \"mass_per_volume\" should be in its place" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Convolving FITS Images" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, you can convolve an image inside a `FITSImageData` instance with a kernel, either a Gaussian with a specific standard deviation, or any kernel provided by AstroPy. See AstroPy's [Convolution and filtering](http://docs.astropy.org/en/stable/convolution/index.html) for more details." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "dens_fits.writeto(\"not_convolved.fits\", overwrite=True)\n", + "# Gaussian kernel with standard deviation of 3.0 kpc\n", + "dens_fits.convolve(\"density\", 3.0)\n", + "dens_fits.writeto(\"convolved.fits\", overwrite=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's load these up as datasets and see the difference:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds0 = yt.load(\"not_convolved.fits\")\n", + "dsc = yt.load(\"convolved.fits\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "slc3 = yt.SlicePlot(ds0, \"z\", (\"gas\", \"density\"), width=(500.,\"kpc\"))\n", + "slc3.set_log(\"density\", True)\n", + "slc3.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "slc4 = yt.SlicePlot(dsc, \"z\", (\"gas\", \"density\"), width=(500.,\"kpc\"))\n", + "slc4.set_log(\"density\", True)\n", + "slc4.show()" + ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python [default]", + "display_name": "Python 3", "language": "python", "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 2 } diff --git a/doc/source/visualizing/callbacks.rst b/doc/source/visualizing/callbacks.rst index 8011e33bf29..c49a567a7b4 100644 --- a/doc/source/visualizing/callbacks.rst +++ b/doc/source/visualizing/callbacks.rst @@ -123,7 +123,7 @@ The underlying functions are more thoroughly documented in :ref:`callback-api`. .. _annotate-clear: Clear Callbacks (Some or All) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. function:: annotate_clear(index=None) @@ -145,6 +145,27 @@ Clear Callbacks (Some or All) p.annotate_clear() p.save() +.. _annotate-list: + +List Currently Applied Callbacks +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. function:: list_annotations() + + This function will print a list of each of the currently applied + callbacks together with their index. The index can be used with + :ref:`annotate_clear() function ` to remove a + specific callback. + +.. python-script:: + + import yt + ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") + p = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc')) + p.annotate_scale() + p.annotate_timestamp() + p.list_annotations() + .. _annotate-arrow: Overplot Arrow @@ -357,7 +378,7 @@ Overplot Halo Annotations (This is a proxy for :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.) - Accepts a :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` + Accepts a :class:`~yt_astro_analysis.halo_analysis.halo_catalog.HaloCatalog` and plots a circle at the location of each halo with the radius of the circle corresponding to the virial radius of the halo. 
Also accepts a :ref:`loaded halo catalog dataset ` or a data @@ -805,7 +826,7 @@ Overplot the Path of a Ray ray can be either a :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`, :class:`~yt.data_objects.selection_data_containers.YTRay`, or a - :class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay` + :class:`~trident.light_ray.LightRay` object. annotate_ray() will properly account for periodic rays across the volume. diff --git a/doc/source/visualizing/plots.rst b/doc/source/visualizing/plots.rst index 6a4b39256db..abe6440c74b 100644 --- a/doc/source/visualizing/plots.rst +++ b/doc/source/visualizing/plots.rst @@ -159,6 +159,14 @@ where for the last two objects any spatial field, such as ``"density"``, ``"velocity_z"``, etc., may be used, e.g. ``center=("min","temperature")``. +The effective resolution of the plot (i.e. the number of resolution elements +in the image itself) can be controlled with the ``buff_size`` argument: + +.. code-block:: python + + yt.SlicePlot(ds, 'z', 'density', buff_size=(1000, 1000)) + + Here is an example that combines all of the options we just discussed. .. python-script:: @@ -167,7 +175,7 @@ Here is an example that combines all of the options we just discussed. from yt.units import kpc ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") slc = yt.SlicePlot(ds, 'z', 'density', center=[0.5, 0.5, 0.5], - width=(20,'kpc')) + width=(20,'kpc'), buff_size=(1000, 1000)) slc.save() The above example will display an annotated plot of a slice of the @@ -275,11 +283,12 @@ example: from yt.units import kpc ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") prj = yt.ProjectionPlot(ds, 2, 'temperature', width=25*kpc, - weight_field='density') + weight_field='density', buff_size=(1000, 1000)) prj.save() -will create a density-weighted projection of the temperature field along the x -axis, plot it, and then save the plot to a png image file. +will create a density-weighted projection of the temperature field along +the x axis with 1000 resolution elements per side, plot it, and then save +the plot to a png image file. Like :ref:`slice-plots`, annotations and modifications can be applied after creating the ``ProjectionPlot`` object. Annotations are @@ -770,8 +779,8 @@ from black to white depending on the AMR level of the grid. Annotations are described in :ref:`callbacks`. -Set the size of the plot -~~~~~~~~~~~~~~~~~~~~~~~~ +Set the size and resolution of the plot +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To set the size of the plot, use the :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_figure_size` function. The argument @@ -797,6 +806,9 @@ To change the resolution of the image, call the slc.set_buff_size(1600) slc.save() +Also see cookbook recipe :ref:`image-resolution-primer` for more information +about the parameters that determine the resolution of your images. + Turning off minorticks ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/yt4differences.rst b/doc/source/yt4differences.rst new file mode 100644 index 00000000000..34d9fc10a74 --- /dev/null +++ b/doc/source/yt4differences.rst @@ -0,0 +1,277 @@ +.. _yt4differences: + +What's New and Different in yt 4.0? +=================================== + +If you are new to yt, welcome! If you're coming to yt 4.0 from an older +version, however, there may be a few things in this version that are different +than what you are used to. 
We have tried to build compatibility layers to
+minimize disruption to existing scripts, but necessarily things will be
+different in some ways.
+
+.. contents::
+   :depth: 2
+   :local:
+   :backlinks: none
+
+Updating to yt 4.0 from Old Versions (and going back)
+-----------------------------------------------------
+
+
+.. _transitioning-to-4.0:
+
+Converting Old Scripts to Work with yt 4.0
+------------------------------------------
+
+
+Cool New Things
+---------------
+
+Changes for working with SPH Data
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In yt-3.0 most user-facing operations on SPH data were produced by interpolating
+SPH data onto a volume-filling octree mesh. Historically this was easier to
+implement when support for SPH data was added to yt, as it allowed reusing a lot
+of the existing infrastructure. This had some downsides: because the octree was a
+single, global object, the memory and CPU overhead of smoothing SPH data onto
+the octree could be prohibitive on particle datasets produced by large
+simulations. Constructing the octree during the initial indexing phase also
+required each particle (albeit as a 64-bit integer) to be present in memory
+simultaneously for a sorting operation, which was memory prohibitive.
+Visualizations of slices and projections produced by yt using the default
+settings are somewhat blocky, since by default we use a relatively coarse octree
+to preserve memory.
+
+In yt-4.0 this has all changed! Over the past two years, Nathan Goldbaum, Meagan
+Lang and Matt Turk implemented a new approach for handling I/O of particle data,
+based on storing compressed bitmaps containing Morton indices instead of an
+in-memory octree. This new capability means that the global octree index is
+no longer necessary to enable I/O chunking and spatial indexing of particle data
+in yt.
+
+The new I/O method has opened up a new way of dealing with particle data and,
+in particular, SPH data.
+
+Scatter and gather approach for SPH data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned, operations such as slices, projections and arbitrary
+grids previously smoothed the particle data onto the global octree. As this is no
+longer used, a different approach was required to visualize the SPH data. Using
+SPLASH as inspiration, SPH pixelization operations were created using
+smoothing operations via "scatter" and "gather" approaches. We estimate the
+contribution of a particle to a single pixel by considering the point at the
+centre of the pixel and using the standard SPH smoothing formula. The heavy
+lifting in these operations is undertaken by Cython functions.
+
+It is now possible to generate slice plots, projection plots, covering grids and
+arbitrary grids of smoothed quantities using these operations. The following
+code demonstrates how this could be achieved using the scatter method:
+
+.. code-block:: python
+
+    import yt
+
+    ds = yt.load('snapshot_033/snap_033.0.hdf5')
+
+    plot = yt.SlicePlot(ds, 2, ('gas', 'density'))
+    plot.save()
+
+    plot = yt.ProjectionPlot(ds, 2, ('gas', 'density'))
+    plot.save()
+
+    arbitrary_grid = ds.arbitrary_grid([0.0, 0.0, 0.0], [25, 25, 25],
+                                       dims=[16, 16, 16])
+    ag_density = arbitrary_grid[('gas', 'density')]
+
+    covering_grid = ds.covering_grid(4, 0, 16)
+    cg_density = covering_grid[('gas', 'density')]
+
+In the above example the ``covering_grid`` and the ``arbitrary_grid`` will return
+the same data. In fact, these containers are very similar but provide a
+slightly different API; a small check of this equivalence is sketched below.
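+
+The snippet below is a minimal, illustrative check of that equivalence; it is
+not part of the original example and simply reuses the ``ag_density`` and
+``cg_density`` arrays defined above:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # Both containers cover the same region at the same resolution in this
+    # example, so the SPH-smoothed density fields should agree.
+    assert ag_density.shape == cg_density.shape
+    assert np.allclose(ag_density, cg_density)
+
+The main practical difference is how the containers are specified:
+``covering_grid(level, left_edge, dims)`` ties the cell width to an octree/AMR
+level, while ``arbitrary_grid(left_edge, right_edge, dims)`` lets you choose any
+region and resolution.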
+
+The above code can be modified to use the gather approach by changing a global
+setting for the dataset. This can be achieved with
+``ds.sph_smoothing_style = "gather"``. So far, the gather approach is not
+supported for projections.
+
+The default behaviour for SPH interpolation is that the values are normalized
+in line with Eq. 9 in `SPLASH, Price (2009) `_.
+This can be disabled with ``ds.use_sph_normalization = False``. This will
+disable the normalization for all future interpolations.
+
+The gather approach requires finding nearest neighbors using the KDTree. The
+first call will generate a KDTree for the entire dataset, which will be stored in
+a sidecar file and loaded whenever necessary.
+
+Off-Axis Projection for SPH Data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``OffAxisProjectionPlot`` class now supports SPH projection plots.
+
+The following is a code example:
+
+.. code-block:: python
+
+    import yt
+
+    ds = yt.load('Data/GadgetDiskGalaxy/snapshot_200.hdf5')
+
+    smoothing_field = ('gas', 'density')
+
+    _, center = ds.find_max(smoothing_field)
+
+    sp = ds.sphere(center, (10, 'kpc'))
+
+    normal_vector = sp.quantities.angular_momentum_vector()
+
+    prj = yt.OffAxisProjectionPlot(ds, normal_vector, smoothing_field, center, (20, 'kpc'))
+
+    prj.save()
+
+Smoothing data onto an Octree
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Whilst the move away from the global octree is a promising one in terms of
+performance and dealing with SPH data in a more intuitive manner, it does remove
+a useful feature. We are aware that many users will have older scripts which take
+advantage of the global octree.
+
+As such, we have added support to smooth SPH data onto an octree when desired by
+the users. The new octree is designed to give results consistent with those of
+the previous octree, but it takes advantage of the scatter and gather machinery
+described above.
+
+The following code demonstrates how to smooth data onto such an octree:
+
+.. code-block:: python
+
+    import yt
+    import numpy as np
+
+    ds = yt.load('GadgetDiskGalaxy/snapshot_200.hdf5')
+    left = np.array([0, 0, 0], dtype='float64')
+    right = np.array([64000, 64000, 64000], dtype='float64')
+
+    # generate an octree
+    octree = ds.octree(left, right, n_ref=64)
+
+    # the density will be calculated using SPH scatter
+    density = octree[('PartType0', 'density')]
+
+    # this will return the x positions of the octs
+    x = octree[('index', 'x')]
+
+The above code can be modified to use the gather approach by setting
+``ds.sph_smoothing_style = 'gather'`` before any field access. The octree also
+accepts ``over_refine_factor``, which works just like the ``over_refine_factor``
+parameter in yt-3.0 that could be passed to ``yt.load``, and determines how many
+particles are in each leaf.
+
+The ``density_factor`` keyword allows the construction of dense octrees. In a
+traditional octree, if a leaf has more particles than a critical value `n_ref`,
+then it divides into 8 new children (hence the name oct). The value of
+`density_factor` instead allows the node to divide into 2^(3*density_factor)
+zones. This creates an octree structure similar to that used by AMR codes
+like FLASH that make use of an octree of grid patches.
+
+``yt.units`` is now a wrapper for ``unyt``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have extracted ``yt.units`` into ``unyt``, its own library that you can
+install separately from yt via ``pypi`` and ``conda-forge``.
You can find out
+more about using ``unyt`` in `its documentation
+`_ and in `a paper in the Journal of
+Open Source Software `_.
+
+From the perspective of a yt user, very little should change. While things in
+``unyt`` have different names -- for example, ``YTArray`` is now called
+``unyt_array`` -- we have provided wrappers in ``yt.units``, so imports in your
+old scripts should continue to work without issue. If you have any old scripts
+that don't work due to issues with how yt is using ``unyt``, or due to units issues
+in general, please let us know by `filing an issue on GitHub
+`_.
+
+Moving ``unyt`` into its own library has made it much easier to add some cool
+new features, which we detail below.
+
+``ds.units``
+~~~~~~~~~~~~
+
+Each dataset now has a set of unit symbols and physical constants associated
+with it, allowing easier customization and smoother interaction, especially in
+workflows that need to use code units or cosmological units. The ``ds.units``
+object has a large number of attributes corresponding to the names of units and
+physical constants. All units known to the dataset will be available, including
+custom units. In situations where you might have used ``ds.arr`` or ``ds.quan``
+before, you can now safely use ``ds.units``:
+
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> u = ds.units
+    >>> ad = ds.all_data()
+    >>> data = ad['Enzo', 'Density']
+    >>> data + 12*u.code_mass/u.code_length**3
+    unyt_array([1.21784693e+01, 1.21789148e+01, 1.21788494e+01, ...,
+                4.08936836e+04, 5.78006836e+04, 3.97766906e+05], 'code_mass/code_length**3')
+    >>> data + .0001*u.mh/u.cm**3
+    unyt_array([6.07964513e+01, 6.07968968e+01, 6.07968314e+01, ...,
+                4.09423016e+04, 5.78493016e+04, 3.97815524e+05], 'code_mass/code_length**3')
+
+
+Automatic Unit Simplification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Often an operation will result in a unit expression that can be
+simplified by cancelling pairs of factors. Before yt 4.0, these pairs of factors
+were only cancelled if the same unit appeared in both the numerator and
+denominator of an expression. Now, all pairs of factors that have inverse
+dimensions are cancelled, and the appropriate scaling factor is incorporated
+into the result. For example, ``Hz`` and ``s`` will now appropriately be recognized
+as inverses:
+
+    >>> from yt.units import Hz, s
+    >>> frequency = 60*Hz
+    >>> time = 60*s
+    >>> frequency*time
+    unyt_quantity(3600, '(dimensionless)')
+
+Similar simplifications will happen even if units aren't reciprocals of each
+other; for example, here ``hour`` and ``minute`` automatically cancel each other:
+
+    >>> from yt.units import erg, minute, hour
+    >>> power = [20, 40, 80] * erg / minute
+    >>> elapsed_time = 3*hour
+    >>> print(power*elapsed_time)
+    [ 3600. 7200. 14400.] erg
+
+Alternate Unit Name Resolution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It's now possible to use a number of common alternate spellings for unit names,
+and if ``unyt`` knows about the alternate spelling it will automatically resolve
+it to the canonical name.
For example, it's now possible to do +things like this: + + >>> import yt.units as u + >>> d = 20*u.mile + >>> d.to('km') + unyt_quantity(32.18688, 'km') + >>> d.to('kilometer') + unyt_quantity(32.18688, 'km') + >>> d.to('kilometre') + unyt_quantity(32.18688, 'km') + +You can also use alternate unit names in more complex algebraic unit expressions: + + >>> v = d / (20*u.minute) + >>> v.to('kilometre/hour') + unyt_quantity(96.56064, 'km/hr') + +In this example the common british spelling ``"kilometre"`` is resolved to +``"km"`` and ``"hour"`` is resolved to ``"hr"``. + +API Changes +----------- diff --git a/scripts/pr_backport.py b/scripts/pr_backport.py index 22b6c80c27a..b3f521f5b11 100644 --- a/scripts/pr_backport.py +++ b/scripts/pr_backport.py @@ -4,8 +4,6 @@ import shutil import tempfile -from yt.extern.six.moves import input - API_URL = 'https://api.github.com/graphql' YT_REPO = "https://github.com/yt-project/yt" diff --git a/setup.cfg b/setup.cfg index f4cdfa99bdc..706af9c14df 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,7 +8,7 @@ # unused import errors # autogenerated __config__.py files # vendored libraries -exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py +exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py,yt/units/* max-line-length=999 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E305,E306,E402,E502,E701,E703,E722,E741,E731,W291,W292,W293,W391,W503,W504,W605 jobs=8 diff --git a/setup.py b/setup.py index b18c07ee9a9..0c3f8fea9d0 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,6 @@ import os +import platform +from concurrent.futures import ThreadPoolExecutor as Pool import glob import sys from sys import platform as _platform @@ -12,11 +14,40 @@ read_embree_location, \ in_conda_env from distutils.version import LooseVersion +from distutils.ccompiler import CCompiler import pkg_resources -if sys.version_info < (2, 7) or (3, 0) < sys.version_info < (3, 5): - print("yt currently supports Python 2.7 or versions newer than Python 3.5") +def _get_cpu_count(): + if platform.system() != "Windows": + return os.cpu_count() + return 0 + + +def _compile( + self, sources, output_dir=None, macros=None, include_dirs=None, + debug=0, extra_preargs=None, extra_postargs=None, depends=None, +): + """Function to monkey-patch distutils.ccompiler.CCompiler""" + macros, objects, extra_postargs, pp_opts, build = self._setup_compile( + output_dir, macros, include_dirs, sources, depends, extra_postargs + ) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + + for obj in objects: + try: + src, ext = build[obj] + except KeyError: + continue + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + + # Return *all* object filenames, not just the ones we just built. + return objects + +CCompiler.compile = _compile + +if sys.version_info < (3, 5): + print("yt currently supports versions newer than Python 3.5") print("certain features may fail unexpectedly and silently with older " "versions.") sys.exit(1) @@ -34,7 +65,7 @@ except pkg_resources.DistributionNotFound: pass # yay! 
-VERSION = "3.7.dev0" +VERSION = "4.0.dev0" if os.path.exists('MANIFEST'): os.remove('MANIFEST') @@ -53,12 +84,6 @@ std_libs = ["m"] cython_extensions = [ - Extension("yt.analysis_modules.photon_simulator.utils", - ["yt/analysis_modules/photon_simulator/utils.pyx"], - include_dirs=["yt/utilities/lib"]), - Extension("yt.analysis_modules.ppv_cube.ppv_utils", - ["yt/analysis_modules/ppv_cube/ppv_utils.pyx"], - libraries=std_libs), Extension("yt.geometry.grid_visitors", ["yt/geometry/grid_visitors.pyx"], include_dirs=["yt/utilities/lib"], @@ -78,8 +103,11 @@ libraries=std_libs), Extension("yt.geometry.particle_oct_container", ["yt/geometry/particle_oct_container.pyx"], - include_dirs=["yt/utilities/lib/"], - libraries=std_libs), + include_dirs=["yt/utilities/lib/", + "yt/utilities/lib/ewahboolarray"], + language="c++", + libraries=std_libs, + extra_compile_args=["-std=c++11"]), Extension("yt.geometry.selection_routines", ["yt/geometry/selection_routines.pyx"], include_dirs=["yt/utilities/lib/"], @@ -114,6 +142,28 @@ include_dirs=["yt/utilities/lib/", "yt/geometry/"], libraries=std_libs), + Extension("yt.utilities.lib.cykdtree.kdtree", + [ + "yt/utilities/lib/cykdtree/kdtree.pyx", + "yt/utilities/lib/cykdtree/c_kdtree.cpp", + "yt/utilities/lib/cykdtree/c_utils.cpp", + ], + depends=[ + "yt/utilities/lib/cykdtree/c_kdtree.hpp", + "yt/utilities/lib/cykdtree/c_utils.hpp", + ], + libraries=std_libs, + language="c++", + extra_compile_args=["-std=c++03"]), + Extension("yt.utilities.lib.cykdtree.utils", + [ + "yt/utilities/lib/cykdtree/utils.pyx", + "yt/utilities/lib/cykdtree/c_utils.cpp", + ], + depends=["yt/utilities/lib/cykdtree/c_utils.hpp"], + libraries=std_libs, + language="c++", + extra_compile_args=["-std=c++03"]), Extension("yt.utilities.lib.fnv_hash", ["yt/utilities/lib/fnv_hash.pyx"], include_dirs=["yt/utilities/lib/"], @@ -132,12 +182,26 @@ Extension("yt.utilities.lib.mesh_triangulation", ["yt/utilities/lib/mesh_triangulation.pyx"], depends=["yt/utilities/lib/mesh_triangulation.h"]), + Extension("yt.utilities.lib.particle_kdtree_tools", + ["yt/utilities/lib/particle_kdtree_tools.pyx"], + language="c++"), + Extension("yt.utilities.lib.bounded_priority_queue", + ["yt/utilities/lib/bounded_priority_queue.pyx"]), Extension("yt.utilities.lib.pixelization_routines", ["yt/utilities/lib/pixelization_routines.pyx", "yt/utilities/lib/pixelization_constants.c"], include_dirs=["yt/utilities/lib/"], + extra_compile_args=omp_args, + extra_link_args=omp_args, + language='c++', libraries=std_libs, depends=["yt/utilities/lib/pixelization_constants.h"]), + Extension("yt.utilities.lib.cyoctree", + ["yt/utilities/lib/cyoctree.pyx"], + extra_compile_args=omp_args, + extra_link_args=omp_args, + libraries=std_libs, + language='c++'), Extension("yt.utilities.lib.primitives", ["yt/utilities/lib/primitives.pyx"], libraries=std_libs), @@ -154,6 +218,11 @@ include_dirs=["yt/utilities/lib/"], libraries=std_libs, depends=["yt/utilities/lib/fixed_interpolator.h"]), + Extension("yt.utilities.lib.ewah_bool_wrap", + ["yt/utilities/lib/ewah_bool_wrap.pyx"], + include_dirs=["yt/utilities/lib/", + "yt/utilities/lib/ewahboolarray"], + language="c++"), Extension("yt.utilities.lib.image_samplers", ["yt/utilities/lib/image_samplers.pyx", "yt/utilities/lib/fixed_interpolator.c"], @@ -192,7 +261,7 @@ "particle_mesh_operations", "depth_first_octree", "fortran_reader", "interpolators", "basic_octree", "image_utilities", "points_in_volume", "quad_tree", "mesh_utilities", - "amr_kdtools", "lenses", "distance_queue", 
"allocation_container" + "amr_kdtools", "lenses", "distance_queue", "allocation_container", ] for ext_name in lib_exts: cython_extensions.append( @@ -207,12 +276,6 @@ ["yt/utilities/lib/{}.pyx".format(ext_name)])) extensions = [ - Extension("yt.analysis_modules.halo_finding.fof.EnzoFOF", - ["yt/analysis_modules/halo_finding/fof/EnzoFOF.c", - "yt/analysis_modules/halo_finding/fof/kd.c"], - libraries=std_libs), - Extension("yt.analysis_modules.halo_finding.hop.EnzoHop", - sorted(glob.glob("yt/analysis_modules/halo_finding/hop/*.c"))), Extension("yt.frontends.artio._artio_caller", ["yt/frontends/artio/_artio_caller.pyx"] + sorted(glob.glob("yt/frontends/artio/artio_headers/*.c")), @@ -258,44 +321,6 @@ cython_extensions += embree_extensions -# ROCKSTAR -if os.path.exists("rockstar.cfg"): - try: - rd = open("rockstar.cfg").read().strip() - except IOError: - print("Reading Rockstar location from rockstar.cfg failed.") - print("Please place the base directory of your") - print("Rockstar install in rockstar.cfg and restart.") - print("(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )") - sys.exit(1) - - rockstar_extdir = "yt/analysis_modules/halo_finding/rockstar" - rockstar_extensions = [ - Extension("yt.analysis_modules.halo_finding.rockstar.rockstar_interface", - sources=[os.path.join(rockstar_extdir, "rockstar_interface.pyx")]), - Extension("yt.analysis_modules.halo_finding.rockstar.rockstar_groupies", - sources=[os.path.join(rockstar_extdir, "rockstar_groupies.pyx")]) - ] - for ext in rockstar_extensions: - ext.library_dirs.append(rd) - ext.libraries.append("rockstar") - ext.define_macros.append(("THREADSAFE", "")) - ext.include_dirs += [rd, - os.path.join(rd, "io"), os.path.join(rd, "util")] - extensions += rockstar_extensions - -if os.environ.get("GPERFTOOLS", "no").upper() != "NO": - gpd = os.environ["GPERFTOOLS"] - idir = os.path.join(gpd, "include") - ldir = os.path.join(gpd, "lib") - print(("INCLUDE AND LIB DIRS", idir, ldir)) - cython_extensions.append( - Extension("yt.utilities.lib.perftools_wrap", - ["yt/utilities/lib/perftools_wrap.pyx"], - libraries=["profiler"], - library_dirs=[ldir], - include_dirs=[idir])) - class build_ext(_build_ext): # subclass setuptools extension builder to avoid importing cython and numpy # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869 @@ -308,22 +333,24 @@ def finalize_options(self): """Could not import cython or numpy. Building yt from source requires cython and numpy to be installed. Please install these packages using the appropriate package manager for your python environment.""") - if LooseVersion(cython.__version__) < LooseVersion('0.24'): + if LooseVersion(cython.__version__) < LooseVersion('0.26.1'): raise RuntimeError( -"""Building yt from source requires Cython 0.24 or newer but +"""Building yt from source requires Cython 0.26.1 or newer but Cython %s is installed. Please update Cython using the appropriate package manager for your python environment.""" % cython.__version__) - if LooseVersion(numpy.__version__) < LooseVersion('1.10.4'): + if LooseVersion(numpy.__version__) < LooseVersion('1.13.3'): raise RuntimeError( -"""Building yt from source requires NumPy 1.10.4 or newer but +"""Building yt from source requires NumPy 1.13.3 or newer but NumPy %s is installed. 
Please update NumPy using the appropriate package manager for your python environment.""" % numpy.__version__) from Cython.Build import cythonize self.distribution.ext_modules[:] = cythonize( self.distribution.ext_modules, - compiler_directives={'language_level': 2}) + compiler_directives={'language_level': 2}, + nthreads=_get_cpu_count(), + ) _build_ext.finalize_options(self) # Prevent numpy from thinking it is still in its setup process # see http://stackoverflow.com/a/21621493/1382869 @@ -333,8 +360,20 @@ def finalize_options(self): __builtins__["__NUMPY_SETUP__"] = False else: __builtins__.__NUMPY_SETUP__ = False + import numpy self.include_dirs.append(numpy.get_include()) + def build_extensions(self): + self.check_extensions_list(self.extensions) + + ncpus = _get_cpu_count() + if ncpus > 0: + with Pool(ncpus) as pool: + pool.map(self.build_extension, self.extensions) + else: + super().build_extensions() + + class sdist(_sdist): # subclass setuptools source distribution builder to ensure cython # generated C files are included in source distribution. @@ -345,69 +384,70 @@ def run(self): cythonize( cython_extensions, compiler_directives={'language_level': 2}, + nthreads=_get_cpu_count(), ) _sdist.run(self) -setup( - name="yt", - version=VERSION, - description="An analysis and visualization toolkit for volumetric data", - long_description = long_description, - long_description_content_type='text/markdown', - classifiers=["Development Status :: 5 - Production/Stable", - "Environment :: Console", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: BSD License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: POSIX :: AIX", - "Operating System :: POSIX :: Linux", - "Programming Language :: C", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Topic :: Scientific/Engineering :: Astronomy", - "Topic :: Scientific/Engineering :: Physics", - "Topic :: Scientific/Engineering :: Visualization", - "Framework :: Matplotlib"], - keywords='astronomy astrophysics visualization ' + - 'amr adaptivemeshrefinement', - entry_points={'console_scripts': [ - 'yt = yt.utilities.command_line:run_main', - ], - 'nose.plugins.0.10': [ - 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting' - ] - }, - packages=find_packages(), - include_package_data = True, - install_requires=[ - 'matplotlib>=1.5.3', - 'setuptools>=19.6', - 'sympy>=1.0', - 'numpy>=1.10.4', - 'IPython>=1.0', - ], - extras_require = { - 'hub': ["girder_client"], - 'mapserver': ["bottle"] - }, - cmdclass={'sdist': sdist, 'build_ext': build_ext}, - author="The yt project", - author_email="yt-dev@python.org", - url="https://github.com/yt-project/yt", - project_urls={ - 'Homepage': 'https://yt-project.org/', - 'Documentation': 'https://yt-project.org/doc/', - 'Source': 'https://github.com/yt-project/yt/', - 'Tracker': 'https://github.com/yt-project/yt/issues' - }, - license="BSD 3-Clause", - zip_safe=False, - scripts=["scripts/iyt"], - ext_modules=cython_extensions + extensions, - python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' -) +if __name__ == "__main__": + setup( + name="yt", + version=VERSION, + description="An analysis and visualization toolkit for volumetric data", + long_description = long_description, + 
long_description_content_type='text/markdown', + classifiers=["Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: BSD License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: POSIX :: AIX", + "Operating System :: POSIX :: Linux", + "Programming Language :: C", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Topic :: Scientific/Engineering :: Astronomy", + "Topic :: Scientific/Engineering :: Physics", + "Topic :: Scientific/Engineering :: Visualization", + "Framework :: Matplotlib"], + keywords='astronomy astrophysics visualization ' + + 'amr adaptivemeshrefinement', + entry_points={'console_scripts': [ + 'yt = yt.utilities.command_line:run_main', + ], + 'nose.plugins.0.10': [ + 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting' + ] + }, + packages=find_packages(), + include_package_data = True, + install_requires=[ + 'matplotlib>=1.5.3', + 'setuptools>=19.6', + 'sympy>=1.2', + 'numpy>=1.10.4', + 'IPython>=1.0', + 'unyt>=2.2.2', + ], + extras_require = { + 'hub': ["girder_client"], + 'mapserver': ["bottle"] + }, + cmdclass={'sdist': sdist, 'build_ext': build_ext}, + author="The yt project", + author_email="yt-dev@python.org", + url="https://github.com/yt-project/yt", + project_urls={ + 'Homepage': 'https://yt-project.org/', + 'Documentation': 'https://yt-project.org/doc/', + 'Source': 'https://github.com/yt-project/yt/', + 'Tracker': 'https://github.com/yt-project/yt/issues' + }, + license="BSD 3-Clause", + zip_safe=False, + scripts=["scripts/iyt"], + ext_modules=cython_extensions + extensions, + python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' + ) diff --git a/tests/nose_runner.py b/tests/nose_runner.py index 54501aa667d..0ec1e0d52f2 100644 --- a/tests/nose_runner.py +++ b/tests/nose_runner.py @@ -9,7 +9,7 @@ concurrency="multiprocessing") cov.start() -from yt.extern.six import StringIO +from io import StringIO from yt.config import ytcfg from yt.utilities.answer_testing.framework import AnswerTesting import numpy diff --git a/tests/report_failed_answers.py b/tests/report_failed_answers.py index 280fcb89b08..59ceab60bb3 100644 --- a/tests/report_failed_answers.py +++ b/tests/report_failed_answers.py @@ -4,13 +4,6 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2018, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
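
The setup() call in the hunk above is now wrapped in an if __name__ == "__main__" guard, and the new build_extensions() compiles extensions through a multiprocessing.Pool sized by _get_cpu_count() (defined in an earlier hunk of setup.py). Guarding the module-level setup() call is the usual precaution when a build uses multiprocessing, since spawn-based platforms re-import setup.py in each worker process. A minimal, self-contained sketch of that pattern (the worker function here is a toy, not from this patch):

    from multiprocessing import Pool

    def compile_one(name):
        # Stand-in for build_extension(ext); real work happens per extension.
        return "built %s" % name

    if __name__ == "__main__":
        with Pool(2) as pool:
            print(pool.map(compile_one, ["ext_a", "ext_b", "ext_c"]))
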
-#----------------------------------------------------------------------------- from __future__ import print_function diff --git a/tests/test_install_script.py b/tests/test_install_script.py index 3eb04bc176d..a1cb4cc77c3 100644 --- a/tests/test_install_script.py +++ b/tests/test_install_script.py @@ -24,22 +24,19 @@ OPTIONAL_DEPS = [ 'embree', 'pyx', - 'rockstar', 'scipy', 'astropy', 'cartopy', + 'pooch', ] # dependencies that are only installable when yt is built from source YT_SOURCE_ONLY_DEPS = [ 'embree', - 'rockstar' ] DEPENDENCY_IMPORT_TESTS = { 'embree': "from yt.utilities.lib import mesh_traversal", - 'rockstar': ("from yt.analysis_modules.halo_finding.rockstar " - "import rockstar_interface") } diff --git a/tests/test_minimal_requirements.txt b/tests/test_minimal_requirements.txt index 72481ed2cd7..a8243558a1b 100644 --- a/tests/test_minimal_requirements.txt +++ b/tests/test_minimal_requirements.txt @@ -1,6 +1,6 @@ ipython==1.0.0 matplotlib==1.5.3 -sympy==1.0 +sympy==1.2 nose==1.3.7 nose-timer==0.7.3 pytest~=5.2; python_version >= '3.0' @@ -8,4 +8,4 @@ pytest~=4.6; python_version < '3.0' pyyaml>=4.2b1 coverage==4.5.1 codecov==2.0.15 -mock==2.0.0; python_version < '3.0' +git+https://github.com/yt-project/unyt@de443dff7671f1e68557306d77582cd117cc94f8#egg=unyt diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index d1e8499cb81..3e4c21dccba 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -1,34 +1,30 @@ -astropy==3.0.5; python_version >= '3.0' -astropy==2.0.9; python_version < '3.0' +astropy==3.0.5 codecov==2.0.15 coverage==4.5.1 fastcache==1.0.2 -glueviz==0.13.3; python_version >= '3.0' -h5py==2.8.0 -ipython==7.1.1; python_version >= '3.0' -ipython==5.8.0; python_version < '3.0' -matplotlib==3.1.3; python_version >= '3.0' -matplotlib==2.2.3; python_version < '3.0' +glueviz==0.13.3 +h5py==2.10.0 +ipython==7.1.1 +matplotlib==3.1.3 mock==2.0.0; python_version < '3.0' nose-timer==0.7.3 nose==1.3.7 pandas==0.23.4 -pytest~=5.2; python_version >= '3.0' -pytest~=4.6; python_version < '3.0' +pytest~=5.2 requests==2.20.0 -scipy==1.1.0; python_version < '3.0' -scipy==1.3.3; python_version >= '3.0' +scipy==1.3.3 sympy==1.5 -pyqt5==5.11.3; python_version >= '3.0' -thingking==1.0.2; python_version < '3.0' +pyqt5==5.11.3 pint==0.8.1 -netCDF4==1.4.2; python_version < '3.0' -netCDF4==1.5.3; python_version >= '3.0' +netCDF4==1.5.3 libconf==1.0.1 cartopy==0.17.0 pyaml==17.10.0 mpi4py==3.0.3 +git+https://github.com/yt-project/unyt@de443dff7671f1e68557306d77582cd117cc94f8#egg=unyt pyyaml>=4.2b1 -xarray==0.12.3 ; python_version >= '3.0' +xarray==0.12.3 firefly_api>=0.0.2 f90nml>=1.1.2 +MiniballCpp>=0.2.1 +pooch>=0.7.0 diff --git a/tests/tests.yaml b/tests/tests.yaml index cd60e3f7b13..736e63d6176 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -3,55 +3,55 @@ answer_tests: local_amrvac_004: - yt/frontends/amrvac/tests/test_outputs.py - local_artio_002: + local_arepo_005: + - yt/frontends/arepo/tests/test_outputs.py + + local_artio_003: - yt/frontends/artio/tests/test_outputs.py - local_athena_006: + local_athena_007: - yt/frontends/athena - local_athena_pp_002: + local_athena_pp_003: - yt/frontends/athena_pp - local_chombo_003: + local_chombo_004: - yt/frontends/chombo/tests/test_outputs.py local_enzo_006: - yt/frontends/enzo - local_enzo_p_006: + local_enzo_p_007: - yt/frontends/enzo_p/tests/test_outputs.py local_fits_003: - yt/frontends/fits/tests/test_outputs.py - local_flash_010: + local_flash_012: - yt/frontends/flash/tests/test_outputs.py - 
local_gadget_002: + local_gadget_003: - yt/frontends/gadget/tests/test_outputs.py - local_gamer_006: + local_gamer_007: - yt/frontends/gamer/tests/test_outputs.py local_gdf_001: - yt/frontends/gdf/tests/test_outputs.py - local_gizmo_003: + local_gizmo_004: - yt/frontends/gizmo/tests/test_outputs.py local_halos_009: - - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py - - yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py - - yt/analysis_modules/halo_finding/tests/test_rockstar.py - yt/frontends/ahf/tests/test_outputs.py - - yt/frontends/owls_subfind/tests/test_outputs.py + # - yt/frontends/owls_subfind/tests/test_outputs.py - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5 - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42 - local_owls_003: + local_owls_004: - yt/frontends/owls/tests/test_outputs.py - local_pw_028: + local_pw_029: - yt/visualization/tests/test_plotwindow.py:test_attributes - yt/visualization/tests/test_plotwindow.py:test_attributes_wt - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers @@ -59,18 +59,13 @@ answer_tests: - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers - yt/visualization/tests/test_raw_field_slices.py:test_raw_field_slices - local_tipsy_003: + local_tipsy_005: - yt/frontends/tipsy/tests/test_outputs.py - local_varia_011: - - yt/analysis_modules/radmc3d_export + local_varia_015: - yt/frontends/moab/tests/test_c5.py - yt/fields/tests/test_xray_fields.py - local_photon_002: - - yt/analysis_modules/photon_simulator/tests/test_spectra.py - - yt/analysis_modules/photon_simulator/tests/test_sloshing.py - local_unstructured_011: - yt/visualization/volume_rendering/tests/test_mesh_render.py:test_composite_mesh_render - yt/visualization/volume_rendering/tests/test_mesh_render.py:test_composite_mesh_render_pyembree @@ -89,7 +84,7 @@ answer_tests: - yt/visualization/volume_rendering/tests/test_mesh_render.py:test_wedge6_render - yt/visualization/volume_rendering/tests/test_mesh_render.py:test_wedge6_render_pyembree - local_boxlib_009: + local_boxlib_010: - yt/frontends/boxlib/tests/test_outputs.py:test_radadvect - yt/frontends/boxlib/tests/test_outputs.py:test_radtube - yt/frontends/boxlib/tests/test_outputs.py:test_star @@ -113,29 +108,18 @@ answer_tests: local_ramses_002: - yt/frontends/ramses/tests/test_outputs.py - local_ytdata_006: + local_ytdata_007: - yt/frontends/ytdata/tests/test_outputs.py - yt/frontends/ytdata/tests/test_old_outputs.py - local_absorption_spectrum_007: - - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo - - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec - - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo - - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph - - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph - - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_with_continuum - local_axialpix_006: - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization local_cylindrical_background_002: - yt/geometry/coordinates/tests/test_cylindrical_coordinates.py:test_noise_plots - local_particle_trajectory_001: - - yt/data_objects/tests/test_particle_trajectories.py - - 
local_light_cone_002: - - yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py + #local_particle_trajectory_001: + # - yt/data_objects/tests/test_particle_trajectories.py other_tests: unittests: diff --git a/yt/__init__.py b/yt/__init__.py index 97da62d74fe..da7526706af 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -7,16 +7,11 @@ * Contribute: https://github.com/yt-project/yt """ +import sys +if sys.version_info[0] < 3: + raise Exception("Python 2 no longer supported. Please install Python 3 for use with yt.") -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -__version__ = "3.7.dev0" +__version__ = "4.0.dev0" # First module imports import numpy as np # For modern purposes @@ -44,7 +39,7 @@ import yt.utilities.physical_constants as physical_constants import yt.units as units from yt.units.unit_object import define_unit -from yt.units.yt_array import \ +from yt.units import \ YTArray, \ YTQuantity, \ uconcatenate, \ @@ -126,16 +121,15 @@ from yt.convenience import \ load, simulation +from yt.utilities.load_sample import load_sample + from yt.testing import run_nose # Import some helpful math utilities from yt.utilities.math_utils import \ ortho_find, quartiles, periodic_position -from yt.units.unit_systems import UnitSystem -from yt.units.unit_object import unit_system_registry - -from yt.analysis_modules.list_modules import \ - amods +from yt.units.unit_systems import \ + UnitSystem, unit_system_registry _called_from_pytest = False diff --git a/yt/analysis_modules/absorption_spectrum/__init__.py b/yt/analysis_modules/absorption_spectrum/__init__.py index 18ea5c8cef3..e69de29bb2d 100644 --- a/yt/analysis_modules/absorption_spectrum/__init__.py +++ b/yt/analysis_modules/absorption_spectrum/__init__.py @@ -1,14 +0,0 @@ -""" -Import stuff for light cone generator. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/analysis_modules/absorption_spectrum/absorption_line.py b/yt/analysis_modules/absorption_spectrum/absorption_line.py deleted file mode 100644 index aad9ae8d9b4..00000000000 --- a/yt/analysis_modules/absorption_spectrum/absorption_line.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -Absorption line generating functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
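
With load_sample now re-exported from yt/__init__.py (and pooch added to the test requirements above), sample datasets can be fetched and loaded in one call. A usage sketch, assuming network access; the dataset name is only illustrative here:

    import yt

    # Downloads the sample on first use, then loads it like any other dataset.
    ds = yt.load_sample("IsolatedGalaxy")
    print(ds.domain_width.in_units("kpc"))
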
-#----------------------------------------------------------------------------- - -import numpy as np -from yt.utilities.physical_constants import \ - charge_proton_cgs, \ - mass_electron_cgs, \ - speed_of_light_cgs -from yt.utilities.on_demand_imports import _scipy, NotAModule - -special = _scipy.special -tau_factor = None -_cs = None - - -def voigt_scipy(a, u): - x = np.asarray(u).astype(np.float64) - y = np.asarray(a).astype(np.float64) - return special.wofz(x + 1j * y).real - - -def voigt_old(a, u): - """ - NAME: - VOIGT - PURPOSE: - Implementation of Voigt function - CATEGORY: - Math - CALLING SEQUENCE: - voigt=Voigt(a,u) - INPUTS: - A = Voigt "A" parameter. - U = Frequency in units of the Doppler frequency. - - The line profile "Phi(v)", the doppler width - "Delv", the voigt parameter "a", and the frequency "u" - are given by: - - Phi(v) = Voigt(a,u)/[ Delv * sqrt(pi) ] - Delv = Vo/c * sqrt[ 2kT/m ] - u = V - Vo / Delv - a = GAMMA / [ Delv * 4pi ] - Gamma = Gu + Gl + 2*Vcol - "Gu" and "Gl" are the widths of the upper and lower states - "Vcol" is the collisions per unit time - "Vo" is the line center frequency - - OUTPUTS: - An array of the same type as u - RESTRICTIONS: - U must be an array, a should not be. Also this procedure is only - valid for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should - be most astrophysical conditions (see the article below for further - comments - PROCEDURE: - Follows procedure in Armstrong JQSRT 7, 85 (1967) - also the same as the intrinsic in the previous version of IDL - MODIFICATION HISTORY: - J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong) - Sep 1990 (better overflow checking) - """ - x = np.asarray(u).astype(np.float64) - y = np.asarray(a).astype(np.float64) - - # Hummer's Chebyshev Coefficients - c = (0.1999999999972224, -0.1840000000029998, 0.1558399999965025, - -0.1216640000043988, 0.0877081599940391, -0.0585141248086907, - 0.0362157301623914, -0.0208497654398036, 0.0111960116346270, - -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2, - 0.4899519978088e-3, -0.1933630801528e-3, 0.722877446788e-4, - -0.256555124979e-4, 0.86620736841e-5, -0.27876379719e-5, - 0.8566873627e-6, -0.2518433784e-6, 0.709360221e-7, - -0.191732257e-7, 0.49801256e-8, -0.12447734e-8, - 0.2997777e-9, -0.696450e-10, 0.156262e-10, - -0.33897e-11, 0.7116e-12, -0.1447e-12, - 0.285e-13, -0.55e-14, 0.10e-14, - -0.2e-15) - - y2 = y * y - - # limits are y<1., x<4 or y<1.8(x+1), x>4 (no checking performed) - u1 = np.exp(-x * x + y2) * np.cos(2. * x * y) - - # Clenshaw's Algorithm - bno1 = np.zeros(x.shape) - bno2 = np.zeros(x.shape) - x1 = np.clip((x / 5.), -np.inf, 1.) - coef = 4. * x1 * x1 - 2. - for i in range(33, -1, -1): - bn = coef * bno1 - bno2 + c[i] - bno2 = np.copy(bno1) - bno1 = np.copy(bn) - - f = x1 * (bn - bno2) - dno1 = 1. - 2. * x * f - dno2 = f - - q = np.abs(x) > 5 - if q.any(): - x14 = np.power(np.clip(x[q], -np.inf, 500.), 14) - x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12) - x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10) - x8 = np.power(np.clip(x[q], -np.inf, 50000.), 8) - x6 = np.power(np.clip(x[q], -np.inf, 1.e6), 6) - x4 = np.power(np.clip(x[q], -np.inf, 1.e9), 4) - x2 = np.power(np.clip(x[q], -np.inf, 1.e18), 2) - dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + - 6.5625 / x8 + 29.53125 / x10 + - 162.4218 / x12 + 1055.7421 / x14) - dno2[q] = (1. - dno1[q]) / (2. * x[q]) - - funct = y * dno1 - if (y > 1.e-8).any(): - q = 1.0 - yn = y - for i in range(2, 51): - dn = (x * dno1 + dno2) * (-2. 
/ i) - dno2 = dno1 - dno1 = dn - if (i % 2) == 1: - q = -q - yn = yn * y2 - g = dn.astype(np.float64) * yn - funct = funct + q * g - if np.max(np.abs(g / funct)) <= 1.e-8: - break - - k1 = u1 - 1.12837917 * funct - k1 = k1.astype(np.float64).clip(0) - return k1 - - -def tau_profile(lambda_0, f_value, gamma, v_doppler, column_density, - delta_v=None, delta_lambda=None, - lambda_bins=None, n_lambda=12000, dlambda=0.01): - r""" - Create an optical depth vs. wavelength profile for an - absorption line using a voigt profile. - - Parameters - ---------- - - lambda_0 : float in angstroms - central wavelength. - f_value : float - absorption line f-value. - gamma : float - absorption line gamma value. - v_doppler : float in cm/s - doppler b-parameter. - column_density : float in cm^-2 - column density. - delta_v : float in cm/s - velocity offset from lambda_0. - Default: None (no shift). - delta_lambda : float in angstroms - wavelength offset. - Default: None (no shift). - lambda_bins : array in angstroms - wavelength array for line deposition. If None, one will be - created using n_lambda and dlambda. - Default: None. - n_lambda : int - size of lambda bins to create if lambda_bins is None. - Default: 12000. - dlambda : float in angstroms - lambda bin width in angstroms if lambda_bins is None. - Default: 0.01. - - """ - global tau_factor - if tau_factor is None: - tau_factor = ( - np.sqrt(np.pi) * charge_proton_cgs ** 2 / - (mass_electron_cgs * speed_of_light_cgs) - ).in_cgs().d - - global _cs - if _cs is None: - _cs = speed_of_light_cgs.d[()] - - # shift lambda_0 by delta_v - if delta_v is not None: - lam1 = lambda_0 * (1 + delta_v / _cs) - elif delta_lambda is not None: - lam1 = lambda_0 + delta_lambda - else: - lam1 = lambda_0 - - # conversions - nudop = 1e8 * v_doppler / lam1 # doppler width in Hz - - # create wavelength - if lambda_bins is None: - lambda_bins = lam1 + \ - np.arange(n_lambda, dtype=np.float) * dlambda - \ - n_lambda * dlambda / 2 # wavelength vector (angstroms) - - # tau_0 - tau_X = tau_factor * column_density * f_value / v_doppler - tau0 = tau_X * lambda_0 * 1e-8 - - # dimensionless frequency offset in units of doppler freq - x = _cs / v_doppler * (lam1 / lambda_bins - 1.0) - a = gamma / (4.0 * np.pi * nudop) # damping parameter - phi = voigt(a, x) # line profile - tauphi = tau0 * phi # profile scaled with tau0 - - return (lambda_bins, tauphi) - -if isinstance(special, NotAModule): - voigt = voigt_old -else: - voigt = voigt_scipy diff --git a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py deleted file mode 100644 index e9b28d85893..00000000000 --- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py +++ /dev/null @@ -1,642 +0,0 @@ -""" -AbsorptionSpectrum class and member functions. - - - -""" - -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
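
The deleted absorption_line.py prefers voigt_scipy when SciPy is importable and falls back to the Chebyshev-based voigt_old otherwise. The SciPy route evaluates the Voigt function H(a, u) as the real part of the Faddeeva function, which is all the removed voigt_scipy did; a minimal sketch, assuming SciPy is installed:

    import numpy as np
    from scipy.special import wofz

    def voigt(a, u):
        # H(a, u) = Re[w(u + i*a)], the relation used by the removed voigt_scipy.
        u = np.asarray(u, dtype=np.float64)
        a = np.asarray(a, dtype=np.float64)
        return wofz(u + 1j * a).real

    print(voigt(0.1, np.array([0.0, 1.0, 2.0])))
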
-#----------------------------------------------------------------------------- - -from yt.utilities.on_demand_imports import _h5py as h5py -import numpy as np - -from .absorption_line import tau_profile - -from yt.extern.six import string_types -from yt.convenience import load -from yt.funcs import get_pbar, mylog -from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.physical_constants import \ - boltzmann_constant_cgs, \ - speed_of_light_cgs -from yt.utilities.on_demand_imports import _astropy -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - _get_comm, \ - parallel_objects, \ - parallel_root_only - -pyfits = _astropy.pyfits - -class AbsorptionSpectrum(object): - r"""Create an absorption spectrum object. - - Parameters - ---------- - - lambda_min : float - lower wavelength bound in angstroms. - lambda_max : float - upper wavelength bound in angstroms. - n_lambda : int - number of wavelength bins. - """ - - def __init__(self, lambda_min, lambda_max, n_lambda): - self.n_lambda = int(n_lambda) - # lambda, flux, and tau are wavelength, flux, and optical depth - self.lambda_min = lambda_min - self.lambda_max = lambda_max - self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max, - n_lambda), "angstrom") - self.tau_field = None - self.flux_field = None - self.absorbers_list = None - self.bin_width = YTQuantity((lambda_max - lambda_min) / - float(n_lambda - 1), "angstrom") - self.line_list = [] - self.continuum_list = [] - - def add_line(self, label, field_name, wavelength, - f_value, gamma, atomic_mass, - label_threshold=None): - r"""Add an absorption line to the list of lines included in the spectrum. - - Parameters - ---------- - - label : string - label for the line. - field_name : string - field name from ray data for column densities. - wavelength : float - line rest wavelength in angstroms. - f_value : float - line f-value. - gamma : float - line gamma value. - atomic_mass : float - mass of atom in amu. - """ - self.line_list.append({'label': label, 'field_name': field_name, - 'wavelength': YTQuantity(wavelength, "angstrom"), - 'f_value': f_value, - 'gamma': gamma, - 'atomic_mass': YTQuantity(atomic_mass, "amu"), - 'label_threshold': label_threshold}) - - def add_continuum(self, label, field_name, wavelength, - normalization, index): - """ - Add a continuum feature that follows a power-law. - - Parameters - ---------- - - label : string - label for the feature. - field_name : string - field name from ray data for column densities. - wavelength : float - line rest wavelength in angstroms. - normalization : float - the column density normalization. - index : float - the power-law index for the wavelength dependence. - """ - - self.continuum_list.append({'label': label, 'field_name': field_name, - 'wavelength': wavelength, - 'normalization': normalization, - 'index': index}) - - def make_spectrum(self, input_file, output_file=None, - line_list_file=None, output_absorbers_file=None, - use_peculiar_velocity=True, - subgrid_resolution=10, observing_redshift=0., - njobs="auto"): - """ - Make spectrum from ray data using the line list. - - Parameters - ---------- - - input_file : string or dataset - path to input ray data or a loaded ray dataset - output_file : optional, string - Option to save a file containing the wavelength, flux, and optical - depth fields. File formats are chosen based on the filename - extension. ``.h5`` for hdf5, ``.fits`` for fits, and everything - else is ASCII. 
- Default: None - output_absorbers_file : optional, string - Option to save a text file containing all of the absorbers and - corresponding wavelength and redshift information. - For parallel jobs, combining the lines lists can be slow so it - is recommended to set to None in such circumstances. - Default: None - use_peculiar_velocity : optional, bool - if True, include peculiar velocity for calculating doppler redshift - to shift lines. Requires similar flag to be set in LightRay - generation. - Default: True - subgrid_resolution : optional, int - When a line is being added that is unresolved (ie its thermal - width is less than the spectral bin width), the voigt profile of - the line is deposited into an array of virtual wavelength bins at - higher resolution. The optical depth from these virtual bins is - integrated and then added to the coarser spectral wavelength bin. - The subgrid_resolution value determines the ratio between the - thermal width and the bin width of the virtual bins. Increasing - this value yields smaller virtual bins, which increases accuracy, - but is more expensive. A value of 10 yields accuracy to the 4th - significant digit in tau. - Default: 10 - observing_redshift : optional, float - This is the redshift at which the observer is observing - the absorption spectrum. - Default: 0 - njobs : optional, int or "auto" - the number of process groups into which the loop over - absorption lines will be divided. If set to -1, each - absorption line will be deposited by exactly one processor. - If njobs is set to a value less than the total number of - available processors (N), then the deposition of an - individual line will be parallelized over (N / njobs) - processors. If set to "auto", it will first try to - parallelize over the list of lines and only parallelize - the line deposition if there are more processors than - lines. This is the optimal strategy for parallelizing - spectrum generation. - Default: "auto" - """ - if line_list_file is not None: - mylog.info("'line_list_file' keyword is deprecated. Please use " \ - "'output_absorbers_file'.") - output_absorbers_file = line_list_file - - input_fields = ['dl', 'redshift', 'temperature'] - field_units = {"dl": "cm", "redshift": "", "temperature": "K"} - if use_peculiar_velocity: - input_fields.append('velocity_los') - input_fields.append('redshift_eff') - field_units["velocity_los"] = "cm/s" - field_units["redshift_eff"] = "" - if observing_redshift != 0.: - input_fields.append('redshift_dopp') - field_units["redshift_dopp"] = "" - for feature in self.line_list + self.continuum_list: - if not feature['field_name'] in input_fields: - input_fields.append(feature['field_name']) - field_units[feature["field_name"]] = "cm**-3" - - if isinstance(input_file, string_types): - input_ds = load(input_file) - else: - input_ds = input_file - field_data = input_ds.all_data() - - # temperature field required to calculate voigt profile widths - if ('temperature' not in input_ds.derived_field_list) and \ - (('gas', 'temperature') not in input_ds.derived_field_list): - raise RuntimeError( - "('gas', 'temperature') field required to be present in %s " - "for AbsorptionSpectrum to function." 
% input_file) - - self.tau_field = np.zeros(self.lambda_field.size) - self.absorbers_list = [] - - if njobs == "auto": - comm = _get_comm(()) - njobs = min(comm.size, len(self.line_list)) - - mylog.info("Creating spectrum") - self._add_lines_to_spectrum(field_data, use_peculiar_velocity, - output_absorbers_file, - subgrid_resolution=subgrid_resolution, - observing_redshift=observing_redshift, - njobs=njobs) - self._add_continua_to_spectrum(field_data, use_peculiar_velocity, - observing_redshift=observing_redshift) - - self.flux_field = np.exp(-self.tau_field) - - if output_file is None: - pass - elif output_file.endswith('.h5'): - self._write_spectrum_hdf5(output_file) - elif output_file.endswith('.fits'): - self._write_spectrum_fits(output_file) - else: - self._write_spectrum_ascii(output_file) - if output_absorbers_file is not None: - self._write_absorbers_file(output_absorbers_file) - - del field_data - return (self.lambda_field, self.flux_field) - - def _apply_observing_redshift(self, field_data, use_peculiar_velocity, - observing_redshift): - """ - Change the redshifts of individual absorbers to account for the - redshift at which the observer sits. - - The intermediate redshift that is seen by an observer - at a redshift other than z=0 is z12, where z1 is the - observing redshift and z2 is the emitted photon's redshift - Hogg (2000) eq. 13: - - 1 + z12 = (1 + z2) / (1 + z1) - """ - if observing_redshift == 0.: - # This is already assumed in the generation of the LightRay - redshift = field_data['redshift'] - if use_peculiar_velocity: - redshift_eff = field_data['redshift_eff'] - else: - # The intermediate redshift that is seen by an observer - # at a redshift other than z=0 is z12, where z1 is the - # observing redshift and z2 is the emitted photon's redshift - # Hogg (2000) eq. 13: - # 1 + z12 = (1 + z2) / (1 + z1) - redshift = ((1 + field_data['redshift']) / \ - (1 + observing_redshift)) - 1. - # Combining cosmological redshift and doppler redshift - # into an effective redshift is found in Peacock's - # Cosmological Physics eqn 3.75: - # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler) - if use_peculiar_velocity: - redshift_eff = ((1 + redshift) * \ - (1 + field_data['redshift_dopp'])) - 1. - - if not use_peculiar_velocity: - redshift_eff = redshift - - return redshift, redshift_eff - - def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity, - observing_redshift=0.): - """ - Add continuum features to the spectrum. Continuua are recorded as - a name, associated field, wavelength, normalization value, and index. - Continuua are applied at and below the denoted wavelength, where the - optical depth decreases as a power law of desired index. For positive - index values, this means optical depth is highest at the denoted - wavelength, and it drops with shorter and shorter wavelengths. - Consequently, transmitted flux undergoes a discontinuous cutoff at the - denoted wavelength, and then slowly increases with decreasing wavelength - according to the power law. - """ - # Change the redshifts of continuum sources to account for the - # redshift at which the observer sits - redshift, redshift_eff = self._apply_observing_redshift(field_data, - use_peculiar_velocity, observing_redshift) - - # min_tau is the minimum optical depth value that warrants - # accounting for an absorber. for a single absorber, noticeable - # continuum effects begin for tau = 1e-3 (leading to transmitted - # flux of e^-tau ~ 0.999). 
but we apply a cutoff to remove - # absorbers with insufficient column_density to contribute - # significantly to a continuum (see below). because lots of - # low column density absorbers can add up to a significant - # continuum effect, we normalize min_tau by the n_absorbers. - n_absorbers = field_data['dl'].size - min_tau = 1.e-3/n_absorbers - - for continuum in self.continuum_list: - - # Normalization is in cm**-2, so column density must be as well - column_density = (field_data[continuum['field_name']] * - field_data['dl']).in_units('cm**-2') - if (column_density == 0).all(): - mylog.info("Not adding continuum %s: insufficient column density" % continuum['label']) - continue - - # redshift_eff field combines cosmological and velocity redshifts - if use_peculiar_velocity: - delta_lambda = continuum['wavelength'] * redshift_eff - else: - delta_lambda = continuum['wavelength'] * redshift - - # right index of continuum affected area is wavelength itself - this_wavelength = delta_lambda + continuum['wavelength'] - right_index = np.digitize(this_wavelength, - self.lambda_field).clip(0, self.n_lambda) - # left index of continuum affected area wavelength at which - # optical depth reaches tau_min - left_index = np.digitize((this_wavelength * - np.power((min_tau * continuum['normalization'] / - column_density), - (1. / continuum['index']))), - self.lambda_field).clip(0, self.n_lambda) - - # Only calculate the effects of continuua where normalized - # column_density is greater than min_tau - # because lower column will not have significant contribution - valid_continuua = np.where(((column_density / - continuum['normalization']) > min_tau) & - (right_index - left_index > 1))[0] - if valid_continuua.size == 0: - mylog.info("Not adding continuum %s: insufficient column density or out of range" % - continuum['label']) - continue - - pbar = get_pbar("Adding continuum - %s [%f A]: " % \ - (continuum['label'], continuum['wavelength']), - valid_continuua.size) - - # Tau value is (wavelength / continuum_wavelength)**index / - # (column_dens / norm) - # i.e. a power law decreasing as wavelength decreases - - # Step through the absorber list and add continuum tau for each to - # the total optical depth for all wavelengths - for i, lixel in enumerate(valid_continuua): - cont_tau = \ - np.power((self.lambda_field[left_index[lixel] : - right_index[lixel]] / - this_wavelength[lixel]), \ - continuum['index']) * \ - (column_density[lixel] / continuum['normalization']) - self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau.d - pbar.update(i) - pbar.finish() - - def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity, - output_absorbers_file, subgrid_resolution=10, - observing_redshift=0., njobs=-1): - """ - Add the absorption lines to the spectrum. - """ - - # Change the redshifts of individual absorbers to account for the - # redshift at which the observer sits - redshift, redshift_eff = self._apply_observing_redshift(field_data, - use_peculiar_velocity, observing_redshift) - - # Widen wavelength window until optical depth falls below this tau - # value at the ends to assure that the wings of a line have been - # fully resolved. - min_tau = 1e-3 - - # step through each ionic transition (e.g. HI, HII, MgII) specified - # and deposit the lines into the spectrum - for line in parallel_objects(self.line_list, njobs=njobs): - column_density = field_data[line['field_name']] * field_data['dl'] - if (column_density < 0).any(): - mylog.warn("Setting negative densities for field %s to 0! Bad!" 
% line['field_name']) - np.clip(column_density, 0, np.inf, out=column_density) - if (column_density == 0).all(): - mylog.info("Not adding line %s: insufficient column density" % line['label']) - continue - - # redshift_eff field combines cosmological and velocity redshifts - # so delta_lambda gives the offset in angstroms from the rest frame - # wavelength to the observed wavelength of the transition - if use_peculiar_velocity: - delta_lambda = line['wavelength'] * redshift_eff - else: - delta_lambda = line['wavelength'] * redshift - # lambda_obs is central wavelength of line after redshift - lambda_obs = line['wavelength'] + delta_lambda - # the total number of absorbers per transition - n_absorbers = len(lambda_obs) - - # we want to know the bin index in the lambda_field array - # where each line has its central wavelength after being - # redshifted. however, because we don't know a priori how wide - # a line will be (ie DLAs), we have to include bin indices - # *outside* the spectral range of the AbsorptionSpectrum - # object. Thus, we find the "equivalent" bin index, which - # may be <0 or >the size of the array. In the end, we deposit - # the bins that actually overlap with the AbsorptionSpectrum's - # range in lambda. - - # this equation gives us the "equivalent" bin index for each line - # if it were placed into the self.lambda_field array - center_index = (lambda_obs.in_units('Angstrom').d - self.lambda_min) \ - / self.bin_width.d - center_index = np.ceil(center_index).astype('int') - - # thermal broadening b parameter - thermal_b = np.sqrt((2 * boltzmann_constant_cgs * - field_data['temperature']) / - line['atomic_mass']) - - # the actual thermal width of the lines - thermal_width = (lambda_obs * thermal_b / - speed_of_light_cgs).convert_to_units("angstrom") - - # Sanitize units for faster runtime of the tau_profile machinery. - lambda_0 = line['wavelength'].d # line's rest frame; angstroms - cdens = column_density.in_units("cm**-2").d # cm**-2 - thermb = thermal_b.in_cgs().d # thermal b coefficient; cm / s - dlambda = delta_lambda.d # lambda offset; angstroms - if use_peculiar_velocity: - vlos = field_data['velocity_los'].in_units("km/s").d # km/s - else: - vlos = np.zeros(field_data['temperature'].size) - - # When we actually deposit the voigt profile, sometimes we will - # have underresolved lines (ie lines with smaller widths than - # the spectral bin size). Here, we create virtual wavelength bins - # small enough in width to well resolve each line, deposit the - # voigt profile into them, then numerically integrate their tau - # values and sum them to redeposit them into the actual spectral - # bins. 
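
The comment block above explains that under-resolved lines are deposited onto finer virtual wavelength bins, whose optical depths are then integrated back onto the coarse spectral grid. A toy sketch of that re-binning step, with all numbers illustrative:

    import numpy as np

    n_sub, n_coarse = 10, 4                         # virtual bins per coarse bin
    vbin_width, bin_width = 0.001, 0.01             # angstroms
    vtau = np.linspace(0.0, 1.0, n_sub * n_coarse)  # stand-in for the Voigt taus
    vEW = vtau * vbin_width                         # equivalent width per virtual bin
    # Sum each group of n_sub virtual bins, then express as tau per coarse bin.
    EW = vEW.reshape(n_coarse, n_sub).sum(axis=1) / bin_width
    print(EW)
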
- - # virtual bins (vbins) will be: - # 1) <= the bin_width; assures at least as good as spectral bins - # 2) <= 1/10th the thermal width; assures resolving voigt profiles - # (actually 1/subgrid_resolution value, default is 1/10) - # 3) a bin width will be divisible by vbin_width times a power of - # 10; this will assure we don't get spikes in the deposited - # spectra from uneven numbers of vbins per bin - resolution = thermal_width / self.bin_width - n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution / - resolution) ).clip(0, np.inf) ) ).astype('int') - vbin_width = self.bin_width.d / n_vbins_per_bin - - # a note to the user about which lines components are unresolved - if (thermal_width < self.bin_width).any(): - mylog.info("%d out of %d line components will be " + - "deposited as unresolved lines.", - (thermal_width < self.bin_width).sum(), - n_absorbers) - - # provide a progress bar with information about lines processed - pbar = get_pbar("Adding line - %s [%f A]: " % \ - (line['label'], line['wavelength']), n_absorbers) - - # for a given transition, step through each location in the - # observed spectrum where it occurs and deposit a voigt profile - for i in parallel_objects(np.arange(n_absorbers), njobs=-1): - - # if there is a ray element with temperature = 0 or column - # density = 0, skip it - if (thermal_b[i] == 0.) or (cdens[i] == 0.): - pbar.update(i) - continue - - # the virtual window into which the line is deposited initially - # spans a region of 2 coarse spectral bins - # (one on each side of the center_index) but the window - # can expand as necessary. - # it will continue to expand until the tau value in the far - # edge of the wings is less than the min_tau value or it - # reaches the edge of the spectrum - window_width_in_bins = 2 - - while True: - left_index = (center_index[i] - window_width_in_bins//2) - right_index = (center_index[i] + window_width_in_bins//2) - n_vbins = (right_index - left_index) * n_vbins_per_bin[i] - - # the array of virtual bins in lambda space - vbins = \ - np.linspace(self.lambda_min + self.bin_width.d * left_index, - self.lambda_min + self.bin_width.d * right_index, - n_vbins, endpoint=False) - - # the virtual bins and their corresponding opacities - vbins, vtau = \ - tau_profile( - lambda_0, line['f_value'], line['gamma'], - thermb[i], cdens[i], - delta_lambda=dlambda[i], lambda_bins=vbins) - - # If tau has not dropped below min tau threshold by the - # edges (ie the wings), then widen the wavelength - # window and repeat process. - if (vtau[0] < min_tau and vtau[-1] < min_tau): - break - window_width_in_bins *= 2 - - # numerically integrate the virtual bins to calculate a - # virtual equivalent width; then sum the virtual equivalent - # widths and deposit into each spectral bin - vEW = vtau * vbin_width[i] - EW = np.zeros(right_index - left_index) - EW_indices = np.arange(left_index, right_index) - for k, val in enumerate(EW_indices): - EW[k] = vEW[n_vbins_per_bin[i] * k: \ - n_vbins_per_bin[i] * (k + 1)].sum() - EW = EW/self.bin_width.d - - # only deposit EW bins that actually intersect the original - # spectral wavelength range (i.e. 
lambda_field) - - # if EW bins don't intersect the original spectral range at all - # then skip the deposition - if ((left_index >= self.n_lambda) or \ - (right_index < 0)): - pbar.update(i) - continue - - # otherwise, determine how much of the original spectrum - # is intersected by the expanded line window to be deposited, - # and deposit the Equivalent Width data into that intersecting - # window in the original spectrum's tau - else: - intersect_left_index = max(left_index, 0) - intersect_right_index = min(right_index, self.n_lambda-1) - self.tau_field[intersect_left_index:intersect_right_index] \ - += EW[(intersect_left_index - left_index): \ - (intersect_right_index - left_index)] - - - # write out absorbers to file if the column density of - # an absorber is greater than the specified "label_threshold" - # of that absorption line - if output_absorbers_file and \ - line['label_threshold'] is not None and \ - cdens[i] >= line['label_threshold']: - - if use_peculiar_velocity: - peculiar_velocity = vlos[i] - else: - peculiar_velocity = 0.0 - self.absorbers_list.append({'label': line['label'], - 'wavelength': (lambda_0 + dlambda[i]), - 'column_density': column_density[i], - 'b_thermal': thermal_b[i], - 'redshift': redshift[i], - 'redshift_eff': redshift_eff[i], - 'v_pec': peculiar_velocity}) - pbar.update(i) - pbar.finish() - - del column_density, delta_lambda, lambda_obs, center_index, \ - thermal_b, thermal_width, cdens, thermb, dlambda, \ - vlos, resolution, vbin_width, n_vbins, n_vbins_per_bin - - comm = _get_comm(()) - self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum") - if output_absorbers_file: - self.absorbers_list = comm.par_combine_object( - self.absorbers_list, "cat", datatype="list") - - @parallel_root_only - def _write_absorbers_file(self, filename): - """ - Write out ASCII list of all substantial absorbers found in spectrum - """ - if filename is None: - return - mylog.info("Writing absorber list: %s.", filename) - self.absorbers_list.sort(key=lambda obj: obj['wavelength']) - f = open(filename, 'w') - f.write('#%-14s %-14s %-12s %-14s %-15s %-9s %-10s\n' % - ('Wavelength', 'Line', 'N [cm^-2]', 'b [km/s]', 'z_cosmo', \ - 'z_eff', 'v_pec [km/s]')) - for line in self.absorbers_list: - f.write('%-14.6f %-14ls %e %e % e % e % e\n' % (line['wavelength'], \ - line['label'], line['column_density'], line['b_thermal'], \ - line['redshift'], line['redshift_eff'], line['v_pec'])) - f.close() - - @parallel_root_only - def _write_spectrum_ascii(self, filename): - """ - Write spectrum to an ascii file. - """ - mylog.info("Writing spectrum to ascii file: %s.", filename) - f = open(filename, 'w') - f.write("# wavelength[A] tau flux\n") - for i in range(self.lambda_field.size): - f.write("%e %e %e\n" % (self.lambda_field[i], - self.tau_field[i], self.flux_field[i])) - f.close() - - @parallel_root_only - def _write_spectrum_fits(self, filename): - """ - Write spectrum to a fits file. - """ - mylog.info("Writing spectrum to fits file: %s.", filename) - col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_field) - col2 = pyfits.Column(name='tau', format='E', array=self.tau_field) - col3 = pyfits.Column(name='flux', format='E', array=self.flux_field) - cols = pyfits.ColDefs([col1, col2, col3]) - tbhdu = pyfits.BinTableHDU.from_columns(cols) - tbhdu.writeto(filename, overwrite=True) - - @parallel_root_only - def _write_spectrum_hdf5(self, filename): - """ - Write spectrum to an hdf5 file. 
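
For reference, the AbsorptionSpectrum class being removed here was typically driven as below; equivalent functionality is maintained outside yt (for example in the Trident package). The import only works on yt releases prior to this patch, and the ray filename, field name, and line parameters are illustrative:

    # Pre-4.0 usage sketch, matching the signatures in the deleted class.
    from yt.analysis_modules.absorption_spectrum.absorption_spectrum import \
        AbsorptionSpectrum

    sp = AbsorptionSpectrum(lambda_min=1100.0, lambda_max=1300.0, n_lambda=20000)
    sp.add_line("HI Lya", "H_number_density", 1215.6700,
                f_value=0.4164, gamma=6.265e8, atomic_mass=1.00794)
    wavelength, flux = sp.make_spectrum("ray.h5",
                                        output_file="spectrum.h5",
                                        output_absorbers_file="absorbers.txt")
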
- - """ - mylog.info("Writing spectrum to hdf5 file: %s.", filename) - output = h5py.File(filename, mode='w') - output.create_dataset('wavelength', data=self.lambda_field) - output.create_dataset('tau', data=self.tau_field) - output.create_dataset('flux', data=self.flux_field) - output.close() diff --git a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py deleted file mode 100644 index ca4aa3c033c..00000000000 --- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py +++ /dev/null @@ -1,1018 +0,0 @@ -from yt.utilities.on_demand_imports import _h5py as h5py -import numpy as np - -from yt.analysis_modules.absorption_spectrum.absorption_line import \ - voigt -from yt.funcs import \ - mylog -from yt.units.yt_array import \ - YTArray -from yt.utilities.on_demand_imports import \ - _scipy - -optimize = _scipy.optimize - -def generate_total_fit(x, fluxData, orderFits, speciesDicts, - minError=1E-4, complexLim=.995, - fitLim=.97, minLength=3, - maxLength=1000, splitLim=.99, - output_file=None): - - """ - This function is designed to fit an absorption spectrum by breaking - the spectrum up into absorption complexes, and iteratively adding - and optimizing voigt profiles to each complex. - - Parameters - ---------- - x : (N) ndarray - 1d array of wavelengths - fluxData : (N) ndarray - array of flux corresponding to the wavelengths given - in x. (needs to be the same size as x) - orderFits : list - list of the names of the species in the order that they - should be fit. Names should correspond to the names of the species - given in speciesDicts. (ex: ['lya','OVI']) - speciesDicts : dictionary - Dictionary of dictionaries (I'm addicted to dictionaries, I - confess). Top level keys should be the names of all the species given - in orderFits. The entries should be dictionaries containing all - relevant parameters needed to create an absorption line of a given - species (f,Gamma,lambda0) as well as max and min values for parameters - to be fit - complexLim : float, optional - Maximum flux to start the edge of an absorption complex. Different - from fitLim because it decides extent of a complex rather than - whether or not a complex is accepted. - fitLim : float,optional - Maximum flux where the level of absorption will trigger - identification of the region as an absorption complex. Default = .98. - (ex: for a minSize=.98, a region where all the flux is between 1.0 and - .99 will not be separated out to be fit as an absorbing complex, but - a region that contains a point where the flux is .97 will be fit - as an absorbing complex.) - minLength : int, optional - number of cells required for a complex to be included. - default is 3 cells. - maxLength : int, optional - number of cells required for a complex to be split up. Default - is 1000 cells. - splitLim : float, optional - if attempting to split a region for being larger than maxlength - the point of the split must have a flux greater than splitLim - (ie: absorption greater than splitLim). Default= .99. - output_file : string, optional - location to save the results of the fit. - - Returns - ------- - allSpeciesLines : dictionary - Dictionary of dictionaries representing the fit lines. - Top level keys are the species given in orderFits and the corresponding - entries are dictionaries with the keys 'N','b','z', and 'group#'. - Each of these corresponds to a list of the parameters for every - accepted fitted line. 
(ie: N[0],b[0],z[0] will create a line that - fits some part of the absorption spectrum). 'group#' is a similar list - but identifies which absorbing complex each line belongs to. Lines - with the same group# were fit at the same time. group#'s do not - correlate between species (ie: an lya line with group number 1 and - an OVI line with group number 1 were not fit together and do - not necessarily correspond to the same region) - yFit : (N) ndarray - array of flux corresponding to the combination of all fitted - absorption profiles. Same size as x. - """ - - # convert to NumPy array if we have a YTArray - if isinstance(x, YTArray): - x = x.d - - #Empty dictionary for fitted lines - allSpeciesLines = {} - - #Wavelength of beginning of array, wavelength resolution - x0,xRes=x[0],x[1]-x[0] - - #Empty fit without any lines - yFit = np.ones(len(fluxData)) - - #Force the first and last flux pixel to be 1 to prevent OOB - fluxData[0]=1 - fluxData[-1]=1 - - - #Find all regions where lines/groups of lines are present - cBounds = _find_complexes(x, fluxData, fitLim=fitLim, - complexLim=complexLim, minLength=minLength, - maxLength=maxLength, splitLim=splitLim) - - #Fit all species one at a time in given order from low to high wavelength - for species in orderFits: - speciesDict = speciesDicts[species] - speciesLines = {'N':np.array([]), - 'b':np.array([]), - 'z':np.array([]), - 'group#':np.array([])} - - #Set up wavelengths for species - initWl = speciesDict['wavelength'][0] - - for b_i,b in enumerate(cBounds): - xBounded=x[b[1]:b[2]] - yDatBounded=fluxData[b[1]:b[2]] - yFitBounded=yFit[b[1]:b[2]] - - - #Find init redshift - z=(xBounded[yDatBounded.argmin()]-initWl)/initWl - - #Check if any flux at partner sites - if not _line_exists(speciesDict['wavelength'], - fluxData,z,x0,xRes,fitLim): - continue - - #Fit Using complex tools - newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded, - z,fitLim,minError,speciesDict) - - #If flagged as a bad fit, species is lyman alpha, - # and it may be a saturated line, use special tools - if flag and species=='lya' and min(yDatBounded)<.1: - newLinesP=_large_flag_fit(xBounded,yDatBounded, - yFitBounded,z,speciesDict, - fitLim,minError) - - if np.size(newLinesP)> 0: - - #Check for EXPLOOOOSIIONNNSSS - newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b) - - - #Check existence of partner lines if applicable - if len(speciesDict['wavelength']) != 1: - newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, - b, minError, x0, xRes, speciesDict) - - - - - #Adjust total current fit - yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict) - - - #Add new group to all fitted lines - if np.size(newLinesP)>0: - speciesLines['N']=np.append(speciesLines['N'],newLinesP[:,0]) - speciesLines['b']=np.append(speciesLines['b'],newLinesP[:,1]) - speciesLines['z']=np.append(speciesLines['z'],newLinesP[:,2]) - groupNums = b_i*np.ones(np.size(newLinesP[:,0])) - speciesLines['group#']=np.append(speciesLines['group#'],groupNums) - - allSpeciesLines[species]=speciesLines - - - if output_file: - _output_fit(allSpeciesLines, output_file) - - return (allSpeciesLines,yFit) - -def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict, - initP=None): - """ Fit an absorption complex by iteratively adding and optimizing - voigt profiles. - - A complex is defined as a region where some number of lines may be present, - or a region of non zero of absorption. 
Lines are iteratively added - and optimized until the difference between the flux generated using - the optimized parameters has a least squares difference between the - desired flux profile less than the error bound. - - Parameters - ---------- - x : (N) ndarray - array of wavelength - ydat : (N) ndarray - array of desired flux profile to be fitted for the wavelength - space given by x. Same size as x. - yFit : (N) ndarray - array of flux profile fitted for the wavelength - space given by x already. Same size as x. - initz : float - redshift to try putting first line at - (maximum absorption for region) - minsize : float - minimum absorption allowed for a line to still count as a line - given in normalized flux (ie: for minSize=.9, only lines with minimum - flux less than .9 will be fitted) - errbound : float - maximum total error allowed for an acceptable fit - speciesDict : dictionary - dictionary containing all relevant parameters needed - to create an absorption line of a given species (f,Gamma,lambda0) - as well as max and min values for parameters to be fit - initP : (,3,) ndarray - initial guess to try for line parameters to fit the region. Used - by large_flag_fit. Default = None, and initial guess generated - automatically. - - Returns - ------- - linesP : (3,) ndarray - Array of best parameters if a good enough fit is found in - the form [[N1,b1,z1], [N2,b2,z2],...] - flag : bool - boolean value indicating the success of the fit (True if unsuccessful) - """ - - #Setup initial line guesses - if initP is None: #Regular fit - initP = [0,0,0] - if min(yDat)<.01: #Large lines get larger initial guess - initP[0] = speciesDict['init_N']*10**2 - elif min(yDat)<.5: - initP[0] = speciesDict['init_N']*10**1 - elif min(yDat)>.9: #Small lines get smaller initial guess - initP[0] = speciesDict['init_N']*10**-1 - else: - initP[0] = speciesDict['init_N'] - initP[1] = speciesDict['init_b'] - initP[2]=initz - initP=np.array([initP]) - - linesP = initP - - #For generating new z guesses - wl0 = speciesDict['wavelength'][0] - - #Check if first line exists still - if min(yDat-yFit+1)>minSize: - return [],False - - #Values to proceed through first run - errSq,prevErrSq,prevLinesP=1,10*len(x),[] - - if errBound is None: - errBound = len(yDat)*(max(1-yDat)*1E-2)**2 - else: - errBound = errBound*len(yDat) - - flag = False - while True: - - #Initial parameter guess from joining parameters from all lines - # in lines into a single array - initP = linesP.flatten() - - #Optimize line - fitP,success=optimize.leastsq(_voigt_error,initP, - args=(x,yDat,yFit,speciesDict), - epsfcn=1E-10,maxfev=1000) - - - #Set results of optimization - linesP = np.reshape(fitP,(-1,3)) - - #Generate difference between current best fit and data - yNewFit=_gen_flux_lines(x,linesP,speciesDict) - dif = yFit*yNewFit-yDat - - #Sum to get idea of goodness of fit - errSq=sum(dif**2) - - if any(linesP[:,1]==speciesDict['init_b']): - flag = True - break - - #If good enough, break - if errSq < errBound: - break - - #If last fit was worse, reject the last line and revert to last fit - if errSq > prevErrSq*10 : - #If its still pretty damn bad, cut losses and try flag fit tools - if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya': - return [],True - else: - linesP = prevLinesP - break - - #If too many lines - if np.shape(linesP)[0]>8 or np.size(linesP)+3>=len(x): - #If its fitable by flag tools and still bad, use flag tools - if errSq >1E2*errBound and speciesDict['name']=='HI lya': - return [],True - else: - flag = True - break - - 
#Store previous data in case reject next fit - prevErrSq = errSq - prevLinesP = linesP - - #Set up initial condition for new line - newP = [0,0,0] - - yAdjusted = 1+yFit*yNewFit-yDat - - if min(yAdjusted)<.01: #Large lines get larger initial guess - newP[0] = speciesDict['init_N']*10**2 - elif min(yAdjusted)<.5: - newP[0] = speciesDict['init_N']*10**1 - elif min(yAdjusted)>.9: #Small lines get smaller initial guess - newP[0] = speciesDict['init_N']*10**-1 - else: - newP[0] = speciesDict['init_N'] - newP[1] = speciesDict['init_b'] - newP[2]=(x[dif.argmax()]-wl0)/wl0 - linesP=np.append(linesP,[newP],axis=0) - - - #Check the parameters of all lines to see if they fall in an - # acceptable range, as given in dict ref - remove=[] - for i,p in enumerate(linesP): - check=_check_params(np.array([p]),speciesDict,x) - if check: - remove.append(i) - linesP = np.delete(linesP,remove,axis=0) - - return linesP,flag - -def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound): - """ - Attempts to more robustly fit saturated lyman alpha regions that have - not converged to satisfactory fits using the standard tools. - - Uses a preselected sample of a wide range of initial parameter guesses - designed to fit saturated lines (see get_test_lines). - - Parameters - ---------- - x : (N) ndarray - array of wavelength - ydat : (N) ndarray - array of desired flux profile to be fitted for the wavelength - space given by x. Same size as x. - yFit : (N) ndarray - array of flux profile fitted for the wavelength - space given by x already. Same size as x. - initz : float - redshift to try putting first line at - (maximum absorption for region) - speciesDict : dictionary - dictionary containing all relevant parameters needed - to create an absorption line of a given species (f,Gamma,lambda0) - as well as max and min values for parameters to be fit - minsize : float - minimum absorption allowed for a line to still count as a line - given in normalized flux (ie: for minSize=.9, only lines with minimum - flux less than .9 will be fitted) - errbound : float - maximum total error allowed for an acceptable fit - - Returns - ------- - bestP : (3,) ndarray - array of best parameters if a good enough fit is found in - the form [[N1,b1,z1], [N2,b2,z2],...] - """ - - #Set up some initial line guesses - lineTests = _get_test_lines(initz) - - #Keep track of the lowest achieved error - bestError = 1000 - - #Iterate through test line guesses - for initLines in lineTests: - if initLines[1,0]==0: - initLines = np.delete(initLines,1,axis=0) - - #Do fitting with initLines as first guess - linesP,flag=_complex_fit(x,yDat,yFit,initz, - minSize,errBound,speciesDict,initP=initLines) - - #Find error of last fit - yNewFit=_gen_flux_lines(x,linesP,speciesDict) - dif = yFit*yNewFit-yDat - errSq=sum(dif**2) - - #If error lower, keep track of the lines used to make that fit - if errSq < bestError: - bestError = errSq - bestP = linesP - - if bestError>10*errBound*len(x): - return [] - else: - return bestP - -def _get_test_lines(initz): - """ - Returns a 3d numpy array of lines to test as initial guesses for difficult - to fit lyman alpha absorbers that are saturated. 
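
_complex_fit above keeps a flattened (N, 3) array of (N, b, z) parameters, optimizes it with scipy.optimize.leastsq, and appends one more line whenever the squared residual stays above the error bound. A stripped-down, runnable sketch of that leastsq pattern, using a toy profile in place of the real Voigt machinery:

    import numpy as np
    from scipy import optimize

    def gen_flux(lines, x):
        # Toy stand-in for _gen_flux_lines: multiply exp(-tau) profiles.
        flux = np.ones_like(x)
        for N, b, z in lines:
            flux *= np.exp(-N * np.exp(-((x - z) / b) ** 2))
        return flux

    x = np.linspace(0.0, 10.0, 200)
    y_obs = gen_flux(np.array([[0.8, 0.5, 4.0]]), x)

    def residual(p):
        return gen_flux(np.reshape(p, (-1, 3)), x) - y_obs

    fit, ier = optimize.leastsq(residual, np.array([0.5, 0.4, 3.8]), maxfev=1000)
    print(np.reshape(fit, (-1, 3)))
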
- - The array is 3d because - the first dimension gives separate initial guesses, the second dimension - has multiple lines for the same guess (trying a broad line plus a - saturated line) and the 3d dimension contains the 3 fit parameters (N,b,z) - - Parameters - ---------- - initz : float - redshift to give all the test lines - - Returns - ------- - testP : (,3,) ndarray - numpy array of the form - [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...] - """ - - #Set up a bunch of empty lines - testP = np.zeros((10,2,3)) - - testP[0,0,:]=[1E18,20,initz] - testP[1,0,:]=[1E18,40,initz] - testP[2,0,:]=[1E16,5, initz] - testP[3,0,:]=[1E16,20,initz] - testP[4,0,:]=[1E16,80,initz] - - testP[5,0,:]=[1E18,20,initz] - testP[6,0,:]=[1E18,40,initz] - testP[7,0,:]=[1E16,5, initz] - testP[8,0,:]=[1E16,20,initz] - testP[9,0,:]=[1E16,80,initz] - - testP[5,1,:]=[1E13,100,initz] - testP[6,1,:]=[1E13,100,initz] - testP[7,1,:]=[1E13,100,initz] - testP[8,1,:]=[1E13,100,initz] - testP[9,1,:]=[1E13,100,initz] - - return testP - -def _get_bounds(z, b, wl, x0, xRes): - """ - Gets the indices of range of wavelength that the wavelength wl is in - with the size of some initial wavelength range. - - Used for checking if species with multiple lines (as in the OVI doublet) - fit all lines appropriately. - - Parameters - ---------- - z : float - redshift - b : (3) ndarray/list - initial bounds in form [i0,i1,i2] where i0 is the index of the - minimum flux for the complex, i1 is index of the lower wavelength - edge of the complex, and i2 is the index of the higher wavelength - edge of the complex. - wl : float - unredshifted wavelength of the peak of the new region - x0 : float - wavelength of the index 0 - xRes : float - difference in wavelength for two consecutive indices - - Returns - ------- - indices : (2) tuple - Tuple (i1,i2) where i1 is the index of the lower wavelength bound of - the new region and i2 is the index of the higher wavelength bound of - the new region - """ - - r=[-b[1]+100+b[0],b[2]+100-b[0]] - redWl = (z+1)*wl - iRedWl=int((redWl-x0)/xRes) - indices = (iRedWl-r[0],iRedWl+r[1]) - - return indices - -def _remove_unaccepted_partners(linesP, x, y, b, errBound, - x0, xRes, speciesDict): - """ - Given a set of parameters [N,b,z] that form multiple lines for a given - species (as in the OVI doublet), remove any set of parameters where - not all transition wavelengths have a line that matches the fit. - - (ex: if a fit is determined based on the first line of the OVI doublet, - but the given parameters give a bad fit of the wavelength space of - the second line then that set of parameters is removed from the array - of line parameters.) - - Parameters - ---------- - linesP : (3,) ndarray - array giving sets of line parameters in - form [[N1, b1, z1], ...] - x : (N) ndarray - wavelength array [nm] - y : (N) ndarray - normalized flux array of original data - b : (3) tuple/list/ndarray - indices that give the bounds of the original region so that another - region of similar size can be used to determine the goodness - of fit of the other wavelengths - errBound : float - size of the error that is appropriate for a given region, - adjusted to account for the size of the region. - - Returns - ------- - linesP : (3,) ndarray - array similar to linesP that only contains lines with - appropriate fits of all transition wavelengths. 
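
The _get_bounds helper above locates a redshifted wavelength in the flux array via iRedWl = int((redWl - x0) / xRes). A quick worked example with illustrative numbers:

    z, wl = 0.05, 1215.67        # redshift and rest wavelength [angstrom]
    x0, xRes = 1150.0, 0.01      # wavelength at index 0 and bin spacing
    redWl = (z + 1) * wl         # redshifted wavelength, about 1276.45 angstrom
    iRedWl = int((redWl - x0) / xRes)   # -> index 12645
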
- """ - - #List of lines to remove - removeLines=[] - - #Set error - - - #Iterate through all sets of line parameters - for i,p in enumerate(linesP): - - #iterate over all transition wavelengths - for wl in speciesDict['wavelength']: - - #Get the bounds of a similar sized region around the - # appropriate wavelength, and then get the appropriate - # region of wavelength and flux - lb = _get_bounds(p[2],b,wl,x0,xRes) - xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]] - - if errBound is None: - errBound = 10*len(yb)*(max(1-yb)*1E-2)**2 - else: - errBound = 10*errBound*len(yb) - - #Generate a fit and find the difference to data - yFitb=_gen_flux_lines(xb,np.array([p]),speciesDict) - dif =yb-yFitb - - - - #Only counts as an error if line is too big ---------------< - dif = [k for k in dif if k>0] - err = sum(dif) - - #If the fit is too bad then add the line to list of removed lines - if err > errBound: - removeLines.append(i) - break - - #Remove all bad line fits - linesP = np.delete(linesP,removeLines,axis=0) - - return linesP - - - -def _line_exists(wavelengths, y, z, x0, xRes,fluxMin): - """For a group of lines finds if the there is some change in flux greater - than some minimum at the same redshift with different initial wavelengths - - Parameters - ---------- - wavelengths : (N) ndarray - array of initial wavelengths to check - y : (N) ndarray - flux array to check - x0 : float - wavelength of the first value in y - xRes : float - difference in wavelength between consecutive cells in flux array - fluxMin : float - maximum flux to count as a line existing. - - Returns - ------- - - flag : boolean - value indicating whether all lines exist. True if all lines exist - """ - - #Iterate through initial wavelengths - for wl in wavelengths: - #Redshifted wavelength - redWl = (z+1)*wl - - #Index of the redshifted wavelength - indexRedWl = (redWl-x0)/xRes - - #Check to see if even in flux range - if indexRedWl > len(y): - return False - - #Check if surpasses minimum absorption bound - if y[int(indexRedWl)]>fluxMin: - return False - - return True - -def _find_complexes(x, yDat, complexLim=.999, fitLim=.99, - minLength =3, maxLength=1000, splitLim=.99): - """Breaks up the wavelength space into groups - where there is some absorption. - - Parameters - ---------- - x : (N) ndarray - array of wavelengths - yDat : (N) ndarray - array of flux corresponding to the wavelengths given - in x. (needs to be the same size as x) - complexLim : float, optional - Maximum flux to start the edge of an absorption complex. Different - from fitLim because it decides extent of a complex rather than - whether or not a complex is accepted. - fitLim : float,optional - Maximum flux where the level of absorption will trigger - identification of the region as an absorption complex. Default = .98. - (ex: for a minSize=.98, a region where all the flux is between 1.0 and - .99 will not be separated out to be fit as an absorbing complex, but - a region that contains a point where the flux is .97 will be fit - as an absorbing complex.) - minLength : int, optional - number of cells required for a complex to be included. - default is 3 cells. - maxLength : int, optional - number of cells required for a complex to be split up. Default - is 1000 cells. - splitLim : float, optional - if attempting to split a region for being larger than maxlength - the point of the split must have a flux greater than splitLim - (ie: absorption greater than splitLim). Default= .99. - - Returns - ------- - cBounds : (3,) - list of bounds in the form [[i0,i1,i2],...] 
where i0 is the - index of the maximum flux for a complex, i1 is the index of the - beginning of the complex, and i2 is the index of the end of the - complex. Indexes refer to the indices of x and yDat. - """ - - #Initialize empty list of bounds - cBounds=[] - - #Iterate through cells of flux - i=0 - while (i < x.size): - - #Start tracking at a region that surpasses flux of edge - if yDat[i] < complexLim: - - #Iterate through until reach next edge - j=0 - while yDat[i+j] < complexLim: j=j+1 - - #Check if the complex is big enough - if j > minLength: - - #Check if there is enough absorption for the complex to - # be included - cPeak = yDat[i:i+j].argmin() - if yDat[cPeak+i] < fitLim: - cBounds.append([cPeak+i,i,i+j]) - - i=i+j - i=i+1 - - #Check if any of the complexes need to be split - for i,b in enumerate(cBounds): - if b[2]-b[1] > maxLength: - - split = _split_region(yDat,b,splitLim) - - if split: - - #add the two regions separately - cBounds.insert(i+1,split[0]) - cBounds.insert(i+2,split[1]) - - #Remove the original region - cBounds.pop(i) - i=i+1 - i=i+1 - - return cBounds - - -def _split_region(yDat,b,splitLim): - #Find the minimum absorption in the middle two quartiles of - # the large complex - - q=(b[2]-b[1])/4 - cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q - - #Only break it up if the minimum absorption is actually low enough - if yDat[cut]>splitLim: - - #Get the new two peaks - b1Peak = yDat[b[1]:cut].argmin()+b[1] - b2Peak = yDat[cut:b[2]].argmin()+cut - - region_1 = [b1Peak,b[1],cut] - region_2 = [b2Peak,cut,b[2]] - - return [region_1,region_2] - - else: - - return [] - - - -def _gen_flux_lines(x, linesP, speciesDict,firstLine=False): - """ - Calculates the normalized flux for a region of wavelength space - generated by a set of absorption lines. - - Parameters - ---------- - x : (N) ndarray - Array of wavelength - linesP: (3,) ndarray - Array giving sets of line parameters in - form [[N1, b1, z1], ...] - speciesDict : dictionary - Dictionary containing all relevant parameters needed - to create an absorption line of a given species (f,Gamma,lambda0) - - Returns - ------- - flux : (N) ndarray - Array of normalized flux generated by the line parameters - given in linesP over the wavelength space given in x. Same size as x. - """ - y=0 - for p in linesP: - for i in range(speciesDict['numLines']): - f=speciesDict['f'][i] - g=speciesDict['Gamma'][i] - wl=speciesDict['wavelength'][i] - y = y+ _gen_tau(x,p,f,g,wl) - if firstLine: - break - - flux = np.exp(-y) - return flux - -def _gen_tau(t, p, f, Gamma, lambda_unshifted): - """This calculates a flux distribution for given parameters using the yt - voigt profile generator""" - N,b,z= p - - #Calculating quantities - tau_o = 1.4973614E-15*N*f*lambda_unshifted/b - a=7.95774715459E-15*Gamma*lambda_unshifted/b - x=299792.458/b*(lambda_unshifted*(1+z)/t-1) - - H = np.zeros(len(x)) - H = voigt(a,x) - - tau = tau_o*H - - return tau - -def _voigt_error(pTotal, x, yDat, yFit, speciesDict): - """ - Gives the error of each point used to optimize the fit of a group - of absorption lines to a given flux profile. - - If the parameters are not in the acceptable range as defined - in speciesDict, the first value of the error array will - contain a large value (999), to prevent the optimizer from running - into negative number problems. - - Parameters - ---------- - pTotal : (3,) ndarray - Array with form [[N1, b1, z1], ...] - x : (N) ndarray - array of wavelengths [nm] - yDat : (N) ndarray - desired normalized flux from fits of lines in wavelength - space given by x - yFit : (N) ndarray - previous fit over the wavelength space given by x.
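# Worked example (editorial, not from the yt source) of the optical-depth formula
# used by _gen_tau above, evaluated for HI Lya. scipy's Faddeeva function stands in
# for the module's voigt() helper via H(a, x) = Re[w(x + i*a)]; all numbers are
# illustrative. Wavelengths in Angstrom, b in km/s, N in cm^-2.
import numpy as np
from scipy.special import wofz

N, b, z = 1e14, 30.0, 0.0                      # column density, Doppler parameter, redshift
f, Gamma, lam0 = 0.4164, 6.265e8, 1215.67      # HI Lya oscillator strength, damping rate, rest wavelength
t = np.linspace(1210.0, 1221.0, 2000)          # observed wavelength grid

tau_o = 1.4973614e-15 * N * f * lam0 / b       # line-centre optical depth scale
a = 7.95774715459e-15 * Gamma * lam0 / b       # damping parameter
x = 299792.458 / b * (lam0 * (1.0 + z) / t - 1.0)
H = np.real(wofz(x + 1j * a))                  # Voigt profile H(a, x)
flux = np.exp(-(tau_o * H))                    # normalized flux, as in _gen_flux_lines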
- speciesDict : dictionary - dictionary containing all relevant parameters needed - to create an absorption line of a given species (f,Gamma,lambda0) - as well as max and min values for parameters to be fit - - Returns - ------- - error : (N) ndarray - the difference between the fit generated by the parameters - given in pTotal multiplied by the previous fit and the desired - flux profile, w/ first index modified appropriately for bad - parameter choices and additional penalty for fitting with a lower - flux than observed. - """ - - pTotal.shape = (-1,3) - yNewFit = _gen_flux_lines(x,pTotal,speciesDict) - - error = yDat-yFit*yNewFit - error_plus = (yDat-yFit*yNewFit).clip(min=0) - - error = error+error_plus - error[0] = _check_params(pTotal,speciesDict,x) - - return error - -def _check_params(p, speciesDict,xb): - """ - Check to see if any of the parameters in p fall outside the range - given in speciesDict or on the boundaries - - Parameters - ---------- - p : (3,) ndarray - array with form [[N1, b1, z1], ...] - speciesDict : dictionary - dictionary with properties giving the max and min - values appropriate for each parameter N,b, and z. - xb : (N) ndarray - wavelength array [nm] - - Returns - ------- - check : int - 0 if all values are fine - 999 if any values fall outside acceptable range - """ - - minz = (xb[0])/speciesDict['wavelength'][0]-1 - maxz = (xb[-1])/speciesDict['wavelength'][0]-1 - - check = 0 - if any(p[:,0] >= speciesDict['maxN']) or\ - any(p[:,0] <= speciesDict['minN']) or\ - any(p[:,1] >= speciesDict['maxb']) or\ - any(p[:,1] <= speciesDict['minb']) or\ - any(p[:,2] >= maxz) or\ - any(p[:,2] <= minz): - check = 999 - - return check - -def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound): - - """ - Check to see if any of the parameters in p are the - same as initial parameters and if so, attempt to - split the region and refit it. - - Parameters - ---------- - p : (3,) ndarray - array with form [[N1, b1, z1], ...] - speciesDict : dictionary - dictionary with properties giving the max and min - values appropriate for each parameter N,b, and z. - x : (N) ndarray - wavelength array [nm] - """ - - # Check if anything is a default parameter - if any(p[:,0] == speciesDict['init_N']) or\ - any(p[:,0] == speciesDict['init_N']*10) or\ - any(p[:,0] == speciesDict['init_N']*100) or\ - any(p[:,0] == speciesDict['init_N']*.1) or\ - any(p[:,1] == speciesDict['init_b']) or\ - any(p[:,1] == speciesDict['maxb']): - - # These are the initial bounds - init_bounds = [yDat.argmin(),0,len(xb)-1] - - # Gratitutous limit for splitting region - newSplitLim = 1 - (1-min(yDat))*.5 - - # Attempt to split region - split = _split_region(yDat,init_bounds,newSplitLim) - - # If we can't split it, just reject it. Its unphysical - # to just keep the default parameters and we're out of - # options at this point - if not split: - return [] - - # Else set up the bounds for each region and fit separately - b1,b2 = split[0][2], split[1][1] - - p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1], - initz, minSize, errorBound, speciesDict) - - p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:], - initz, minSize, errorBound, speciesDict) - - # Make the final line parameters. 
Its annoying because - # one or both regions may have fit to nothing - if np.size(p1)> 0 and np.size(p2)>0: - p = np.r_[p1,p2] - elif np.size(p1) > 0: - p = p1 - else: - p = p2 - - return p - - -def _check_numerical_instability(x, p, speciesDict,b): - - """ - Check to see if any of the parameters in p are causing - unstable numerical effects outside the region of fit - - Parameters - ---------- - p : (3,) ndarray - array with form [[N1, b1, z1], ...] - speciesDict : dictionary - dictionary with properties giving the max and min - values appropriate for each parameter N,b, and z. - x : (N) ndarray - wavelength array [nm] - b : (3) list - list of integers indicating bounds of region fit in x - """ - - remove_lines = [] - - - for i,line in enumerate(p): - - # First to check if the line is at risk for instability - if line[1]<5 or line[0] < 1E12: - - - # get all flux that isn't part of fit plus a little wiggle room - # max and min to prevent boundary errors - - flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True) - flux = np.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]] - - #Find regions that are absorbing outside the region we fit - flux_dif = 1 - flux - absorbing_coefficient = max(abs(flux_dif)) - - - #Really there shouldn't be any absorption outside - #the region we fit, but we'll give some leeway. - #for high resolution spectra the tiny bits on the edges - #can give a non negligible amount of flux. Plus the errors - #we are looking for are HUGE. - if absorbing_coefficient > .1: - - # we just set it to no fit because we've tried - # everything else at this point. this region just sucks :( - remove_lines.append(i) - - if remove_lines: - p = np.delete(p, remove_lines, axis=0) - - return p - -def _output_fit(lineDic, file_name = 'spectrum_fit.h5'): - """ - This function is designed to output the parameters of the series - of lines used to fit an absorption spectrum. - - The dataset contains entries in the form species/N, species/b - species/z, and species/complex. The ith entry in each of the datasets - is the fitted parameter for the ith line fitted to the spectrum for - the given species. The species names come from the fitted line - dictionary. - - Parameters - ---------- - lineDic : dictionary - Dictionary of dictionaries representing the fit lines. - Top level keys are the species given in orderFits and the corresponding - entries are dictionaries with the keys 'N','b','z', and 'group#'. - Each of these corresponds to a list of the parameters for every - accepted fitted line. - fileName : string, optional - Name of the file to output fit to. Default = 'spectrum_fit.h5' - - """ - f = h5py.File(file_name, mode='w') - for ion, params in lineDic.items(): - f.create_dataset("{0}/N".format(ion),data=params['N']) - f.create_dataset("{0}/b".format(ion),data=params['b']) - f.create_dataset("{0}/z".format(ion),data=params['z']) - f.create_dataset("{0}/complex".format(ion),data=params['group#']) - mylog.info('Writing spectrum fit to {0}'.format(file_name)) - f.close() diff --git a/yt/analysis_modules/absorption_spectrum/api.py b/yt/analysis_modules/absorption_spectrum/api.py index 40281fde65f..bb66b6f4b45 100644 --- a/yt/analysis_modules/absorption_spectrum/api.py +++ b/yt/analysis_modules/absorption_spectrum/api.py @@ -1,28 +1,7 @@ -""" -API for absorption_spectrum +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. 
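# Illustrative sketch (editorial, not from the yt source): the fit file written by
# _output_fit earlier in this module can be read back with plain h5py; the
# species/N, species/b, species/z, species/complex layout is the one documented in
# its docstring, and the file name is whatever was passed as file_name.
import h5py

with h5py.File("spectrum_fit.h5", "r") as f:
    for ion in f:
        N, b, z = f[ion]["N"][:], f[ion]["b"][:], f[ion]["z"][:]
        print(ion, "->", len(N), "fitted lines")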
-# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the AbsorptionSpectrum module has been moved to the " - "Trident package. This version is deprecated and will be removed from yt " - "in a future release. See https://github.com/trident-project/trident " - "for further information.") - -from .absorption_spectrum import \ - AbsorptionSpectrum - -from .absorption_spectrum_fit import \ - generate_total_fit +raise YTModuleRemoved( + "AbsorptionSpectrum", + "https://github.com/trident-project/trident", + "https://trident.readthedocs.io/") diff --git a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py deleted file mode 100644 index ece514a1a97..00000000000 --- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py +++ /dev/null @@ -1,558 +0,0 @@ -""" -Unit test for the AbsorptionSpectrum analysis module -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np -from yt.testing import \ - assert_allclose_units, requires_file, requires_module, \ - assert_almost_equal -from yt.analysis_modules.absorption_spectrum.absorption_line import \ - voigt_old, voigt_scipy -from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum -from yt.analysis_modules.cosmological_observation.api import LightRay -from yt.utilities.answer_testing.framework import \ - GenericArrayTest, \ - requires_answer_testing -import tempfile -import os -import shutil -from yt.utilities.on_demand_imports import \ - _h5py as h5 -from yt.convenience import load - - -COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo" -COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009" -GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param" -GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5" -ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030" -FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5" - -@requires_file(COSMO_PLUS) -@requires_answer_testing() -def test_absorption_spectrum_cosmo(): - """ - This test generates an absorption spectrum from a compound light ray on a - grid dataset - """ - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03) - - lr.make_light_ray(seed=1234567, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - sp = AbsorptionSpectrum(900.0, 1800.0, 10000) - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, - gamma, mass, label_threshold=1.e10) - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 912.323660 # Angstroms - normalization = 1.6e17 - index = 3.0 - - sp.add_continuum(my_label, field, wavelength, normalization, index) - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.h5', - 
line_list_file='lines.txt', - use_peculiar_velocity=True) - - # load just-generated hdf5 file of spectral data (for consistency) - data = h5.File('spectrum.h5', 'r') - - for key in data.keys(): - func = lambda x=key: data[x][:] - func.__name__ = "{}_cosmo".format(key) - test = GenericArrayTest(None, func) - test_absorption_spectrum_cosmo.__name__ = test.description - yield test - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(COSMO_PLUS_SINGLE) -@requires_answer_testing() -def test_absorption_spectrum_non_cosmo(): - """ - This test generates an absorption spectrum from a simple light ray on a - grid dataset - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS_SINGLE) - - ray_start = [0,0,0] - ray_end = [1,1,1] - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, - gamma, mass, label_threshold=1.e10) - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.h5', - line_list_file='lines.txt', - use_peculiar_velocity=True) - - # load just-generated hdf5 file of spectral data (for consistency) - data = h5.File('spectrum.h5', 'r') - - for key in data.keys(): - func = lambda x=key: data[x][:] - func.__name__ = "{}_non_cosmo".format(key) - test = GenericArrayTest(None, func) - test_absorption_spectrum_non_cosmo.__name__ = test.description - yield test - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(COSMO_PLUS_SINGLE) -@requires_answer_testing() -def test_absorption_spectrum_non_cosmo_novpec(): - """ - This test generates an absorption spectrum from a simple light ray on a - grid dataset - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS_SINGLE) - - ray_start = [0,0,0] - ray_end = [1,1,1] - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5', use_peculiar_velocity=False) - - sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, - gamma, mass, label_threshold=1.e10) - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.h5', - line_list_file='lines.txt', - use_peculiar_velocity=False) - - # load just-generated hdf5 file of spectral data (for consistency) - data = h5.File('spectrum.h5', 'r') - - for key in data.keys(): - func = lambda x=key: data[x][:] - func.__name__ = "{}_non_cosmo_novpec".format(key) - test = GenericArrayTest(None, func) - test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description - yield test - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(COSMO_PLUS_SINGLE) -def test_equivalent_width_conserved(): - """ - This tests that the equivalent width of the optical depth is conserved - regardless of the bin width employed in wavelength space. - Unresolved lines should still deposit optical depth into the spectrum. 
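# Toy illustration (editorial, not from the yt source) of the invariant this test
# checks: if a narrow line's optical depth is laid down on a fine grid and then
# rebinned, sum(tau * bin_width) is the same for every bin width. The production
# code gets the same invariance by depositing equivalent width even for unresolved
# lines; the Gaussian tau profile below is made up.
import numpy as np

fine_dl = 1e-4
lam = np.arange(1200.0, 1300.0, fine_dl)
tau_fine = 10.0 * np.exp(-0.5 * ((lam - 1215.67) / 0.02) ** 2)

for dl in (1e-3, 1e-2, 1e-1, 1e0):
    n = int(round(dl / fine_dl))
    tau_binned = tau_fine[: (lam.size // n) * n].reshape(-1, n).mean(axis=1)
    print(dl, (tau_binned * dl).sum())           # identical totals up to float rounding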
- """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS_SINGLE) - - ray_start = [0,0,0] - ray_end = [1,1,1] - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - my_label = 'HI Lya' - field = 'H_number_density' - wave = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - lambda_min= 1200 - lambda_max= 1300 - lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1] - total_tau = [] - - for lambda_bin_width in lambda_bin_widths: - n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1 - sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max, - n_lambda=n_lambda) - sp.add_line(my_label, field, wave, f_value, gamma, mass) - wavelength, flux = sp.make_spectrum('lightray.h5') - total_tau.append((lambda_bin_width * sp.tau_field).sum()) - - # assure that the total tau values are all within 1e-3 of each other - for tau in total_tau: - assert_almost_equal(tau, total_tau[0], 3) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - - -@requires_file(COSMO_PLUS_SINGLE) -@requires_module("astropy") -def test_absorption_spectrum_fits(): - """ - This test generates an absorption spectrum and saves it as a fits file. - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS_SINGLE) - - ray_start = [0,0,0] - ray_end = [1,1,1] - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - sp = AbsorptionSpectrum(900.0, 1800.0, 10000) - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, - gamma, mass, label_threshold=1.e10) - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 912.323660 # Angstroms - normalization = 1.6e17 - index = 3.0 - - sp.add_continuum(my_label, field, wavelength, normalization, index) - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.fits', - line_list_file='lines.txt', - use_peculiar_velocity=True) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - - -@requires_module("scipy") -def test_voigt_profiles(): - a = 1.7e-4 - x = np.linspace(5.0, -3.6, 60) - assert_allclose_units(voigt_old(a, x), voigt_scipy(a, x), 1e-8) - -@requires_file(GIZMO_PLUS) -@requires_answer_testing() -def test_absorption_spectrum_cosmo_sph(): - """ - This test generates an absorption spectrum from a compound light ray on a - particle dataset - """ - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01) - - lr.make_light_ray(seed=1234567, - fields=[('gas', 'temperature'), - ('gas', 'H_number_density')], - data_filename='lightray.h5') - - sp = AbsorptionSpectrum(900.0, 1800.0, 10000) - - my_label = 'HI Lya' - field = ('gas', 'H_number_density') - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, - gamma, mass, label_threshold=1.e10) - - my_label = 'HI Lya' - field = ('gas', 'H_number_density') - wavelength = 912.323660 # Angstroms - normalization = 1.6e17 - index = 3.0 - - sp.add_continuum(my_label, field, wavelength, 
normalization, index) - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.h5', - line_list_file='lines.txt', - use_peculiar_velocity=True) - - # load just-generated hdf5 file of spectral data (for consistency) - data = h5.File('spectrum.h5', 'r') - - for key in data.keys(): - func = lambda x=key: data[x][:] - func.__name__ = "{}_cosmo_sph".format(key) - test = GenericArrayTest(None, func) - test_absorption_spectrum_cosmo_sph.__name__ = test.description - yield test - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(GIZMO_PLUS_SINGLE) -@requires_answer_testing() -def test_absorption_spectrum_non_cosmo_sph(): - """ - This test generates an absorption spectrum from a simple light ray on a - particle dataset - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - ds = load(GIZMO_PLUS_SINGLE) - lr = LightRay(ds) - ray_start = ds.domain_left_edge - ray_end = ds.domain_right_edge - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=[('gas', 'temperature'), - ('gas', 'H_number_density')], - data_filename='lightray.h5') - - sp = AbsorptionSpectrum(1200.0, 1300.0, 10001) - - my_label = 'HI Lya' - field = ('gas', 'H_number_density') - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, - gamma, mass, label_threshold=1.e10) - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.h5', - line_list_file='lines.txt', - use_peculiar_velocity=True) - - # load just-generated hdf5 file of spectral data (for consistency) - data = h5.File('spectrum.h5', 'r') - - for key in data.keys(): - func = lambda x=key: data[x][:] - func.__name__ = "{}_non_cosmo_sph".format(key) - test = GenericArrayTest(None, func) - test_absorption_spectrum_non_cosmo_sph.__name__ = test.description - yield test - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(ISO_GALAXY) -@requires_answer_testing() -def test_absorption_spectrum_with_continuum(): - """ - This test generates an absorption spectrum from a simple light ray on a - grid dataset and adds Lyman alpha and Lyman continuum to it - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - ds = load(ISO_GALAXY) - lr = LightRay(ds) - - ray_start = ds.domain_left_edge - ray_end = ds.domain_right_edge - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - sp = AbsorptionSpectrum(800.0, 1300.0, 5001) - - my_label = 'HI Lya' - field = 'H_number_density' - wavelength = 1215.6700 # Angstroms - f_value = 4.164E-01 - gamma = 6.265e+08 - mass = 1.00794 - - sp.add_line(my_label, field, wavelength, f_value, - gamma, mass, label_threshold=1.e10) - - my_label = 'Ly C' - field = 'H_number_density' - wavelength = 912.323660 # Angstroms - normalization = 1.6e17 - index = 3.0 - - sp.add_continuum(my_label, field, wavelength, normalization, index) - - wavelength, flux = sp.make_spectrum('lightray.h5', - output_file='spectrum.h5', - line_list_file='lines.txt', - use_peculiar_velocity=True) - - # load just-generated hdf5 file of spectral data (for consistency) - data = h5.File('spectrum.h5', 'r') - - for key in data.keys(): - func = lambda x=key: data[x][:] - func.__name__ = "{}_continuum".format(key) - test = GenericArrayTest(None, func) - 
test_absorption_spectrum_with_continuum.__name__ = test.description - yield test - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(FIRE) -def test_absorption_spectrum_with_zero_field(): - """ - This test generates an absorption spectrum with some - particle dataset - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - ds = load(FIRE) - lr = LightRay(ds) - - # Define species and associated parameters to add to continuum - # Parameters used for both adding the transition to the spectrum - # and for fitting - # Note that for single species that produce multiple lines - # (as in the OVI doublet), 'numLines' will be equal to the number - # of lines, and f,gamma, and wavelength will have multiple values. - - HI_parameters = { - 'name': 'HI', - 'field': 'H_number_density', - 'f': [.4164], - 'Gamma': [6.265E8], - 'wavelength': [1215.67], - 'mass': 1.00794, - 'numLines': 1, - 'maxN': 1E22, 'minN': 1E11, - 'maxb': 300, 'minb': 1, - 'maxz': 6, 'minz': 0, - 'init_b': 30, - 'init_N': 1E14 - } - - species_dicts = {'HI': HI_parameters} - - - # Get all fields that need to be added to the light ray - fields = [('gas','temperature')] - for s, params in species_dicts.items(): - fields.append(params['field']) - - # With a single dataset, a start_position and - # end_position or trajectory must be given. - # Trajectory should be given as (r, theta, phi) - lr.make_light_ray( - start_position=ds.arr([0., 0., 0.], 'unitary'), - end_position=ds.arr([1., 1., 1.], 'unitary'), - solution_filename='test_lightraysolution.txt', - data_filename='test_lightray.h5', - fields=fields) - - # Create an AbsorptionSpectrum object extending from - # lambda = 900 to lambda = 1800, with 10000 pixels - sp = AbsorptionSpectrum(900.0, 1400.0, 50000) - - # Iterate over species - for s, params in species_dicts.items(): - # Iterate over transitions for a single species - for i in range(params['numLines']): - # Add the lines to the spectrum - sp.add_line( - s, params['field'], - params['wavelength'][i], params['f'][i], - params['Gamma'][i], params['mass'], - label_threshold=1.e10) - - - # Make and save spectrum - wavelength, flux = sp.make_spectrum( - 'test_lightray.h5', - output_file='test_spectrum.h5', - line_list_file='test_lines.txt', - use_peculiar_velocity=True) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) diff --git a/yt/analysis_modules/cosmological_observation/api.py b/yt/analysis_modules/cosmological_observation/api.py index 2e0dd00d4c0..da1df1ec4b7 100644 --- a/yt/analysis_modules/cosmological_observation/api.py +++ b/yt/analysis_modules/cosmological_observation/api.py @@ -1,32 +1,7 @@ -""" -API for cosmology analysis. +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the CosmologySplice module has been moved to " - "the yt_astro_analysis package. This version is deprecated " - "and will be removed from yt in a future release. 
See " - "https://github.com/yt-project/yt_astro_analysis for further " - "information.") - -from .cosmology_splice import \ - CosmologySplice - -from .light_cone.api import \ - LightCone - -from .light_ray.api import \ - LightRay +raise YTModuleRemoved( + "CosmologySplice and LightCone", + "https://github.com/yt-project/yt_astro_analysis", + "https://yt-astro-analysis.readthedocs.io/") diff --git a/yt/analysis_modules/cosmological_observation/cosmology_splice.py b/yt/analysis_modules/cosmological_observation/cosmology_splice.py deleted file mode 100644 index b3dbd6ad764..00000000000 --- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py +++ /dev/null @@ -1,344 +0,0 @@ -""" -CosmologyTimeSeries class and member functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np -import os - -from yt.convenience import \ - simulation -from yt.funcs import mylog -from yt.utilities.cosmology import \ - Cosmology -from yt.utilities.physical_constants import \ - c - -class CosmologySplice(object): - """ - Class for splicing together datasets to extend over a - cosmological distance. - """ - - def __init__(self, parameter_filename, simulation_type, find_outputs=False): - self.parameter_filename = parameter_filename - self.simulation_type = simulation_type - self.simulation = simulation(parameter_filename, simulation_type, - find_outputs=find_outputs) - - self.cosmology = Cosmology( - hubble_constant=(self.simulation.hubble_constant), - omega_matter=self.simulation.omega_matter, - omega_lambda=self.simulation.omega_lambda) - - def create_cosmology_splice(self, near_redshift, far_redshift, - minimal=True, max_box_fraction=1.0, - deltaz_min=0.0, - time_data=True, redshift_data=True): - r"""Create list of datasets capable of spanning a redshift - interval. - - For cosmological simulations, the physical width of the simulation - box corresponds to some \Delta z, which varies with redshift. - Using this logic, one can stitch together a series of datasets to - create a continuous volume or length element from one redshift to - another. This method will return such a list - - Parameters - ---------- - near_redshift : float - The nearest (lowest) redshift in the cosmology splice list. - far_redshift : float - The furthest (highest) redshift in the cosmology splice list. - minimal : bool - If True, the minimum number of datasets is used to connect the - initial and final redshift. If false, the list will contain as - many entries as possible within the redshift - interval. - Default: True. - max_box_fraction : float - In terms of the size of the domain, the maximum length a light - ray segment can be in order to span the redshift interval from - one dataset to another. If using a zoom-in simulation, this - parameter can be set to the length of the high resolution - region so as to limit ray segments to that size. If the - high resolution region is not cubical, the smallest side - should be used. - Default: 1.0 (the size of the box) - deltaz_min : float - Specifies the minimum delta z between consecutive datasets - in the returned - list. - Default: 0.0. - time_data : bool - Whether or not to include time outputs when gathering - datasets for time series. 
- Default: True. - redshift_data : bool - Whether or not to include redshift outputs when gathering - datasets for time series. - Default: True. - - Examples - -------- - - >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") - >>> cosmo = co.create_cosmology_splice(1.0, 0.0) - - """ - - if time_data and redshift_data: - self.splice_outputs = self.simulation.all_outputs - elif time_data: - self.splice_outputs = self.simulation.all_time_outputs - elif redshift_data: - self.splice_outputs = self.simulation.all_redshift_outputs - else: - mylog.error('Both time_data and redshift_data are False.') - return - - # Link datasets in list with pointers. - # This is used for connecting datasets together. - for i, output in enumerate(self.splice_outputs): - if i == 0: - output['previous'] = None - output['next'] = self.splice_outputs[i + 1] - elif i == len(self.splice_outputs) - 1: - output['previous'] = self.splice_outputs[i - 1] - output['next'] = None - else: - output['previous'] = self.splice_outputs[i - 1] - output['next'] = self.splice_outputs[i + 1] - - # Calculate maximum delta z for each data dump. - self.max_box_fraction = max_box_fraction - self._calculate_deltaz_max() - - # Calculate minimum delta z for each data dump. - self._calculate_deltaz_min(deltaz_min=deltaz_min) - - cosmology_splice = [] - - if near_redshift == far_redshift: - self.simulation.get_time_series(redshifts=[near_redshift]) - cosmology_splice.append( - {'time': self.simulation[0].current_time, - 'redshift': self.simulation[0].current_redshift, - 'filename': os.path.join(self.simulation[0].fullpath, - self.simulation[0].basename), - 'next': None}) - mylog.info("create_cosmology_splice: Using %s for z = %f ." % - (cosmology_splice[0]['filename'], near_redshift)) - return cosmology_splice - - # Use minimum number of datasets to go from z_i to z_f. - if minimal: - - z_Tolerance = 1e-3 - z = far_redshift - - # Sort data outputs by proximity to current redshift. - self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift'])) - cosmology_splice.append(self.splice_outputs[0]) - z = cosmology_splice[-1]["redshift"] - z_target = z - cosmology_splice[-1]["dz_max"] - - # fill redshift space with datasets - while ((z_target > near_redshift) and - (np.abs(z_target - near_redshift) > z_Tolerance)): - - # Move forward from last slice in stack until z > z_max. - current_slice = cosmology_splice[-1] - - while current_slice["next"] is not None: - current_slice = current_slice['next'] - if current_slice["next"] is None: - break - if current_slice["next"]["redshift"] < z_target: - break - - if current_slice["redshift"] < z_target: - need_fraction = self.cosmology.comoving_radial_distance( - current_slice["redshift"], z) / \ - self.simulation.box_size - raise RuntimeError( - ("Cannot create cosmology splice: " + - "Getting from z = %f to %f requires " + - "max_box_fraction = %f, but max_box_fraction " - "is set to %f") % - (z, current_slice["redshift"], - need_fraction, max_box_fraction)) - - cosmology_splice.append(current_slice) - z = current_slice["redshift"] - z_target = z - current_slice["dz_max"] - - # Make light ray using maximum number of datasets (minimum spacing). - else: - # Sort data outputs by proximity to current redshift. - self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift - - obj['redshift'])) - # For first data dump, choose closest to desired redshift. 
- cosmology_splice.append(self.splice_outputs[0]) - - nextOutput = cosmology_splice[-1]['next'] - while (nextOutput is not None): - if (nextOutput['redshift'] <= near_redshift): - break - if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift']) > - cosmology_splice[-1]['dz_min']): - cosmology_splice.append(nextOutput) - nextOutput = nextOutput['next'] - if (cosmology_splice[-1]['redshift'] - - cosmology_splice[-1]['dz_max']) > near_redshift: - mylog.error("Cosmology splice incomplete due to insufficient data outputs.") - near_redshift = cosmology_splice[-1]['redshift'] - \ - cosmology_splice[-1]['dz_max'] - - mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." % - (len(cosmology_splice), far_redshift, near_redshift)) - - # change the 'next' and 'previous' pointers to point to the correct outputs - # for the created splice - for i, output in enumerate(cosmology_splice): - if len(cosmology_splice) == 1: - output['previous'] = None - output['next'] = None - elif i == 0: - output['previous'] = None - output['next'] = cosmology_splice[i + 1] - elif i == len(cosmology_splice) - 1: - output['previous'] = cosmology_splice[i - 1] - output['next'] = None - else: - output['previous'] = cosmology_splice[i - 1] - output['next'] = cosmology_splice[i + 1] - - self.splice_outputs.sort(key=lambda obj: obj['time']) - return cosmology_splice - - def plan_cosmology_splice(self, near_redshift, far_redshift, - decimals=3, filename=None, - start_index=0): - r"""Create imaginary list of redshift outputs to maximally - span a redshift interval. - - If you want to run a cosmological simulation that will have just - enough data outputs to create a cosmology splice, - this method will calculate a list of redshifts outputs that will - minimally connect a redshift interval. - - Parameters - ---------- - near_redshift : float - The nearest (lowest) redshift in the cosmology splice list. - far_redshift : float - The furthest (highest) redshift in the cosmology splice list. - decimals : int - The decimal place to which the output redshift will be rounded. - If the decimal place in question is nonzero, the redshift will - be rounded up to - ensure continuity of the splice. Default: 3. - filename : string - If provided, a file will be written with the redshift outputs in - the form in which they should be given in the enzo dataset. - Default: None. - start_index : int - The index of the first redshift output. Default: 0. - - Examples - -------- - >>> from yt.analysis_modules.api import CosmologySplice - >>> my_splice = CosmologySplice('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo') - >>> my_splice.plan_cosmology_splice(0.0, 0.1, filename='redshifts.out') - - """ - - z = far_redshift - outputs = [] - - while z > near_redshift: - rounded = np.round(z, decimals=decimals) - if rounded - z < 0: - rounded += np.power(10.0, (-1.0*decimals)) - z = rounded - - deltaz_max = self._deltaz_forward(z, self.simulation.box_size * - self.max_box_fraction) - outputs.append({'redshift': z, 'dz_max': deltaz_max}) - z -= deltaz_max - - mylog.info("%d data dumps will be needed to get from z = %f to %f." % - (len(outputs), near_redshift, far_redshift)) - - if filename is not None: - self.simulation._write_cosmology_outputs(filename, outputs, - start_index, - decimals=decimals) - return outputs - - def _calculate_deltaz_max(self): - r"""Calculate delta z that corresponds to full box length going - from z to (z - delta z). 
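# Self-contained sketch (editorial, not from the yt source) of the secant iteration
# that _deltaz_forward below performs, with a plain flat-LCDM comoving distance in
# place of yt's Cosmology object. H0, Om, OL and the target distance are assumed
# values for illustration only.
import numpy as np

H0, Om, OL, c_kms = 70.0, 0.3, 0.7, 299792.458

def comoving_distance(z_lo, z_hi, n=2048):
    zz = np.linspace(z_lo, z_hi, n)
    return np.trapz(c_kms / (H0 * np.sqrt(Om * (1 + zz) ** 3 + OL)), zz)   # Mpc

def deltaz_forward(z, target_mpc, tol=1e-4, max_iter=100):
    z1, d1 = z, 0.0                      # distance from z to itself is zero
    z2 = z - 0.01                        # crude first guess
    d2 = comoving_distance(z2, z)
    for _ in range(max_iter):
        if abs(d2 - target_mpc) / target_mpc < tol:
            break
        slope = (d2 - d1) / (z2 - z1)    # secant step toward the target distance
        z1, d1 = z2, d2
        z2 = z2 + (target_mpc - d2) / slope
        d2 = comoving_distance(z2, z)
    return abs(z2 - z)

# e.g. deltaz_forward(0.5, 100.0) gives the delta z spanning 100 comoving Mpc at z = 0.5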
- """ - - target_distance = self.simulation.box_size * \ - self.max_box_fraction - for output in self.splice_outputs: - output['dz_max'] = self._deltaz_forward(output['redshift'], - target_distance) - - def _calculate_deltaz_min(self, deltaz_min=0.0): - r"""Calculate delta z that corresponds to a single top grid pixel - going from z to (z - delta z). - """ - - target_distance = self.simulation.box_size / \ - self.simulation.domain_dimensions[0] - for output in self.splice_outputs: - zf = self._deltaz_forward(output['redshift'], - target_distance) - output['dz_min'] = max(zf, deltaz_min) - - def _deltaz_forward(self, z, target_distance): - r"""Calculate deltaz corresponding to moving a comoving distance - starting from some redshift. - """ - - d_Tolerance = 1e-4 - max_Iterations = 100 - - z1 = z - # Use Hubble's law for initial guess - target_distance = self.cosmology.quan(target_distance.to("Mpccm / h")) - v = self.cosmology.hubble_parameter(z) * target_distance - v = min(v, 0.9 * c) - dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1. - z2 = z1 - dz - distance1 = self.cosmology.quan(0.0, "Mpccm / h") - distance2 = self.cosmology.comoving_radial_distance(z2, z) - iteration = 1 - - while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance): - m = (distance2 - distance1) / (z2 - z1) - z1 = z2 - distance1 = distance2 - z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2 - distance2 = self.cosmology.comoving_radial_distance(z2, z) - iteration += 1 - if (iteration > max_Iterations): - mylog.error("deltaz_forward: Warning - max iterations " + - "exceeded for z = %f (delta z = %f)." % - (z, np.abs(z2 - z))) - break - return np.abs(z2 - z) diff --git a/yt/analysis_modules/cosmological_observation/light_cone/__init__.py b/yt/analysis_modules/cosmological_observation/light_cone/__init__.py deleted file mode 100644 index 18ea5c8cef3..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_cone/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Import stuff for light cone generator. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/analysis_modules/cosmological_observation/light_cone/api.py b/yt/analysis_modules/cosmological_observation/light_cone/api.py deleted file mode 100644 index a9a74b2119b..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_cone/api.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -API for light_cone - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the LightCone module has been moved to " - "the yt_astro_analysis package. This version is deprecated " - "and will be removed from yt in a future release. 
See " - "https://github.com/yt-project/yt_astro_analysis for further " - "information.") - -from .light_cone import \ - LightCone diff --git a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py deleted file mode 100644 index c7aa2168da0..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py +++ /dev/null @@ -1,468 +0,0 @@ -""" -LightCone class and member functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.utilities.on_demand_imports import _h5py as h5py -import numpy as np -import os - -from yt.config import \ - ytcfg -from yt.funcs import \ - ensure_dir, \ - mylog, \ - only_on_root -from yt.analysis_modules.cosmological_observation.cosmology_splice import \ - CosmologySplice -from yt.convenience import \ - load -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - parallel_objects, \ - parallel_root_only -from yt.visualization.image_writer import \ - write_image -from yt.units.yt_array import \ - YTArray -from .light_cone_projection import \ - _light_cone_projection - -class LightCone(CosmologySplice): - """ - Initialize a LightCone object. - - Parameters - ---------- - near_redshift : float - The near (lowest) redshift for the light cone. - far_redshift : float - The far (highest) redshift for the light cone. - observer_redshift : float - The redshift of the observer. - Default: 0.0. - use_minimum_datasets : bool - If True, the minimum number of datasets is used to connect the initial - and final redshift. If false, the light cone solution will contain - as many entries as possible within the redshift interval. - Default: True. - deltaz_min : float - Specifies the minimum :math:`\Delta z` between consecutive datasets in - the returned list. - Default: 0.0. - minimum_coherent_box_fraction : float - Used with use_minimum_datasets set to False, this parameter specifies - the fraction of the total box size to be traversed before rerandomizing - the projection axis and center. This was invented to allow light cones - with thin slices to sample coherent large scale structure, but in - practice does not work so well. Try setting this parameter to 1 and - see what happens. - Default: 0.0. - time_data : bool - Whether or not to include time outputs when gathering - datasets for time series. - Default: True. - redshift_data : bool - Whether or not to include redshift outputs when gathering - datasets for time series. - Default: True. - find_outputs : bool - Whether or not to search for datasets in the current - directory. - Default: False. - set_parameters : dict - Dictionary of parameters to attach to ds.parameters. - Default: None. - output_dir : string - The directory in which images and data files will be written. - Default: "LC". - output_prefix : string - The prefix of all images and data files. - Default: "LightCone". 
- - """ - def __init__(self, parameter_filename, simulation_type, - near_redshift, far_redshift, - observer_redshift=0.0, - use_minimum_datasets=True, deltaz_min=0.0, - minimum_coherent_box_fraction=0.0, - time_data=True, redshift_data=True, - find_outputs=False, set_parameters=None, - output_dir="LC", output_prefix="LightCone"): - - self.near_redshift = near_redshift - self.far_redshift = far_redshift - self.observer_redshift = observer_redshift - self.use_minimum_datasets = use_minimum_datasets - self.deltaz_min = deltaz_min - self.minimum_coherent_box_fraction = minimum_coherent_box_fraction - if set_parameters is None: - self.set_parameters = {} - else: - self.set_parameters = set_parameters - self.output_dir = output_dir - self.output_prefix = output_prefix - - # Create output directory. - ensure_dir(self.output_dir) - - # Calculate light cone solution. - CosmologySplice.__init__(self, parameter_filename, simulation_type, - find_outputs=find_outputs) - self.light_cone_solution = \ - self.create_cosmology_splice(self.near_redshift, self.far_redshift, - minimal=self.use_minimum_datasets, - deltaz_min=self.deltaz_min, - time_data=time_data, - redshift_data=redshift_data) - - def calculate_light_cone_solution(self, seed=None, filename=None): - r"""Create list of projections to be added together to make the light cone. - - Several sentences providing an extended description. Refer to - variables using back-ticks, e.g. `var`. - - Parameters - ---------- - seed : int - The seed for the random number generator. Any light cone solution - can be reproduced by giving the same random seed. Default: None - (each solution will be distinct). - filename : string - If given, a text file detailing the solution will be written out. - Default: None. - - """ - - # Don"t use box coherence with maximum projection depths. - if self.use_minimum_datasets and \ - self.minimum_coherent_box_fraction > 0: - mylog.info("Setting minimum_coherent_box_fraction to 0 with " + - "minimal light cone.") - self.minimum_coherent_box_fraction = 0 - - # Calculate projection sizes, and get - # random projection axes and centers. - seed = int(seed) - np.random.seed(seed) - - # For box coherence, keep track of effective depth travelled. - box_fraction_used = 0.0 - - for q in range(len(self.light_cone_solution)): - if "previous" in self.light_cone_solution[q]: - del self.light_cone_solution[q]["previous"] - if "next" in self.light_cone_solution[q]: - del self.light_cone_solution[q]["next"] - if q == len(self.light_cone_solution) - 1: - z_next = self.near_redshift - else: - z_next = self.light_cone_solution[q+1]["redshift"] - - # Calculate fraction of box required for a depth of delta z - self.light_cone_solution[q]["box_depth_fraction"] = \ - (self.cosmology.comoving_radial_distance(z_next, \ - self.light_cone_solution[q]["redshift"]) / \ - self.simulation.box_size).in_units("") - - # Calculate fraction of box required for width corresponding to - # requested image size. - proper_box_size = self.simulation.box_size / \ - (1.0 + self.light_cone_solution[q]["redshift"]) - self.light_cone_solution[q]["box_width_per_angle"] = \ - (self.cosmology.angular_scale(self.observer_redshift, - self.light_cone_solution[q]["redshift"]) / - proper_box_size).in_units("1 / degree") - - # Simple error check to make sure more than 100% of box depth - # is never required. 
- if self.light_cone_solution[q]["box_depth_fraction"] > 1.0: - mylog.error(("Warning: box fraction required to go from " + - "z = %f to %f is %f") % - (self.light_cone_solution[q]["redshift"], z_next, - self.light_cone_solution[q]["box_depth_fraction"])) - mylog.error(("Full box delta z is %f, but it is %f to the " + - "next data dump.") % - (self.light_cone_solution[q]["dz_max"], - self.light_cone_solution[q]["redshift"]-z_next)) - - # Get projection axis and center. - # If using box coherence, only get random axis and center if enough - # of the box has been used, or if box_fraction_used will be greater - # than 1 after this slice. - if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \ - (box_fraction_used > self.minimum_coherent_box_fraction) or \ - (box_fraction_used + - self.light_cone_solution[q]["box_depth_fraction"] > 1.0): - # Random axis and center. - self.light_cone_solution[q]["projection_axis"] = \ - np.random.randint(0, 3) - self.light_cone_solution[q]["projection_center"] = \ - np.random.random(3) - box_fraction_used = 0.0 - else: - # Same axis and center as previous slice, - # but with depth center shifted. - self.light_cone_solution[q]["projection_axis"] = \ - self.light_cone_solution[q-1]["projection_axis"] - self.light_cone_solution[q]["projection_center"] = \ - self.light_cone_solution[q-1]["projection_center"].copy() - self.light_cone_solution[q]["projection_center"]\ - [self.light_cone_solution[q]["projection_axis"]] += \ - 0.5 * (self.light_cone_solution[q]["box_depth_fraction"] + - self.light_cone_solution[q-1]["box_depth_fraction"]) - if self.light_cone_solution[q]["projection_center"]\ - [self.light_cone_solution[q]["projection_axis"]] >= 1.0: - self.light_cone_solution[q]["projection_center"]\ - [self.light_cone_solution[q]["projection_axis"]] -= 1.0 - - box_fraction_used += self.light_cone_solution[q]["box_depth_fraction"] - - # Write solution to a file. - if filename is not None: - self._save_light_cone_solution(filename=filename) - - def project_light_cone(self, field_of_view, image_resolution, field, - weight_field=None, photon_field=False, - save_stack=True, save_final_image=True, - save_slice_images=False, - cmap_name=None, - njobs=1, dynamic=False): - r"""Create projections for light cone, then add them together. - - Parameters - ---------- - field_of_view : YTQuantity or tuple of (float, str) - The field of view of the image and the units. - image_resolution : YTQuantity or tuple of (float, str) - The size of each image pixel and the units. - field : string - The projected field. - weight_field : string - the weight field of the projection. This has the same meaning as - in standard projections. - Default: None. - photon_field : bool - if True, the projection data for each slice is decremented by 4 Pi - R^2`, where R is the luminosity distance between the observer and - the slice redshift. - Default: False. - save_stack : bool - if True, the light cone data including each individual - slice is written to an hdf5 file. - Default: True. - save_final_image : bool - if True, save an image of the final light cone projection. - Default: True. - save_slice_images : bool - save images for each individual projection slice. - Default: False. - cmap_name : string - color map for images. - Default: your default colormap. - njobs : int - The number of parallel jobs over which the light cone projection - will be split. Choose -1 for one processor per individual - projection and 1 to have all processors work together on each - projection. - Default: 1. 
- dynamic : bool - If True, use dynamic load balancing to create the projections. - Default: False. - - """ - - if cmap_name is None: - cmap_name = ytcfg.get("yt", "default_colormap") - - if isinstance(field_of_view, tuple) and len(field_of_view) == 2: - field_of_view = self.simulation.quan(field_of_view[0], - field_of_view[1]) - elif not isinstance(field_of_view, YTArray): - raise RuntimeError("field_of_view argument must be either a YTQuantity " + - "or a tuple of type (float, str).") - if isinstance(image_resolution, tuple) and len(image_resolution) == 2: - image_resolution = self.simulation.quan(image_resolution[0], - image_resolution[1]) - elif not isinstance(image_resolution, YTArray): - raise RuntimeError("image_resolution argument must be either a YTQuantity " + - "or a tuple of type (float, str).") - - # Calculate number of pixels on a side. - pixels = int((field_of_view / image_resolution).in_units("")) - - # Clear projection stack. - projection_stack = [] - projection_weight_stack = [] - if "object" in self.light_cone_solution[-1]: - del self.light_cone_solution[-1]["object"] - - # for q, output in enumerate(self.light_cone_solution): - all_storage = {} - for my_storage, output in parallel_objects(self.light_cone_solution, - storage=all_storage, - dynamic=dynamic): - output["object"] = load(output["filename"]) - output["object"].parameters.update(self.set_parameters) - - # Calculate fraction of box required for width corresponding to - # requested image size. - proper_box_size = self.simulation.box_size / \ - (1.0 + output["redshift"]) - output["box_width_fraction"] = (output["box_width_per_angle"] * - field_of_view).in_units("") - - frb = _light_cone_projection(output, field, pixels, - weight_field=weight_field) - - if photon_field: - # Decrement the flux by the luminosity distance. - # Assume field in frb is in erg/s/cm^2/Hz - dL = self.cosmology.luminosity_distance(self.observer_redshift, - output["redshift"]) - proper_box_size = self.simulation.box_size / \ - (1.0 + output["redshift"]) - pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2 - factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2) - mylog.info("Distance to slice = %s" % dL) - frb[field] *= factor #in erg/s/cm^2/Hz on observer"s image plane. - - if weight_field is None: - my_storage.result = {"field": frb[field]} - else: - my_storage.result = {"field": (frb[field] * - frb["weight_field"]), - "weight_field": frb["weight_field"]} - - del output["object"] - - # Combine results from each slice. - all_slices = list(all_storage.keys()) - all_slices.sort() - for my_slice in all_slices: - if save_slice_images: - name = os.path.join(self.output_dir, - "%s_%04d_%04d" % - (self.output_prefix, - my_slice, len(self.light_cone_solution))) - if weight_field is None: - my_image = all_storage[my_slice]["field"] - else: - my_image = all_storage[my_slice]["field"] / \ - all_storage[my_slice]["weight_field"] - only_on_root(write_image, np.log10(my_image), - "%s_%s.png" % (name, field), cmap_name=cmap_name) - - projection_stack.append(all_storage[my_slice]["field"]) - if weight_field is not None: - projection_weight_stack.append(all_storage[my_slice]["field"]) - - projection_stack = self.simulation.arr(projection_stack) - projection_weight_stack = self.simulation.arr(projection_weight_stack) - - # Add up slices to make light cone projection. 
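# Minimal sketch (editorial, not from the yt source) of the stacking rule applied
# just below: an unweighted light cone is a plain sum over the slice images, a
# weighted one is the weighted sum normalized by the summed weights. Shapes and
# names here are illustrative.
import numpy as np

def stack_slices(slice_images, slice_weights=None):
    slice_images = np.asarray(slice_images)          # shape (n_slices, nx, ny)
    if slice_weights is None:
        return slice_images.sum(axis=0)
    slice_weights = np.asarray(slice_weights)
    return (slice_images * slice_weights).sum(axis=0) / slice_weights.sum(axis=0)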
- if (weight_field is None): - light_cone_projection = projection_stack.sum(axis=0) - else: - light_cone_projection = \ - projection_stack.sum(axis=0) / \ - self.simulation.arr(projection_weight_stack).sum(axis=0) - - filename = os.path.join(self.output_dir, self.output_prefix) - - # Write image. - if save_final_image: - only_on_root(write_image, np.log10(light_cone_projection), - "%s_%s.png" % (filename, field), cmap_name=cmap_name) - - # Write stack to hdf5 file. - if save_stack: - self._save_light_cone_stack(field, weight_field, - projection_stack, projection_weight_stack, - filename=filename, - attrs={"field_of_view": str(field_of_view), - "image_resolution": str(image_resolution)}) - - @parallel_root_only - def _save_light_cone_solution(self, filename="light_cone.dat"): - "Write out a text file with information on light cone solution." - - mylog.info("Saving light cone solution to %s." % filename) - - f = open(filename, "w") - f.write("# parameter_filename = %s\n" % self.parameter_filename) - f.write("\n") - f.write("# Slice Dataset Redshift depth/box " + \ - "width/degree axis center\n") - for q, output in enumerate(self.light_cone_solution): - f.write(("%04d %s %f %f %f %d %f %f %f\n") % - (q, output["filename"], output["redshift"], - output["box_depth_fraction"], output["box_width_per_angle"], - output["projection_axis"], output["projection_center"][0], - output["projection_center"][1], output["projection_center"][2])) - f.close() - - @parallel_root_only - def _save_light_cone_stack(self, field, weight_field, - pstack, wstack, - filename=None, attrs=None): - "Save the light cone projection stack as a 3d array in and hdf5 file." - - if attrs is None: - attrs = {} - - # Make list of redshifts to include as a dataset attribute. - redshift_list = np.array([my_slice["redshift"] \ - for my_slice in self.light_cone_solution]) - - field_node = "%s_%s" % (field, weight_field) - weight_field_node = "weight_field_%s" % weight_field - - if (filename is None): - filename = os.path.join(self.output_dir, "%s_data" % self.output_prefix) - if not(filename.endswith(".h5")): - filename += ".h5" - - if pstack.size == 0: - mylog.info("save_light_cone_stack: light cone projection is empty.") - return - - mylog.info("Writing light cone data to %s." % filename) - - fh = h5py.File(filename, mode="a") - - if field_node in fh: - del fh[field_node] - - mylog.info("Saving %s to %s." % (field_node, filename)) - dataset = fh.create_dataset(field_node, - data=pstack) - dataset.attrs["units"] = str(pstack.units) - dataset.attrs["redshifts"] = redshift_list - dataset.attrs["observer_redshift"] = np.float(self.observer_redshift) - for key, value in attrs.items(): - dataset.attrs[key] = value - - if wstack.size > 0: - if weight_field_node in fh: - del fh[weight_field_node] - - mylog.info("Saving %s to %s." 
% (weight_field_node, filename)) - dataset = fh.create_dataset(weight_field_node, - data=wstack) - dataset.attrs["units"] = str(wstack.units) - dataset.attrs["redshifts"] = redshift_list - dataset.attrs["observer_redshift"] = np.float(self.observer_redshift) - for key, value in attrs.items(): - dataset.attrs[key] = value - - fh.close() diff --git a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py deleted file mode 100644 index 33c2b7856e6..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py +++ /dev/null @@ -1,265 +0,0 @@ -""" -Create randomly centered, tiled projections to be used in light cones. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np - -from yt.funcs import \ - mylog -from yt.units.yt_array import \ - uconcatenate -from yt.visualization.fixed_resolution import \ - FixedResolutionBuffer -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - parallel_blocking_call - -@parallel_blocking_call -def _light_cone_projection(my_slice, field, pixels, weight_field=None, - save_image=False, field_cuts=None): - "Create a single projection to be added into the light cone stack." - - # We are just saving the projection object, so only the projection axis - # needs to be considered since the lateral shifting and tiling occurs after - # the projection object is made. - # Likewise, only the box_depth_fraction needs to be considered. - - mylog.info("Making projection at z = %f from %s." % \ - (my_slice["redshift"], my_slice["filename"])) - - region_center = [0.5 * (my_slice["object"].domain_right_edge[q] + - my_slice["object"].domain_left_edge[q]) \ - for q in range(my_slice["object"].dimensionality)] - - # 1. The Depth Problem - # Use coordinate field cut in line of sight to cut projection to proper depth. 
- if field_cuts is None: - these_field_cuts = [] - else: - these_field_cuts = field_cuts.copy() - - if (my_slice["box_depth_fraction"] < 1): - axis = ("x", "y", "z")[my_slice["projection_axis"]] - depthLeft = \ - my_slice["projection_center"][my_slice["projection_axis"]] \ - - 0.5 * my_slice["box_depth_fraction"] - depthRight = \ - my_slice["projection_center"][my_slice["projection_axis"]] \ - + 0.5 * my_slice["box_depth_fraction"] - if (depthLeft < 0): - cut_mask = ( - "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & " - " (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | " - "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & " - " (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \ - (axis, axis, axis, axis, depthRight, - axis, axis, (depthLeft+1), axis, axis) - elif (depthRight > 1): - cut_mask = ( - "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & " - "(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | " - "((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & " - "(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \ - (axis, axis, axis, axis, (depthRight-1), - axis, axis, depthLeft, axis, axis) - else: - cut_mask = ( - "(obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & " - "(obj['index', '%s'] - 0.5*obj['index', '%s'] <= %f)") % \ - (axis, axis, depthLeft, axis, axis, depthRight) - - these_field_cuts.append(cut_mask) - - data_source = my_slice["object"].all_data() - cut_region = data_source.cut_region(these_field_cuts) - - # Make projection. - proj = my_slice["object"].proj(field, my_slice["projection_axis"], - weight_field, center=region_center, - data_source=cut_region) - proj_field = proj.field[0] - - del data_source, cut_region - - # 2. The Tile Problem - # Tile projection to specified width. - - # Original projection data. - original_px = proj.field_data["px"].in_units("code_length").copy() - original_py = proj.field_data["py"].in_units("code_length").copy() - original_pdx = proj.field_data["pdx"].in_units("code_length").copy() - original_pdy = proj.field_data["pdy"].in_units("code_length").copy() - original_field = proj.field_data[proj_field].copy() - original_weight_field = proj.field_data["weight_field"].copy() - - for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]: - proj.field_data[my_field] = [proj.field_data[my_field]] - - # Copy original into offset positions to make tiles. - for x in range(int(np.ceil(my_slice["box_width_fraction"]))): - x = my_slice["object"].quan(x, "code_length") - for y in range(int(np.ceil(my_slice["box_width_fraction"]))): - y = my_slice["object"].quan(y, "code_length") - if ((x + y) > 0): - proj.field_data["px"] += [original_px+x] - proj.field_data["py"] += [original_py+y] - proj.field_data["pdx"] += [original_pdx] - proj.field_data["pdy"] += [original_pdy] - proj.field_data["weight_field"] += [original_weight_field] - proj.field_data[proj_field] += [original_field] - - for my_field in ["px", "py", "pdx", "pdy", proj_field, "weight_field"]: - proj.field_data[my_field] = \ - my_slice["object"].arr(proj.field_data[my_field]).flatten() - - # Delete originals. - del original_px - del original_py - del original_pdx - del original_pdy - del original_field - del original_weight_field - - # 3. The Shift Problem - # Shift projection by random x and y offsets. 
- - image_axes = np.roll(np.arange(3), -my_slice["projection_axis"])[1:] - di_left_x = my_slice["object"].domain_left_edge[image_axes[0]] - di_right_x = my_slice["object"].domain_right_edge[image_axes[0]] - di_left_y = my_slice["object"].domain_left_edge[image_axes[1]] - di_right_y = my_slice["object"].domain_right_edge[image_axes[1]] - - offset = my_slice["projection_center"].copy() * \ - my_slice["object"].domain_width - offset = np.roll(offset, -my_slice["projection_axis"])[1:] - - # Shift x and y positions. - proj.field_data["px"] -= offset[0] - proj.field_data["py"] -= offset[1] - - # Wrap off-edge cells back around to other side (periodic boundary conditions). - proj.field_data["px"][proj.field_data["px"] < di_left_x] += \ - np.ceil(my_slice["box_width_fraction"]) * di_right_x - proj.field_data["py"][proj.field_data["py"] < di_left_y] += \ - np.ceil(my_slice["box_width_fraction"]) * di_right_y - - # After shifting, some cells have fractional coverage on both sides of the box. - # Find those cells and make copies to be placed on the other side. - - # Cells hanging off the right edge. - add_x_right = proj.field_data["px"] + 0.5 * proj.field_data["pdx"] > \ - np.ceil(my_slice["box_width_fraction"]) * di_right_x - add_x_px = proj.field_data["px"][add_x_right] - add_x_px -= np.ceil(my_slice["box_width_fraction"]) * di_right_x - add_x_py = proj.field_data["py"][add_x_right] - add_x_pdx = proj.field_data["pdx"][add_x_right] - add_x_pdy = proj.field_data["pdy"][add_x_right] - add_x_field = proj.field_data[proj_field][add_x_right] - add_x_weight_field = proj.field_data["weight_field"][add_x_right] - del add_x_right - - # Cells hanging off the left edge. - add_x_left = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < di_left_x - add2_x_px = proj.field_data["px"][add_x_left] - add2_x_px += np.ceil(my_slice["box_width_fraction"]) * di_right_x - add2_x_py = proj.field_data["py"][add_x_left] - add2_x_pdx = proj.field_data["pdx"][add_x_left] - add2_x_pdy = proj.field_data["pdy"][add_x_left] - add2_x_field = proj.field_data[proj_field][add_x_left] - add2_x_weight_field = proj.field_data["weight_field"][add_x_left] - del add_x_left - - # Cells hanging off the top edge. - add_y_right = proj.field_data["py"] + 0.5 * proj.field_data["pdy"] > \ - np.ceil(my_slice["box_width_fraction"]) * di_right_y - add_y_px = proj.field_data["px"][add_y_right] - add_y_py = proj.field_data["py"][add_y_right] - add_y_py -= np.ceil(my_slice["box_width_fraction"]) * di_right_y - add_y_pdx = proj.field_data["pdx"][add_y_right] - add_y_pdy = proj.field_data["pdy"][add_y_right] - add_y_field = proj.field_data[proj_field][add_y_right] - add_y_weight_field = proj.field_data["weight_field"][add_y_right] - del add_y_right - - # Cells hanging off the bottom edge. - add_y_left = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < di_left_y - add2_y_px = proj.field_data["px"][add_y_left] - add2_y_py = proj.field_data["py"][add_y_left] - add2_y_py += np.ceil(my_slice["box_width_fraction"]) * di_right_y - add2_y_pdx = proj.field_data["pdx"][add_y_left] - add2_y_pdy = proj.field_data["pdy"][add_y_left] - add2_y_field = proj.field_data[proj_field][add_y_left] - add2_y_weight_field = proj.field_data["weight_field"][add_y_left] - del add_y_left - - # Add the hanging cells back to the projection data. 
- proj.field_data["px"] = uconcatenate( - [proj.field_data["px"], add_x_px, - add_y_px, add2_x_px, add2_y_px]) - proj.field_data["py"] = uconcatenate( - [proj.field_data["py"], add_x_py, - add_y_py, add2_x_py, add2_y_py]) - proj.field_data["pdx"] = uconcatenate( - [proj.field_data["pdx"], add_x_pdx, - add_y_pdx, add2_x_pdx, add2_y_pdx]) - proj.field_data["pdy"] = uconcatenate( - [proj.field_data["pdy"], add_x_pdy, - add_y_pdy, add2_x_pdy, add2_y_pdy]) - proj.field_data[proj_field] = uconcatenate( - [proj.field_data[proj_field], add_x_field, - add_y_field, add2_x_field, add2_y_field]) - proj.field_data["weight_field"] = uconcatenate( - [proj.field_data["weight_field"], - add_x_weight_field, add_y_weight_field, - add2_x_weight_field, add2_y_weight_field]) - - # Delete original copies of hanging cells. - del add_x_px, add_y_px, add2_x_px, add2_y_px - del add_x_py, add_y_py, add2_x_py, add2_y_py - del add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx - del add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy - del add_x_field, add_y_field, add2_x_field, add2_y_field - del add_x_weight_field, add_y_weight_field, add2_x_weight_field, add2_y_weight_field - - # Tiles were made rounding up the width to the nearest integer. - # Cut off the edges to get the specified width. - # Cut in the x direction. - cut_x = proj.field_data["px"] - 0.5 * proj.field_data["pdx"] < \ - di_right_x * my_slice["box_width_fraction"] - proj.field_data["px"] = proj.field_data["px"][cut_x] - proj.field_data["py"] = proj.field_data["py"][cut_x] - proj.field_data["pdx"] = proj.field_data["pdx"][cut_x] - proj.field_data["pdy"] = proj.field_data["pdy"][cut_x] - proj.field_data[proj_field] = proj.field_data[proj_field][cut_x] - proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_x] - del cut_x - - # Cut in the y direction. - cut_y = proj.field_data["py"] - 0.5 * proj.field_data["pdy"] < \ - di_right_y * my_slice["box_width_fraction"] - proj.field_data["px"] = proj.field_data["px"][cut_y] - proj.field_data["py"] = proj.field_data["py"][cut_y] - proj.field_data["pdx"] = proj.field_data["pdx"][cut_y] - proj.field_data["pdy"] = proj.field_data["pdy"][cut_y] - proj.field_data[proj_field] = proj.field_data[proj_field][cut_y] - proj.field_data["weight_field"] = proj.field_data["weight_field"][cut_y] - del cut_y - - # Create fixed resolution buffer to return back to the light cone object. - # These buffers will be stacked together to make the light cone. - frb = FixedResolutionBuffer(proj, - (di_left_x, di_right_x * my_slice["box_width_fraction"], - di_left_y, di_right_y * my_slice["box_width_fraction"]), - (pixels, pixels), antialias=False) - - return frb diff --git a/yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py b/yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py deleted file mode 100644 index 18cdaec3c4f..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_cone/tests/test_light_cone.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -light cone generator test - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.utilities.on_demand_imports import \ - _h5py as h5py -import numpy as np -import os -import shutil -import tempfile - -from yt.analysis_modules.cosmological_observation.api import \ - LightCone -from yt.testing import \ - assert_equal, \ - requires_module -from yt.utilities.answer_testing.framework import \ - AnswerTestingTest, \ - requires_sim - -ETC = "enzo_tiny_cosmology/32Mpc_32.enzo" - -class LightConeProjectionTest(AnswerTestingTest): - _type_name = "LightConeProjection" - _attrs = () - - def __init__(self, parameter_file, simulation_type): - self.parameter_file = parameter_file - self.simulation_type = simulation_type - self.ds = os.path.basename(self.parameter_file) - - @property - def storage_name(self): - return os.path.basename(self.parameter_file) - - @requires_module("h5py") - def run(self): - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lc = LightCone( - self.parameter_file, self.simulation_type, 0., 0.1, - observer_redshift=0.0, time_data=False) - lc.calculate_light_cone_solution( - seed=123456789, filename="LC/solution.txt") - lc.project_light_cone( - (600.0, "arcmin"), (60.0, "arcsec"), "density", - weight_field=None, save_stack=True) - - fh = h5py.File("LC/LightCone.h5", mode="r") - data = fh["density_None"].value - units = fh["density_None"].attrs["units"] - assert units == "g/cm**2" - fh.close() - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - - mean = data.mean() - mi = data[data.nonzero()].min() - ma = data.max() - return np.array([mean, mi, ma]) - - def compare(self, new_result, old_result): - assert_equal(new_result, old_result, verbose=True) - -@requires_sim(ETC, "Enzo") -def test_light_cone_projection(): - yield LightConeProjectionTest(ETC, "Enzo") diff --git a/yt/analysis_modules/cosmological_observation/light_ray/api.py b/yt/analysis_modules/cosmological_observation/light_ray/api.py deleted file mode 100644 index 7f356fe3506..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_ray/api.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -API for light_ray - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the LightRay module has been moved to the Trident " - "package. This version is deprecated and will be removed from yt " - "in a future release. See https://github.com/trident-project/trident " - "for further information.") - -from .light_ray import \ - LightRay diff --git a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py deleted file mode 100644 index aadbf4a2e01..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py +++ /dev/null @@ -1,900 +0,0 @@ -""" -LightRay class and member functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -import numpy as np - -from yt.analysis_modules.cosmological_observation.cosmology_splice import \ - CosmologySplice -from yt.convenience import \ - load -from yt.frontends.ytdata.utilities import \ - save_as_dataset -from yt.units.yt_array import \ - YTArray -from yt.utilities.cosmology import \ - Cosmology -from yt.utilities.logger import \ - ytLogger as mylog -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - parallel_objects, \ - parallel_root_only -from yt.utilities.physical_constants import speed_of_light_cgs -from yt.data_objects.static_output import Dataset - -class LightRay(CosmologySplice): - """ - A 1D object representing the path of a light ray passing through a - simulation. LightRays can be either simple, where they pass through a - single dataset, or compound, where they pass through consecutive - datasets from the same cosmological simulation. One can sample any of - the fields intersected by the LightRay object as it passed through - the dataset(s). - - For compound rays, the LightRay stacks together multiple datasets in a time - series in order to approximate a LightRay's path through a volume - and redshift interval larger than a single simulation data output. - The outcome is something akin to a synthetic QSO line of sight. - - Once the LightRay object is set up, use LightRay.make_light_ray to - begin making rays. Different randomizations can be created with a - single object by providing different random seeds to make_light_ray. - - Parameters - ---------- - parameter_filename : string or :class:`~yt.data_objects.static_output.Dataset` - For simple rays, one may pass either a loaded dataset object or - the filename of a dataset. - For compound rays, one must pass the filename of the simulation - parameter file. - simulation_type : optional, string - This refers to the simulation frontend type. Do not use for simple - rays. - Default: None - near_redshift : optional, float - The near (lowest) redshift for a light ray containing multiple - datasets. Do not use for simple rays. - Default: None - far_redshift : optional, float - The far (highest) redshift for a light ray containing multiple - datasets. Do not use for simple rays. - Default: None - use_minimum_datasets : optional, bool - If True, the minimum number of datasets is used to connect the - initial and final redshift. If false, the light ray solution - will contain as many entries as possible within the redshift - interval. Do not use for simple rays. - Default: True. - max_box_fraction : optional, float - In terms of the size of the domain, the maximum length a light - ray segment can be in order to span the redshift interval from - one dataset to another. If using a zoom-in simulation, this - parameter can be set to the length of the high resolution - region so as to limit ray segments to that size. If the - high resolution region is not cubical, the smallest side - should be used. - Default: 1.0 (the size of the box) - deltaz_min : optional, float - Specifies the minimum :math:`\Delta z` between consecutive - datasets in the returned list. Do not use for simple rays. - Default: 0.0. - minimum_coherent_box_fraction : optional, float - Use to specify the minimum length of a ray, in terms of the - size of the domain, before the trajectory is re-randomized. - Set to 0 to have ray trajectory randomized for every dataset. - Set to np.inf (infinity) to use a single trajectory for the - entire ray. - Default: 0. 
- time_data : optional, bool - Whether or not to include time outputs when gathering - datasets for time series. Do not use for simple rays. - Default: True. - redshift_data : optional, bool - Whether or not to include redshift outputs when gathering - datasets for time series. Do not use for simple rays. - Default: True. - find_outputs : optional, bool - Whether or not to search for datasets in the current - directory. Do not use for simple rays. - Default: False. - load_kwargs : optional, dict - If you are passing a filename of a dataset to LightRay rather than an - already loaded dataset, then you can optionally provide this dictionary - as keywords when the dataset is loaded by yt with the "load" function. - Necessary for use with certain frontends. E.g. - Tipsy using "bounding_box" - Gadget using "unit_base", etc. - Default : None - - """ - def __init__(self, parameter_filename, simulation_type=None, - near_redshift=None, far_redshift=None, - use_minimum_datasets=True, max_box_fraction=1.0, - deltaz_min=0.0, minimum_coherent_box_fraction=0.0, - time_data=True, redshift_data=True, - find_outputs=False, load_kwargs=None): - - if near_redshift is not None and far_redshift is not None and \ - near_redshift >= far_redshift: - raise RuntimeError( - "near_redshift must be less than far_redshift.") - - self.near_redshift = near_redshift - self.far_redshift = far_redshift - self.use_minimum_datasets = use_minimum_datasets - self.deltaz_min = deltaz_min - self.minimum_coherent_box_fraction = minimum_coherent_box_fraction - self.parameter_filename = parameter_filename - if load_kwargs is None: - self.load_kwargs = {} - else: - self.load_kwargs = load_kwargs - self.light_ray_solution = [] - self._data = {} - - # The options here are: - # 1) User passed us a dataset: use it to make a simple ray - # 2) User passed us a dataset filename: use it to make a simple ray - # 3) User passed us a simulation filename: use it to make a compound ray - - # Make a light ray from a single, given dataset: #1, #2 - if simulation_type is None: - self.simulation_type = simulation_type - if isinstance(self.parameter_filename, Dataset): - self.ds = self.parameter_filename - self.parameter_filename = self.ds.basename - elif isinstance(self.parameter_filename, str): - self.ds = load(self.parameter_filename, **self.load_kwargs) - if self.ds.cosmological_simulation: - redshift = self.ds.current_redshift - self.cosmology = Cosmology( - hubble_constant=self.ds.hubble_constant, - omega_matter=self.ds.omega_matter, - omega_lambda=self.ds.omega_lambda) - else: - redshift = 0. - self.light_ray_solution.append({"filename": self.parameter_filename, - "redshift": redshift}) - - # Make a light ray from a simulation time-series. #3 - else: - self.ds = None - assert isinstance(self.parameter_filename, str) - # Get list of datasets for light ray solution. - CosmologySplice.__init__(self, self.parameter_filename, simulation_type, - find_outputs=find_outputs) - self.light_ray_solution = \ - self.create_cosmology_splice( - self.near_redshift, self.far_redshift, - minimal=self.use_minimum_datasets, - max_box_fraction=max_box_fraction, - deltaz_min=self.deltaz_min, - time_data=time_data, - redshift_data=redshift_data) - - def _calculate_light_ray_solution(self, seed=None, - left_edge=None, right_edge=None, - min_level=None, periodic=True, - start_position=None, end_position=None, - trajectory=None, filename=None): - "Create list of datasets to be added together to make the light ray." 
- - # Calculate dataset sizes, and get random dataset axes and centers. - my_random = np.random.RandomState(seed) - - # If using only one dataset, set start and stop manually. - if start_position is not None: - if self.near_redshift is not None or self.far_redshift is not None: - raise RuntimeError("LightRay Error: cannot specify both " + \ - "start_position and a redshift range.") - if not ((end_position is None) ^ (trajectory is None)): - raise RuntimeError("LightRay Error: must specify either end_position " + \ - "or trajectory, but not both.") - self.light_ray_solution[0]['start'] = start_position - if end_position is not None: - self.light_ray_solution[0]['end'] = end_position - else: - # assume trajectory given as r, theta, phi - if len(trajectory) != 3: - raise RuntimeError("LightRay Error: trajectory must have length 3.") - r, theta, phi = trajectory - self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \ - r * np.array([np.cos(phi) * np.sin(theta), - np.sin(phi) * np.sin(theta), - np.cos(theta)]) - self.light_ray_solution[0]['traversal_box_fraction'] = \ - vector_length(self.light_ray_solution[0]['start'], - self.light_ray_solution[0]['end']) - - # the normal way (random start positions and trajectories for each dataset) - else: - - # For box coherence, keep track of effective depth travelled. - box_fraction_used = 0.0 - - for q in range(len(self.light_ray_solution)): - if (q == len(self.light_ray_solution) - 1): - z_next = self.near_redshift - else: - z_next = self.light_ray_solution[q+1]['redshift'] - - # Calculate fraction of box required for a depth of delta z - self.light_ray_solution[q]['traversal_box_fraction'] = \ - self.cosmology.comoving_radial_distance(z_next, \ - self.light_ray_solution[q]['redshift']).in_units("Mpccm / h") / \ - self.simulation.box_size - - # Get dataset axis and center. - # If using box coherence, only get start point and vector if - # enough of the box has been used. - if (q == 0) or (box_fraction_used >= - self.minimum_coherent_box_fraction): - if periodic: - self.light_ray_solution[q]['start'] = left_edge + \ - (right_edge - left_edge) * my_random.random_sample(3) - theta = np.pi * my_random.random_sample() - phi = 2 * np.pi * my_random.random_sample() - box_fraction_used = 0.0 - else: - ds = load(self.light_ray_solution[q]["filename"]) - ray_length = \ - ds.quan(self.light_ray_solution[q]['traversal_box_fraction'], - "unitary") - self.light_ray_solution[q]['start'], \ - self.light_ray_solution[q]['end'] = \ - non_periodic_ray(ds, left_edge, right_edge, ray_length, - my_random=my_random, min_level=min_level) - del ds - else: - # Use end point of previous segment, adjusted for periodicity, - # and the same trajectory. 
- self.light_ray_solution[q]['start'] = \ - periodic_adjust(self.light_ray_solution[q-1]['end'][:], - left=left_edge, right=right_edge) - - if "end" not in self.light_ray_solution[q]: - self.light_ray_solution[q]['end'] = \ - self.light_ray_solution[q]['start'] + \ - self.light_ray_solution[q]['traversal_box_fraction'] * \ - self.simulation.box_size * \ - np.array([np.cos(phi) * np.sin(theta), - np.sin(phi) * np.sin(theta), - np.cos(theta)]) - box_fraction_used += \ - self.light_ray_solution[q]['traversal_box_fraction'] - - if filename is not None: - self._write_light_ray_solution(filename, - extra_info={'parameter_filename':self.parameter_filename, - 'random_seed':seed, - 'far_redshift':self.far_redshift, - 'near_redshift':self.near_redshift}) - - def make_light_ray(self, seed=None, periodic=True, - left_edge=None, right_edge=None, min_level=None, - start_position=None, end_position=None, - trajectory=None, - fields=None, setup_function=None, - solution_filename=None, data_filename=None, - get_los_velocity=None, use_peculiar_velocity=True, - redshift=None, field_parameters=None, njobs=-1): - """ - make_light_ray(seed=None, periodic=True, - left_edge=None, right_edge=None, min_level=None, - start_position=None, end_position=None, - trajectory=None, fields=None, setup_function=None, - solution_filename=None, data_filename=None, - use_peculiar_velocity=True, redshift=None, - njobs=-1) - - Create a light ray and get field values for each lixel. A light - ray consists of a list of field values for cells intersected by - the ray and the path length of the ray through those cells. - Light ray data must be written out to an hdf5 file. - - Parameters - ---------- - seed : optional, int - Seed for the random number generator. - Default: None. - periodic : optional, bool - If True, ray trajectories will make use of periodic - boundaries. If False, ray trajectories will not be - periodic. - Default : True. - left_edge : optional, iterable of floats or YTArray - The left corner of the region in which rays are to be - generated. If None, the left edge will be that of the - domain. If specified without units, it is assumed to - be in code units. - Default: None. - right_edge : optional, iterable of floats or YTArray - The right corner of the region in which rays are to be - generated. If None, the right edge will be that of the - domain. If specified without units, it is assumed to - be in code units. - Default: None. - min_level : optional, int - The minimum refinement level of the spatial region in which - the ray passes. This can be used with zoom-in simulations - where the high resolution region does not keep a constant - geometry. - Default: None. - start_position : optional, iterable of floats or YTArray. - Used only if creating a light ray from a single dataset. - The coordinates of the starting position of the ray. - If specified without units, it is assumed to be in code units. - Default: None. - end_position : optional, iterable of floats or YTArray. - Used only if creating a light ray from a single dataset. - The coordinates of the ending position of the ray. - If specified without units, it is assumed to be in code units. - Default: None. - trajectory : optional, list of floats - Used only if creating a light ray from a single dataset. - The (r, theta, phi) direction of the light ray. Use either - end_position or trajectory, not both. - Default: None. - fields : optional, list - A list of fields for which to get data. - Default: None. 
- setup_function : optional, callable, accepts a ds - This function will be called on each dataset that is loaded - to create the light ray. For, example, this can be used to - add new derived fields. - Default: None. - solution_filename : optional, string - Path to a text file where the trajectories of each - subray is written out. - Default: None. - data_filename : optional, string - Path to output file for ray data. - Default: None. - use_peculiar_velocity : optional, bool - If True, the peculiar velocity along the ray will be sampled for - calculating the effective redshift combining the cosmological - redshift and the doppler redshift. - Default: True. - redshift : optional, float - Used with light rays made from single datasets to specify a - starting redshift for the ray. If not used, the starting - redshift will be 0 for a non-cosmological dataset and - the dataset redshift for a cosmological dataset. - Default: None. - njobs : optional, int - The number of parallel jobs over which the segments will - be split. Choose -1 for one processor per segment. - Default: -1. - - Examples - -------- - - Make a light ray from multiple datasets: - - >>> import yt - >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \ - ... LightRay - >>> my_ray = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo", - ... 0., 0.1, time_data=False) - ... - >>> my_ray.make_light_ray(seed=12345, - ... solution_filename="solution.txt", - ... data_filename="my_ray.h5", - ... fields=["temperature", "density"], - ... use_peculiar_velocity=True) - - Make a light ray from a single dataset: - - >>> import yt - >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \ - ... LightRay - >>> my_ray = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030") - ... - >>> my_ray.make_light_ray(start_position=[0., 0., 0.], - ... end_position=[1., 1., 1.], - ... solution_filename="solution.txt", - ... data_filename="my_ray.h5", - ... fields=["temperature", "density"], - ... use_peculiar_velocity=True) - - """ - if self.simulation_type is None: - domain = self.ds - else: - domain = self.simulation - - assumed_units = "code_length" - if left_edge is None: - left_edge = domain.domain_left_edge - elif not hasattr(left_edge, 'units'): - left_edge = domain.arr(left_edge, assumed_units) - left_edge.convert_to_units('unitary') - - if right_edge is None: - right_edge = domain.domain_right_edge - elif not hasattr(right_edge, 'units'): - right_edge = domain.arr(right_edge, assumed_units) - right_edge.convert_to_units('unitary') - - if start_position is not None: - if hasattr(start_position, 'units'): - start_position = start_position - else: - start_position = self.ds.arr(start_position, assumed_units) - start_position.convert_to_units('unitary') - - if end_position is not None: - if hasattr(end_position, 'units'): - end_position = end_position - else: - end_position = self.ds.arr(end_position, assumed_units) - end_position.convert_to_units('unitary') - - if get_los_velocity is not None: - use_peculiar_velocity = get_los_velocity - mylog.warn("'get_los_velocity' kwarg is deprecated. " + \ - "Use 'use_peculiar_velocity' instead.") - - # Calculate solution. - self._calculate_light_ray_solution(seed=seed, - left_edge=left_edge, - right_edge=right_edge, - min_level=min_level, periodic=periodic, - start_position=start_position, - end_position=end_position, - trajectory=trajectory, - filename=solution_filename) - - if field_parameters is None: - field_parameters = {} - - # Initialize data structures. 
- self._data = {} - # temperature field is automatically added to fields - if fields is None: fields = [] - if (('gas', 'temperature') not in fields) and \ - ('temperature' not in fields): - fields.append(('gas', 'temperature')) - data_fields = fields[:] - all_fields = fields[:] - all_fields.extend(['dl', 'dredshift', 'redshift']) - all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz']) - data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz']) - if use_peculiar_velocity: - all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', - 'velocity_los', 'redshift_eff', - 'redshift_dopp']) - data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z']) - - all_ray_storage = {} - for my_storage, my_segment in parallel_objects(self.light_ray_solution, - storage=all_ray_storage, - njobs=njobs): - - # In case of simple rays, use the already loaded dataset: self.ds, - # otherwise, load dataset for segment. - if self.ds is None: - ds = load(my_segment['filename'], **self.load_kwargs) - else: - ds = self.ds - - my_segment['unique_identifier'] = ds.unique_identifier - if redshift is not None: - if ds.cosmological_simulation and redshift != ds.current_redshift: - mylog.warn("Generating light ray with different redshift than " + - "the dataset itself.") - my_segment["redshift"] = redshift - - if setup_function is not None: - setup_function(ds) - - if not ds.cosmological_simulation: - next_redshift = my_segment["redshift"] - elif self.near_redshift == self.far_redshift: - if isinstance(my_segment["traversal_box_fraction"], YTArray) and \ - not my_segment["traversal_box_fraction"].units.is_dimensionless: - segment_length = \ - my_segment["traversal_box_fraction"].in_units("Mpccm / h") - else: - segment_length = my_segment["traversal_box_fraction"] * \ - ds.domain_width[0].in_units("Mpccm / h") - next_redshift = my_segment["redshift"] - \ - self._deltaz_forward(my_segment["redshift"], - segment_length) - elif my_segment.get("next", None) is None: - next_redshift = self.near_redshift - else: - next_redshift = my_segment['next']['redshift'] - - # Make sure start, end, left, right - # are using the dataset's unit system. - my_start = ds.arr(my_segment['start']) - my_end = ds.arr(my_segment['end']) - my_left = ds.arr(left_edge) - my_right = ds.arr(right_edge) - mylog.info("Getting segment at z = %s: %s to %s." % - (my_segment['redshift'], my_start, my_end)) - - # Break periodic ray into non-periodic segments. - sub_segments = periodic_ray(my_start, my_end, - left=my_left, right=my_right) - - # Prepare data structure for subsegment. - sub_data = {} - sub_data['segment_redshift'] = my_segment['redshift'] - for field in all_fields: - sub_data[field] = [] - - # Get data for all subsegments in segment. - for sub_segment in sub_segments: - mylog.info("Getting subsegment: %s to %s." 
% - (list(sub_segment[0]), list(sub_segment[1]))) - sub_ray = ds.ray(sub_segment[0], sub_segment[1]) - for key, val in field_parameters.items(): - sub_ray.set_field_parameter(key, val) - asort = np.argsort(sub_ray["t"]) - sub_data['dl'].extend(sub_ray['dts'][asort] * - vector_length(sub_ray.start_point, - sub_ray.end_point)) - - for field in data_fields: - sub_data[field].extend(sub_ray[field][asort]) - - if use_peculiar_velocity: - line_of_sight = sub_segment[0] - sub_segment[1] - line_of_sight /= ((line_of_sight**2).sum())**0.5 - sub_vel = ds.arr([sub_ray['velocity_x'], - sub_ray['velocity_y'], - sub_ray['velocity_z']]) - # Line of sight velocity = vel_los - sub_vel_los = (np.rollaxis(sub_vel, 1) * \ - line_of_sight).sum(axis=1) - sub_data['velocity_los'].extend(sub_vel_los[asort]) - - # doppler redshift: - # See https://en.wikipedia.org/wiki/Redshift and - # Peebles eqns: 5.48, 5.49 - - # 1 + redshift_dopp = (1 + v*cos(theta)/c) / - # sqrt(1 - v**2/c**2) - - # where v is the peculiar velocity (ie physical velocity - # without the hubble flow, but no hubble flow in sim, so - # just the physical velocity). - - # the bulk of the doppler redshift is from line of sight - # motion, but there is a small amount from time dilation - # of transverse motion, hence the inclusion of theta (the - # angle between line of sight and the velocity). - # theta is the angle between the ray vector (i.e. line of - # sight) and the velocity vectors: a dot b = ab cos(theta) - - sub_vel_mag = sub_ray['velocity_magnitude'] - cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag - # Protect against situations where velocity mag is exactly - # zero, in which case zero / zero = NaN. - cos_theta = np.nan_to_num(cos_theta) - redshift_dopp = \ - (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \ - np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1 - sub_data['redshift_dopp'].extend(redshift_dopp[asort]) - del sub_vel, sub_vel_los, sub_vel_mag, cos_theta, \ - redshift_dopp - - sub_ray.clear_data() - del sub_ray, asort - - for key in sub_data: - sub_data[key] = ds.arr(sub_data[key]).in_cgs() - - # Get redshift for each lixel. Assume linear relation between l - # and z. - sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \ - (sub_data['dl'] / vector_length(my_start, my_end).in_cgs()) - sub_data['redshift'] = my_segment['redshift'] - \ - sub_data['dredshift'].cumsum() + sub_data['dredshift'] - - # When using the peculiar velocity, create effective redshift - # (redshift_eff) field combining cosmological redshift and - # doppler redshift. - - # then to add cosmological redshift and doppler redshifts, follow - # eqn 3.75 in Peacock's Cosmological Physics: - # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler) - - if use_peculiar_velocity: - sub_data['redshift_eff'] = ((1 + sub_data['redshift_dopp']) * \ - (1 + sub_data['redshift'])) - 1 - - # Remove empty lixels. - sub_dl_nonzero = sub_data['dl'].nonzero() - for field in all_fields: - sub_data[field] = sub_data[field][sub_dl_nonzero] - del sub_dl_nonzero - - # Add to storage. - my_storage.result = sub_data - - del ds - - # Reconstruct ray data from parallel_objects storage. - all_data = [my_data for my_data in all_ray_storage.values()] - # This is now a list of segments where each one is a dictionary - # with all the fields. - all_data.sort(key=lambda a:a['segment_redshift'], reverse=True) - # Flatten the list into a single dictionary containing fields - # for the whole ray. 
- all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift']) - self._data = all_data - - if data_filename is not None: - self._write_light_ray(data_filename, all_data) - ray_ds = load(data_filename) - return ray_ds - else: - return None - - def __getitem__(self, field): - return self._data[field] - - @parallel_root_only - def _write_light_ray(self, filename, data): - """ - _write_light_ray(filename, data) - - Write light ray data to hdf5 file. - """ - - extra_attrs = {"data_type": "yt_light_ray"} - if self.simulation_type is None: - ds = self.ds - else: - ds = {} - ds["periodicity"] = (True, True, True) - ds["current_redshift"] = self.near_redshift - for attr in ["dimensionality", "cosmological_simulation", - "domain_left_edge", "domain_right_edge", - "length_unit", "time_unit"]: - ds[attr] = getattr(self.simulation, attr) - if self.simulation.cosmological_simulation: - for attr in ["omega_lambda", "omega_matter", - "hubble_constant"]: - ds[attr] = getattr(self.cosmology, attr) - ds["current_time"] = \ - self.cosmology.t_from_z(ds["current_redshift"]) - if isinstance(ds["hubble_constant"], YTArray): - ds["hubble_constant"] = \ - ds["hubble_constant"].to("100*km/(Mpc*s)").d - extra_attrs["unit_registry_json"] = \ - self.simulation.unit_registry.to_json() - - # save the light ray solution - if len(self.light_ray_solution) > 0: - for key in self.light_ray_solution[0]: - if key in ["next", "previous", "index"]: - continue - lrsa = [sol[key] for sol in self.light_ray_solution] - if isinstance(lrsa[-1], YTArray): - to_arr = YTArray - else: - to_arr = np.array - arr = to_arr(lrsa) - # If we somehow create an object array, convert it to a string - # to avoid errors later - if arr.dtype == 'O': - arr = arr.astype(str) - extra_attrs["light_ray_solution_%s" % key] = arr - - field_types = dict([(field, "grid") for field in data.keys()]) - - # Only return LightRay elements with non-zero density - if 'temperature' in data: f = 'temperature' - if ('gas', 'temperature') in data: f = ('gas', 'temperature') - if 'temperature' in data or ('gas', 'temperature') in data: - mask = data[f] > 0 - if not np.any(mask): - raise RuntimeError( - "No zones along light ray with nonzero %s. " - "Please modify your light ray trajectory." % (f,)) - for key in data.keys(): - data[key] = data[key][mask] - save_as_dataset(ds, filename, data, field_types=field_types, - extra_attrs=extra_attrs) - - @parallel_root_only - def _write_light_ray_solution(self, filename, extra_info=None): - """ - _write_light_ray_solution(filename, extra_info=None) - - Write light ray solution to a file. - """ - - mylog.info("Writing light ray solution to %s." % filename) - f = open(filename, 'w') - if extra_info is not None: - for par, val in extra_info.items(): - f.write("%s = %s\n" % (par, val)) - f.write("\nSegment Redshift dl/box Start x y " + \ - "z End x y z Dataset\n") - for q, my_segment in enumerate(self.light_ray_solution): - f.write("%04d %.6f %.6f % .10f % .10f % .10f % .10f % .10f % .10f %s\n" % \ - (q, my_segment['redshift'], my_segment['traversal_box_fraction'], - my_segment['start'][0], my_segment['start'][1], my_segment['start'][2], - my_segment['end'][0], my_segment['end'][1], my_segment['end'][2], - my_segment['filename'])) - f.close() - -def _flatten_dict_list(data, exceptions=None): - """ - _flatten_dict_list(data, exceptions=None) - - Flatten the list of dicts into one dict. 
- """ - - if exceptions is None: exceptions = [] - new_data = {} - for datum in data: - for field in [field for field in datum.keys() - if field not in exceptions]: - if field not in new_data: - new_data[field] = [] - new_data[field].extend(datum[field]) - for field in new_data: - new_data[field] = YTArray(new_data[field]) - return new_data - -def vector_length(start, end): - """ - vector_length(start, end) - - Calculate vector length. - """ - - return np.sqrt(np.power((end - start), 2).sum()) - -def periodic_adjust(p, left=None, right=None): - """ - Return the point p adjusted for periodic boundaries. - - """ - if isinstance(p, YTArray): - p.convert_to_units("unitary") - if left is None: - left = np.zeros_like(p) - if right is None: - right = np.ones_like(p) - - w = right - left - p -= left - return np.mod(p, w) - -def periodic_distance(coord1, coord2): - """ - periodic_distance(coord1, coord2) - - Calculate length of shortest vector between to points in periodic domain. - """ - dif = coord1 - coord2 - - dim = np.ones(coord1.shape,dtype=int) - def periodic_bind(num): - pos = np.abs(num % dim) - neg = np.abs(num % -dim) - return np.min([pos,neg],axis=0) - - dif = periodic_bind(dif) - return np.sqrt((dif * dif).sum(axis=-1)) - -def periodic_ray(start, end, left=None, right=None): - """ - periodic_ray(start, end, left=None, right=None) - - Break up periodic ray into non-periodic segments. - Accepts start and end points of periodic ray as YTArrays. - Accepts optional left and right edges of periodic volume as YTArrays. - Returns a list of lists of coordinates, where each element of the - top-most list is a 2-list of start coords and end coords of the - non-periodic ray: - - [[[x0start,y0start,z0start], [x0end, y0end, z0end]], - [[x1start,y1start,z1start], [x1end, y1end, z1end]], - ...,] - - """ - - if left is None: - left = np.zeros(start.shape) - if right is None: - right = np.ones(start.shape) - dim = right - left - - vector = end - start - wall = np.zeros_like(start) - close = np.zeros(start.shape, dtype=object) - - left_bound = vector < 0 - right_bound = vector > 0 - no_bound = vector == 0.0 - bound = vector != 0.0 - - wall[left_bound] = left[left_bound] - close[left_bound] = np.max - wall[right_bound] = right[right_bound] - close[right_bound] = np.min - wall[no_bound] = np.inf - close[no_bound] = np.min - - segments = [] - this_start = start.copy() - this_end = end.copy() - t = 0.0 - tolerance = 1e-6 - while t < 1.0 - tolerance: - hit_left = (this_start <= left) & (vector < 0) - if (hit_left).any(): - this_start[hit_left] += dim[hit_left] - this_end[hit_left] += dim[hit_left] - hit_right = (this_start >= right) & (vector > 0) - if (hit_right).any(): - this_start[hit_right] -= dim[hit_right] - this_end[hit_right] -= dim[hit_right] - - nearest = vector.unit_array * \ - np.array([close[q]([this_end[q], wall[q]]) \ - for q in range(start.size)]) - dt = ((nearest - this_start) / vector)[bound].min() - now = this_start + vector * dt - close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10 - now[close_enough] = nearest[close_enough] - segments.append([this_start.copy(), now.copy()]) - this_start = now.copy() - t += dt - - return segments - -def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=5000, - min_level=None, my_random=None): - - max_length = vector_length(left_edge, right_edge) - if ray_length > max_length: - raise RuntimeError( - ("The maximum segment length in the region %s to %s is %s, " + - "but the ray length requested is %s. 
Decrease ray length.") % - (left_edge, right_edge, max_length, ray_length)) - - if my_random is None: - my_random = np.random.RandomState() - i = 0 - while True: - start = my_random.random_sample(3) * \ - (right_edge - left_edge) + left_edge - theta = np.pi * my_random.random_sample() - phi = 2 * np.pi * my_random.random_sample() - end = start + ray_length * \ - np.array([np.cos(phi) * np.sin(theta), - np.sin(phi) * np.sin(theta), - np.cos(theta)]) - i += 1 - test_ray = ds.ray(start, end) - if (end >= left_edge).all() and (end <= right_edge).all() and \ - (min_level is None or min_level <= 0 or - (test_ray["grid_level"] >= min_level).all()): - mylog.info("Found ray after %d attempts." % i) - del test_ray - return start, end - del test_ray - if i > max_iter: - raise RuntimeError( - ("Failed to create segment in %d attempts. " + - "Decreasing ray length is recommended") % i) diff --git a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py deleted file mode 100644 index e09af2cae41..00000000000 --- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -Unit test for the light_ray analysis module -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np - -from yt.convenience import \ - load -from yt.testing import \ - assert_array_equal, \ - requires_file -from yt.analysis_modules.cosmological_observation.api import LightRay -import os -import shutil -from yt.utilities.answer_testing.framework import data_dir_load -import tempfile - -COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo" -COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009" - -def compare_light_ray_solutions(lr1, lr2): - assert len(lr1.light_ray_solution) == len(lr2.light_ray_solution) - if len(lr1.light_ray_solution) == 0: - return - for s1, s2 in zip(lr1.light_ray_solution, lr2.light_ray_solution): - for field in s1: - if field in ["next", "previous"]: - continue - if isinstance(s1[field], np.ndarray): - assert_array_equal(s1[field], s2[field]) - else: - assert s1[field] == s2[field] - -@requires_file(COSMO_PLUS) -def test_light_ray_cosmo(): - """ - This test generates a cosmological light ray - """ - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03) - - lr.make_light_ray(seed=1234567, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - ds = load('lightray.h5') - compare_light_ray_solutions(lr, ds) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(COSMO_PLUS) -def test_light_ray_cosmo_nested(): - """ - This test generates a cosmological light ray confing the ray to a subvolume - """ - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - left = np.ones(3) * 0.25 - right = np.ones(3) * 0.75 - - lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03) - - lr.make_light_ray(seed=1234567, left_edge=left, right_edge=right, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - ds = load('lightray.h5') - 
compare_light_ray_solutions(lr, ds) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(COSMO_PLUS) -def test_light_ray_cosmo_nonperiodic(): - """ - This test generates a cosmological light ray using non-periodic segments - """ - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03) - - lr.make_light_ray(seed=1234567, periodic=False, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - ds = load('lightray.h5') - compare_light_ray_solutions(lr, ds) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(COSMO_PLUS_SINGLE) -def test_light_ray_non_cosmo(): - """ - This test generates a non-cosmological light ray - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - lr = LightRay(COSMO_PLUS_SINGLE) - - ray_start = [0,0,0] - ray_end = [1,1,1] - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - ds = load('lightray.h5') - compare_light_ray_solutions(lr, ds) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - -@requires_file(COSMO_PLUS_SINGLE) -def test_light_ray_non_cosmo_from_dataset(): - """ - This test generates a non-cosmological light ray created from an already - loaded dataset - """ - - # Set up in a temp dir - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - ds = data_dir_load(COSMO_PLUS_SINGLE) - lr = LightRay(ds) - - ray_start = [0,0,0] - ray_end = [1,1,1] - lr.make_light_ray(start_position=ray_start, end_position=ray_end, - fields=['temperature', 'density', 'H_number_density'], - data_filename='lightray.h5') - - ds = load('lightray.h5') - compare_light_ray_solutions(lr, ds) - - # clean up - os.chdir(curdir) - shutil.rmtree(tmpdir) - diff --git a/yt/analysis_modules/halo_analysis/api.py b/yt/analysis_modules/halo_analysis/api.py index 0f59bc9058e..e9eb1f054ad 100644 --- a/yt/analysis_modules/halo_analysis/api.py +++ b/yt/analysis_modules/halo_analysis/api.py @@ -1,41 +1,7 @@ -""" -API for halo_analysis +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the HaloCatalog module has been moved to " - "the yt_astro_analysis package. This version is deprecated " - "and will be removed from yt in a future release. 
See " - "https://github.com/yt-project/yt_astro_analysis for further " - "information.") - -from .halo_catalog import \ - HaloCatalog - -from .halo_callbacks import \ - add_callback - -from .halo_finding_methods import \ - add_finding_method - -from .halo_filters import \ - add_filter - -from .halo_quantities import \ - add_quantity - -from .halo_recipes import \ - add_recipe +raise YTModuleRemoved( + "halo_analysis", + "https://github.com/yt-project/yt_astro_analysis", + "https://yt-astro-analysis.readthedocs.io/") diff --git a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py deleted file mode 100644 index c2aaa794f22..00000000000 --- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py +++ /dev/null @@ -1,805 +0,0 @@ -""" -A very simple, purely-serial, merger tree script that knows how to parse FOF -catalogs, either output by Enzo or output by yt's FOF halo finder, and then -compare parent/child relationships. - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- -# plot_halo_evolution() gives a good full example of how to use the framework - -# First pass at a simplified merger tree -# -# Basic outline: -# -# 1. Halo find inline, obtaining particle catalogs -# 2. Load dataset at time t -# 3. Load dataset at time t+1 -# 4. Parse catalogs for t and t+1 -# 5. Place halos for t+1 in kD-tree -# 6. For every halo in t, execute ball-query with some linking length -# 7. For every halo in ball-query result, execute numpy's intersect1d on -# particle IDs -# 8. Parentage is described by a fraction of particles that pass from one to -# the other; we have both descendent fractions and ancestor fractions. - - -import numpy as np -from yt.utilities.on_demand_imports import _h5py as h5py -import glob -import os - -from yt.extern.six.moves import cPickle -from yt.extern.pykdtree import KDTree -from yt.funcs import mylog, get_pbar - -import yt.extern.pydot as pydot - -# We don't currently use this, but we may again find a use for it in the -# future. -class MaxLengthDict(dict): - def __init__(self, *args, **kwargs): - dict.__init__(self, *args, **kwargs) - self.order = [None] * 50 - - def __setitem__(self, key, val): - if key not in self.order: - to_remove = self.order.pop(0) - self.pop(to_remove, None) - self.order.append(key) - dict.__setitem__(self, key, val) - - def __getitem__(self, key): - if key in self.order: - self.order.pop(self.order.index(key)) - self.order.append(key) - return dict.__getitem__(self, key) - - def __delitem__(self, key): - dict.__delitem__(self, key) - self.order.pop(self.order.index(key)) - self.order.insert(0, None) - -class HaloCatalog(object): - r"""A catalog of halos, parsed from EnzoFOF outputs. - - This class will read in catalogs output by the Enzo FOF halo finder and - make available their positions, radii, etc. Enzo FOF was provided - starting with 2.0, and can be run either inline (with the correct - options) or as a postprocessing step using the `-F` command line - option. This class is mostly useful when calculating a merger tree, - and when the particle IDs for members of a given halo are output as - well. 
- - Parameters - ---------- - output_id : int - This is the integer output id of the halo catalog to parse and - load. - cache : bool - Should we store, in between accesses, the particle IDs? If set to - true, the correct particle files must exist. - external_FOF : bool, optional - Are we building a tree from outputs generated by an - external FOF program, or an FOF internal to yt? - FOF_directory : str, optional - Directory where FOF files are located - """ - cache = None - def __init__(self, output_id, cache = True, external_FOF=True, FOF_directory="FOF"): - self.output_id = output_id - self.external_FOF = external_FOF - self.redshift = 0.0 - self.FOF_directory = FOF_directory - self.particle_file = h5py.File("%s/particles_%05i.h5" % \ - (FOF_directory, output_id), "r") - if self.external_FOF: - self.parse_halo_catalog_external() - else: - self.parse_halo_catalog_internal() - if cache: self.cache = dict()#MaxLengthDict() - - def __del__(self): - self.particle_file.close() - - def parse_halo_catalog_external(self): - hp = [] - for line in open("%s/groups_%05i.dat" % \ - (self.FOF_directory, self.output_id)): - if line.strip() == "": continue # empty - if line.startswith("# Red"): - self.redshift = float(line.split("=")[1]) - if line[0] == "#": continue # comment - if line[0] == "d": continue # datavar - x,y,z = [float(f) for f in line.split(None, 3)[:-1]] - hp.append([x,y,z]) - if hp != []: - self.halo_positions = np.array(hp) - self.halo_kdtree = KDTree(self.halo_positions) - else: - self.halo_positions = None - self.halo_kdtree = None - return hp - - def parse_halo_catalog_internal(self): - """ - This parser works on the files output directly out of yt's internal - halo_finder. The parse_halo_catalog_external works with an - external version of FOF. 
- - Examples - -------- - >>> ds = load("DD0000/DD0000") - >>> halo_list = FOFHaloFinder(ds) - >>> halo_list.write_out("FOF/groups_00000.txt") - >>> halos_COM = parse_halo_catalog_internal() - """ - hp = [] - for line in open("%s/groups_%05i.txt" % \ - (self.FOF_directory, self.output_id)): - if line.startswith("# RED"): - self.redshift = float(line.split("=")[1]) - continue - if line.strip() == "": continue # empty - if line[0] == "#": continue # comment - x,y,z = [float(f) for f in line.split()[7:10]] # COM x,y,z - hp.append([x,y,z]) - if hp != []: - self.halo_positions = np.array(hp) - self.halo_kdtree = KDTree(self.halo_positions) - else: - self.halo_positions = None - self.halo_kdtree = None - return hp - - def read_particle_ids(self, halo_id): - if self.cache is not None: - if halo_id not in self.cache: - if self.external_FOF: - self.cache[halo_id] = \ - self.particle_file["/Halo%08i/Particle ID" % halo_id][:] - else: - self.cache[halo_id] = \ - self.particle_file["/Halo%08i/particle_index" % halo_id][:] - ids = self.cache[halo_id] - else: - if self.external_FOF: - ids = self.particle_file["/Halo%08i/Particle ID" % halo_id][:] - else: - ids = self.particle_file["/Halo%08i/particle_index" % halo_id][:] - return HaloParticleList(halo_id, self.halo_positions[halo_id,:], ids) - - def calculate_parentage_fractions(self, other_catalog, radius = 0.10): - parentage_fractions = {} - if self.halo_positions is None or other_catalog.halo_positions is None: - return parentage_fractions - mylog.debug("Ball-tree query with radius %0.3e", radius) - all_nearest = self.halo_kdtree.query_ball_tree( - other_catalog.halo_kdtree, radius) - pbar = get_pbar("Halo Mergers", self.halo_positions.shape[0]) - for hid1, nearest in enumerate(all_nearest): - pbar.update(hid1) - parentage_fractions[hid1] = {} - HPL1 = self.read_particle_ids(hid1) - for hid2 in sorted(nearest): - HPL2 = other_catalog.read_particle_ids(hid2) - p1, p2 = HPL1.find_relative_parentage(HPL2) - parentage_fractions[hid1][hid2] = (p1, p2, HPL2.number_of_particles) - parentage_fractions[hid1]["NumberOfParticles"] = HPL1.number_of_particles - pbar.finish() - return parentage_fractions - -class HaloParticleList(object): - def __init__(self, halo_id, position, particle_ids): - self.halo_id = halo_id - self.position = np.array(position) - self.particle_ids = particle_ids - self.number_of_particles = particle_ids.size - - def find_nearest(self, other_tree, radius = 0.10): - return other_tree.query_ball_point(self.position, radius) - - def find_relative_parentage(self, child): - # Return two values: percent this halo gave to the other, and percent - # of the other that comes from this halo - overlap = np.intersect1d(self.particle_ids, child.particle_ids).size - of_child_from_me = float(overlap)/child.particle_ids.size - of_mine_from_me = float(overlap)/self.particle_ids.size - return of_child_from_me, of_mine_from_me - -class EnzoFOFMergerBranch(object): - def __init__(self, tree, output_num, halo_id, max_children, - min_relation=0.25): - self.output_num = output_num - self.halo_id = halo_id - self.npart = tree.relationships[output_num][halo_id]["NumberOfParticles"] - self.children = [] - self.progenitor = -1 - max_relationship = 0.0 - halo_count = 0 - keys = list(tree.relationships[output_num][halo_id].keys()) - keys.remove('NumberOfParticles') - for k in sorted(keys): - v = tree.relationships[output_num][halo_id][k] - if v[1] > min_relation and halo_count < max_children: - halo_count += 1 - self.children.append((k,v[1],v[2])) - if v[1] > 
max_relationship: - self.progenitor = k - max_relationship = v[1] - -class EnzoFOFMergerTree(object): - r"""Calculates the parentage relationships for halos for a series of - outputs, using the framework provided in enzofof_merger_tree. - - Parameters - ---------- - zrange : tuple - This is the redshift range (min, max) to calculate the - merger tree. E.g. (0, 2) for z=2 to z=0 - cycle_range : tuple, optional - This is the cycle number range (min, max) to calculate the - merger tree. If both zrange and cycle_number given, - ignore zrange. - output : bool, optional - If provided, both .cpkl and .txt files containing the parentage - relationships will be output. - load_saved : bool, optional - Flag to load previously saved parental relationships - save_filename : str, optional - Filename to save parental relationships - external_FOF : bool, optional - Are we building a tree from outputs generated by an - external FOF program, or an FOF internal to yt? - FOF_directory : str, optional - Directory where FOF files are located, note that the files - must be named according to the syntax: groups_DDDDD.txt for - internal yt outputs, and groups_DDDDD.dat for external FOF outputs. - where DDDDD are digits representing the equivalent cycle number. - e.g. groups_00000.txt - - Examples - -------- - >>> mt = EnzoFOFMergerTree() # by default it grabs every DD in FOF dir - >>> mt.build_tree(0) # Create tree for halo 0 - >>> mt.print_tree() - >>> mt.write_dot() - - See Also - -------- - plot_halo_evolution() - """ - def __init__(self, zrange=None, cycle_range=None, output=False, - load_saved=False, save_filename="merger_tree.cpkl", - external_FOF=True, FOF_directory="FOF"): - - self.relationships = {} - self.redshifts = {} - self.external_FOF = external_FOF - self.FOF_directory = FOF_directory - if load_saved: - self.load_tree("%s/%s" % (self.FOF_directory, save_filename)) - # make merger tree work within specified cycle/z limits - # on preloaded halos - if zrange is not None: - self.select_redshifts(zrange) - if cycle_range is not None: - self.select_cycles(cycle_range) - else: - self.find_outputs(zrange, cycle_range, output) - self.run_merger_tree(output) - self.save_tree("%s/%s" % (self.FOF_directory, save_filename)) - - def select_cycles(self, cycle_range): - """ - Takes an existing tree and pares it to only include a subset of - cycles. Useful in paring a loaded tree. - """ - # N.B. Does not delete info from self.relationships to save space - # just removes it from redshift dict for indexing - for cycle in self.redshifts.keys(): - if cycle <= cycle_range[0] and cycle >= cycle_range[1]: - del self.redshifts[cycle] - - def select_redshifts(self, zrange): - """ - Takes an existing tree and pares it to only include a subset of - redshifts. Useful in paring a loaded tree. - """ - # N.B. Does not delete info from self.relationships to save space - # just removes it from redshift dict for indexing - for redshift in self.redshifts.values(): - if redshift <= zrange[0] and redshift >= zrange[1]: - # some reverse lookup magic--assumes unique cycle/z pairs - cycle = [key for key,value in self.redshifts.items() \ - if value == redshift][0] - del self.redshifts[cycle] - - def save_tree(self, filename): - cPickle.dump((self.redshifts, self.relationships), - open(filename, "wb")) - - def load_tree(self, filename): - self.redshifts, self.relationships = \ - cPickle.load(open(filename, "rb")) - - def clear_data(self): - r"""Deletes previous merger tree, but keeps parentage - relationships. 
- """ - del self.levels - - def find_outputs(self, zrange, cycle_range, output): - self.numbers = [] - if self.external_FOF: - filenames = "%s/groups_*.dat" % (self.FOF_directory) - files = glob.glob(filenames) - else: - filenames = "%s/groups_*.txt" % (self.FOF_directory) - files = glob.glob(filenames) - # If using redshift range, load redshifts only - for f in files: - num = int(f[-9:-4]) - if zrange is not None: - HC = HaloCatalog(num, external_FOF=self.external_FOF, \ - FOF_directory=self.FOF_directory) - # Allow for some epsilon - diff1 = (HC.redshift - zrange[0]) / zrange[0] - diff2 = (HC.redshift - zrange[1]) / zrange[1] - if diff1 >= -1e-3 and diff2 <= 1e-3: - self.numbers.append(num) - del HC - elif cycle_range is not None: - if num >= cycle_range[0] and num <= cycle_range[1]: - self.numbers.append(num) - else: - self.numbers.append(num) - self.numbers.sort() - - def run_merger_tree(self, output): - # Run merger tree for all outputs, starting with the last output - for i in range(len(self.numbers)-1, 0, -1): - if output: - output = "%s/tree-%5.5d-%5.5d" % \ - (self.FOF_directory, self.numbers[i], self.numbers[i-1]) - else: - output = None - z0, z1, fr = find_halo_relationships(self.numbers[i], \ - self.numbers[i-1], \ - output_basename=output, \ - external_FOF=self.external_FOF, - FOF_directory=self.FOF_directory) - self.relationships[self.numbers[i]] = fr - self.redshifts[self.numbers[i]] = z0 - # Fill in last redshift - self.redshifts[self.numbers[0]] = z1 - - def build_tree(self, halonum, min_particles=0, max_children=1e20): - r"""Builds a merger tree, starting at the last output. - - Parameters - ---------- - halonum : int - Halo number in the last output to analyze. - min_particles : int, optional - Minimum number of particles of halos in tree. - max_children : int, optional - Maximum number of child halos each leaf can have. - """ - self.halonum = halonum - self.max_children = max_children - self.output_numbers = sorted(self.relationships, reverse=True) - self.levels = {} - trunk = self.output_numbers[0] - self.levels[trunk] = [EnzoFOFMergerBranch(self, trunk, halonum, - max_children)] - self.generate_tree(min_particles, max_children) - - def filter_small_halos(self, lvl, min_particles): - # Filter out children with less than min_particles - for h in self.levels[lvl]: - fil = [] - for c in h.children: - if c[2] > min_particles: # c[2] = npart - fil.append(c) - h.children = fil - - def generate_tree(self, min_particles, max_children): - self.filter_small_halos(self.output_numbers[0], min_particles) - for i in range(1,len(self.output_numbers)): - prev = self.output_numbers[i-1] - this = self.output_numbers[i] - self.levels[this] = [] - this_halos = [] # To check for duplicates - for h in self.levels[prev]: - for c in h.children: - if c[0] in this_halos: continue - if self.relationships[this] == {}: continue - branch = EnzoFOFMergerBranch(self, this, c[0], - max_children) - self.levels[this].append(branch) - this_halos.append(c[0]) - self.filter_small_halos(this, min_particles) - - def get_massive_progenitors(self, halonum, min_relation=0.25): - r"""Returns a list of the most massive progenitor halos. - - This routine walks down the tree, following the most massive - progenitor on each node. - - Parameters - ---------- - halonum : int - Halo number at the last output to trace. - - Returns - ------- - output : dict - Dictionary of redshifts, cycle numbers, and halo numbers - of the most massive progenitor. 
keys = {redshift, cycle, - halonum} - """ - output = {"redshift": [], "cycle": [], "halonum": []} - # First (lowest redshift) node in tree - halo0 = halonum - for cycle in sorted(self.numbers, reverse=True): - if cycle not in self.relationships: break - if halo0 not in self.relationships[cycle]: break - node = self.relationships[cycle][halo0] - output["redshift"].append(self.redshifts[cycle]) - output["cycle"].append(cycle) - output["halonum"].append(halo0) - # Find progenitor - max_rel = 0.0 - for k,v in node.items(): - if not str(k).isdigit(): continue - if v[1] > max_rel and v[1] > min_relation: - halo0 = k - max_rel = v[1] - return output - - def print_tree(self): - r"""Prints the merger tree to stdout. - """ - for lvl in sorted(self.levels, reverse=True): - if lvl not in self.redshifts: continue - print("========== Cycle %5.5d (z=%f) ==========" % \ - (lvl, self.redshifts[lvl])) - for br in self.levels[lvl]: - print("Parent halo = %d" % br.halo_id) - print("--> Most massive progenitor == Halo %d" % \ - (br.progenitor)) - for i,c in enumerate(br.children): - if i > self.max_children: break - print("--> Halo %8.8d :: fraction = %g" % (c[0], c[1])) - - def save_halo_evolution(self, filename): - """ - Saves as an HDF5 file the relevant details about a halo - over the course of its evolution following the most massive - progenitor to have given it the bulk of its particles. - It stores info from the FOF_groups file: location, mass, id, etc. - """ - f = h5py.File("%s/%s" % (self.FOF_directory, filename), 'a') - cycle_fin = sorted(list(self.redshifts.keys()))[-1] - halo_id = self.levels[cycle_fin][0].halo_id - halo = "halo%05d" % halo_id - if halo in f: - del f["halo%05d" % halo_id] - g = f.create_group("halo%05d" % halo_id) - size = len(self.redshifts) - cycle = np.zeros(size) - redshift = np.zeros(size) - halo_id = np.zeros(size) - fraction = np.zeros(size) - mass = np.zeros(size) - densest_point = np.zeros((3,size)) - COM = np.zeros((6,size)) - fraction[0] = 1. - - for i, lvl in enumerate(sorted(self.levels, reverse=True)): - if len(self.levels[lvl]) == 0: # lineage for this halo ends - cycle = cycle[:i] # so truncate arrays, and break - redshift = redshift[:i] # Not big enough. 
- halo_id = halo_id[:i] - fraction = fraction[:i] - mass = mass[:i] - densest_point = densest_point[:,:i] - COM = COM[:,:i] - break - if lvl not in self.redshifts: continue - mylog.info("========== Cycle %5.5d (z=%f) ==========" % \ - (lvl, self.redshifts[lvl])) - cycle[i] = lvl - redshift[i] = self.redshifts[lvl] - - br = self.levels[lvl][0] - mylog.info("Parent halo = %d" % br.halo_id) - mylog.info("-> Most massive progenitor == Halo %d" % (br.progenitor)) - halo_id[i] = br.halo_id - - if len(br.children) == 0: # lineage for this halo ends - cycle = cycle[:i+1] # (no children) - redshift = redshift[:i+1] # so truncate arrays, and break - halo_id = halo_id[:i+1] - fraction = fraction[:i+1] - mass = mass[:i+1] - densest_point = densest_point[:,:i+1] - COM = COM[:,:i+1] - break - - if i < size-1: - fraction[i+1] = br.children[0][1] - - # open up FOF file to parse for details - filename = "%s/groups_%05d.txt" % (self.FOF_directory, lvl) - mass[i], densest_point[:,i], COM[:,i] = \ - grab_FOF_halo_info_internal(filename, br.halo_id) - - # save the arrays in the hdf5 file - g.create_dataset("cycle", data=cycle) - g.create_dataset("redshift", data=redshift) - g.create_dataset("halo_id", data=halo_id) - g.create_dataset("fraction", data=fraction) - g.create_dataset("mass", data=mass) - g.create_dataset("densest_point", data=densest_point) - g.create_dataset("COM", data=COM) - f.close() - - def write_dot(self, filename=None): - r"""Writes merger tree to a GraphViz or image file. - - Parameters - ---------- - filename : str, optional - Filename to write the GraphViz file. Default will be - tree_halo%05i.gv, which is a text file in the GraphViz format. - If filename is an image (e.g. "MergerTree.png") the output will - be in the appropriate image format made by calling GraphViz - automatically. See GraphViz (e.g. "dot -v") - for a list of available output formats. - """ - if filename is None: - filename = "%s/tree_halo%5.5d.gv" % \ - (self.FOF_directory, self.halonum) - # Create the pydot graph object. - self.graph = pydot.Dot('galaxy', graph_type='digraph') - self.halo_shape = "rect" - self.z_shape = "plaintext" - # Subgraphs to align levels - self.subgs = {} - for num in self.numbers: - self.subgs[num] = pydot.Subgraph('', rank = 'same') - self.graph.add_subgraph(self.subgs[num]) - sorted_lvl = sorted(self.levels, reverse=True) - for ii,lvl in enumerate(sorted_lvl): - # Since we get the cycle number from the key, it won't - # exist for the last level, i.e. children of last level. - # Get it from self.numbers. - if ii < len(sorted_lvl)-1: - next_lvl = sorted_lvl[ii+1] - else: - next_lvl = self.numbers[0] - for br in self.levels[lvl]: - for c in br.children: - color = "red" if c[0] == br.progenitor else "black" - self.graph.add_edge(pydot.Edge("C%d_H%d" %(lvl, br.halo_id), - "C%d_H%d" % (next_lvl, c[0]), color=color)) - #line = " C%d_H%d -> C%d_H%d [color=%s];\n" % \ - # (lvl, br.halo_id, next_lvl, c[0], color) - - #fp.write(line) - for ii,lvl in enumerate(sorted_lvl): - npart_max = 0 - for br in self.levels[lvl]: - if br.npart > npart_max: npart_max = br.npart - for br in self.levels[lvl]: - halo_str = "C%d_H%d" % (lvl, br.halo_id) - style = "filled" if br.npart == npart_max else "solid" - self.graph.add_node(pydot.Node(halo_str, - label = "Halo %d\\n%d particles" % (br.halo_id, br.npart), - style = style, shape = self.halo_shape)) - # Add this node to the correct level subgraph. 
- self.subgs[lvl].add_node(pydot.Node(halo_str)) - for lvl in self.numbers: - # Don't add the z if there are no halos already in the subgraph. - if len(self.subgs[lvl].get_node_list()) == 0: continue - self.subgs[lvl].add_node(pydot.Node("%1.5e" % self.redshifts[lvl], - shape = self.z_shape, label = "z=%0.3f" % self.redshifts[lvl])) - # Based on the suffix of the file name, write out the result to a file. - suffix = filename.split(".")[-1] - if suffix == "gv": suffix = "raw" - mylog.info("Writing %s format %s to disk." % (suffix, filename)) - self.graph.write("%s" % filename, format=suffix) - -def find_halo_relationships(output1_id, output2_id, output_basename = None, - radius = 0.10, external_FOF=True, - FOF_directory='FOF'): - r"""Calculate the parentage and child relationships between two EnzoFOF - halo catalogs. - - This function performs a very simple merger tree calculation between two - sets of halos. For every halo in the second halo catalog, it looks to the - first halo catalog to find the parents by looking at particle IDs. The - particle IDs from the child halos are identified in potential parents, and - then both percent-of-parent and percent-to-child values are recorded. - - Note that this works with catalogs constructed by Enzo's FOF halo - when used in external_FOF=True mode, whereas it will work with - catalogs constructed by yt using external_FOF=False mode. - - Parameters - ---------- - output1_id : int - This is the integer output id of the (first) halo catalog to parse and - load. - output2_id : int - This is the integer output id of the (second) halo catalog to parse and - load. - output_basename : string - If provided, both .cpkl and .txt files containing the parentage - relationships will be output. - radius : float, default to 0.10 - In absolute units, the radius to examine when guessing possible - parent/child relationships. If this value is too small, you will miss - possible relationships. - FOF_directory : str, optional - Directory where FOF files are located - - Returns - ------- - pfrac : dict - This is a dict of dicts. The first key is the parent halo id, the - second is the child halo id. The values are the percent contributed - from parent to child and the percent of a child that came from the - parent. - """ - mylog.info("Parsing Halo Catalog %04i", output1_id) - HC1 = HaloCatalog(output1_id, False, external_FOF=external_FOF, \ - FOF_directory=FOF_directory) - mylog.info("Parsing Halo Catalog %04i", output2_id) - HC2 = HaloCatalog(output2_id, True, external_FOF=external_FOF, \ - FOF_directory=FOF_directory) - mylog.info("Calculating fractions") - pfrac = HC1.calculate_parentage_fractions(HC2) - - if output_basename is not None and pfrac != {}: - f = open("%s.txt" % (output_basename), "w") - for hid1 in sorted(pfrac): - for hid2 in sorted(pfrac[hid1]): - if not str(hid2).isdigit(): continue - p1, p2, npart = pfrac[hid1][hid2] - if p1 == 0.0: continue - f.write( "Halo %s (%s) contributed %0.3e of its particles to %s (%s), which makes up %0.3e of that halo\n" % ( - hid1, output1_id, p2, hid2, output2_id, p1)) - f.close() - - cPickle.dump(pfrac, open("%s.cpkl" % (output_basename), "wb")) - - return HC1.redshift, HC2.redshift, pfrac - -def grab_FOF_halo_info_internal(filename, halo_id): - """ - Finds a specific halo's information in the FOF group output information - and pass relevant parameters to caller. 
- """ - # open up FOF file to parse for details - groups_file = open(filename, 'r') - for line in groups_file: - if line.startswith("#"): continue - if int(line.split()[0]) == halo_id: - ar = np.array(line.split()).astype('float64') - return ar[1], ar[4:7], ar[7:13] # mass, xyz_dens, xyzvxvyvz_COM - -def plot_halo_evolution(filename, halo_id, x_quantity='cycle', y_quantity='mass', - x_log=False, y_log=True, FOF_directory='FOF'): - """ - Once you have generated a file using the - EnzoFOFMergerTree.save_halo_evolution function, this is a simple way of - plotting the evolution in the quantities of that halo over its lifetime. - - Parameters - ---------- - filename : str - The filename to which you saved the hdf5 data from save_halo_evolution - halo_id : int - The halo in 'filename' that you want to follow - x_quantity : str, optional - The quantity that you want to plot as the x_coord. - Valid options are: - - * cycle - * mass - * fraction - * halo_id - * redshift - * dense_x - * dense_y - * dense_z - * COM_x - * COM_y - * COM_z - * COM_vx - * COM_vy - * COM_vz - - y_quantity : str, optional - The quantity that you want to plot as the y_coord. - x_log : bool, optional - Do you want the x-axis to be in log or linear? - y_log : bool, optional - Do you want the y-axis to be in log or linear? - FOF_directory : str, optional - Directory where FOF files (and hdf file) are located - - Examples - -------- - - >>> # generates mass history plots for the 20 most massive halos at t_fin. - >>> ts = DatasetSeries.from_filenames("DD????/DD????") - >>> # long step--must run FOF on each DD, but saves outputs for later use - >>> for ds in ts: - ... halo_list = FOFHaloFinder(ds) - ... i = int(ds.basename[2:]) - ... halo_list.write_out("FOF/groups_%05i.txt" % i) - ... halo_list.write_particle_lists("FOF/particles_%05i" % i) - ... - >>> mt = EnzoFOFMergerTree(external_FOF=False) - >>> for i in range(20): - ... mt.build_tree(i) - ... mt.save_halo_evolution('halos.h5') - ... - >>> for i in range(20): - ... plot_halo_evolution('halos.h5', i) - """ - import matplotlib.pyplot as plt - f = h5py.File("%s/%s" % (FOF_directory, filename), 'r') - basename = os.path.splitext(filename)[0] - halo = "halo%05d" % halo_id - basename = basename + "_" + halo - g = f[halo] - values = list(g) - index_dict = {'x' : 0, 'y' : 1, 'z' : 2, 'vx' : 3, 'vy' : 4, 'vz' : 5} - coords = {} - fields = {} - for i, quantity in enumerate((x_quantity, y_quantity)): - field = quantity - if quantity.startswith('COM'): - index = index_dict[quantity.split('_')[-1]] - quantity = ('COM') - if quantity.startswith('dense'): - index = index_dict[quantity.split('_')[-1]] - quantity = ('densest_point') - if quantity not in values: - exit('%s not in list of values in %s for halo %d' % \ - (quantity, filename, halo_id)) - if not field == quantity: - coords[i] = g[quantity][index,:] - else: - coords[i] = g[quantity] - if len(coords[i]) == 1: - # ("Only 1 value for Halo %d. Ignoring." 
% halo_id) - return - fields[i] = field - - ax = plt.axes() - ax.plot(coords[0], coords[1]) - ax.set_title(basename) - ax.set_xlabel(fields[0]) - ax.set_ylabel(fields[1]) - if x_log: - ax.set_xscale("log") - if y_log: - ax.set_yscale("log") - ofn = "%s/%s_%s_%s.png" % (FOF_directory, basename, fields[0], fields[1]) - plt.savefig(ofn) - plt.clf() diff --git a/yt/analysis_modules/halo_analysis/halo_callbacks.py b/yt/analysis_modules/halo_analysis/halo_callbacks.py deleted file mode 100644 index 3c828724385..00000000000 --- a/yt/analysis_modules/halo_analysis/halo_callbacks.py +++ /dev/null @@ -1,588 +0,0 @@ -""" -Halo callback object - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.utilities.on_demand_imports import _h5py as h5py -import numpy as np -import os - -from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \ - periodic_distance -from yt.data_objects.profiles import \ - create_profile -from yt.frontends.ytdata.utilities import \ - _hdf5_yt_array, \ - _yt_array_hdf5 -from yt.units.yt_array import \ - YTArray -from yt.utilities.exceptions import \ - YTSphereTooSmall -from yt.funcs import \ - ensure_list -from yt.utilities.logger import ytLogger as mylog -from yt.utilities.operator_registry import \ - OperatorRegistry -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - parallel_root_only -from yt.visualization.profile_plotter import \ - PhasePlot - -callback_registry = OperatorRegistry() - -def add_callback(name, function): - callback_registry[name] = HaloCallback(function) - -class HaloCallback(object): - r""" - A HaloCallback is a function that minimally takes in a Halo object - and performs some analysis on it. This function may attach attributes - to the Halo object, write out data, etc, but does not return anything. - """ - def __init__(self, function, args=None, kwargs=None): - self.function = function - self.args = args - if self.args is None: self.args = [] - self.kwargs = kwargs - if self.kwargs is None: self.kwargs = {} - - def __call__(self, halo): - self.function(halo, *self.args, **self.kwargs) - return True - -def halo_sphere(halo, radius_field="virial_radius", factor=1.0, - field_parameters=None): - r""" - Create a sphere data container to associate with a halo. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - radius_field : string - Field to be retrieved from the quantities dictionary as - the basis of the halo radius. - Default: "virial_radius". - factor : float - Factor to be multiplied by the base radius for defining - the radius of the sphere. - Default: 1.0. - field_parameters : dict - Dictionary of field parameters to be set with the sphere - created. 
- - """ - - dds = halo.halo_catalog.data_ds - center = dds.arr([halo.quantities["particle_position_%s" % axis] \ - for axis in "xyz"]) - radius = factor * halo.quantities[radius_field] - if radius <= 0.0: - halo.data_object = None - return - try: - sphere = dds.sphere(center, radius) - except YTSphereTooSmall: - halo.data_object = None - return - if field_parameters is not None: - for field, par in field_parameters.items(): - if isinstance(par, tuple) and par[0] == "quantity": - value = halo.quantities[par[1]] - else: - value = par - sphere.set_field_parameter(field, value) - halo.data_object = sphere - -add_callback("sphere", halo_sphere) - -def sphere_field_max_recenter(halo, field): - r""" - Recenter the halo sphere on the location of the maximum of the given field. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - field : string - Field to be used for recentering. - - """ - - if halo.data_object is None: return - s_ds = halo.data_object.ds - old_sphere = halo.data_object - max_vals = old_sphere.quantities.max_location(field) - new_center = s_ds.arr(max_vals[1:]) - new_sphere = s_ds.sphere(new_center.in_units("code_length"), - old_sphere.radius.in_units("code_length")) - mylog.info("Moving sphere center from %s to %s." % (old_sphere.center, - new_sphere.center)) - for par, value in old_sphere.field_parameters.items(): - if par not in new_sphere.field_parameters: - new_sphere.set_field_parameter(par, value) - halo.data_object = new_sphere - -add_callback("sphere_field_max_recenter", sphere_field_max_recenter) - -def sphere_bulk_velocity(halo): - r""" - Set the bulk velocity for the sphere. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - - """ - - halo.data_object.set_field_parameter("bulk_velocity", - halo.data_object.quantities.bulk_velocity(use_particles=True)) - -add_callback("sphere_bulk_velocity", sphere_bulk_velocity) - -def profile(halo, bin_fields, profile_fields, n_bins=32, extrema=None, logs=None, units=None, - weight_field="cell_mass", accumulation=False, fractional=False, - storage="profiles", output_dir="."): - r""" - Create 1, 2, or 3D profiles of a halo. - - Store profile data in a dictionary associated with the halo object. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - bin_fields : list of strings - The binning fields for the profile. - profile_fields : string or list of strings - The fields to be profiled. - n_bins : int or list of ints - The number of bins in each dimension. If None, 32 bins for - each bin are used for each bin field. - Default: 32. - extrema : dict of min, max tuples - Minimum and maximum values of the bin_fields for the profiles. - The keys correspond to the field names. Defaults to the extrema - of the bin_fields of the dataset. If a units dict is provided, extrema - are understood to be in the units specified in the dictionary. - logs : dict of boolean values - Whether or not to log the bin_fields for the profiles. - The keys correspond to the field names. Defaults to the take_log - attribute of the field. - units : dict of strings - The units of the fields in the profiles, including the bin_fields. - weight_field : string - Weight field for profiling. - Default : "cell_mass" - accumulation : bool or list of bools - If True, the profile values for a bin n are the cumulative sum of - all the values from bin 0 to n. 
If -True, the sum is reversed so - that the value for bin n is the cumulative sum from bin N (total bins) - to n. If the profile is 2D or 3D, a list of values can be given to - control the summation in each dimension independently. - Default: False. - fractional : If True the profile values are divided by the sum of all - the profile data such that the profile represents a probability - distribution function. - storage : string - Name of the dictionary to store profiles. - Default: "profiles" - output_dir : string - Name of directory where profile data will be written. The full path will be - the output_dir of the halo catalog concatenated with this directory. - Default : "." - - """ - - mylog.info("Calculating 1D profile for halo %d." % - halo.quantities["particle_identifier"]) - - dds = halo.halo_catalog.data_ds - - if dds is None: - raise RuntimeError("Profile callback requires a data ds.") - - if not hasattr(halo, "data_object"): - raise RuntimeError("Profile callback requires a data container.") - - if halo.data_object is None: - mylog.info("Skipping halo %d since data_object is None." % - halo.quantities["particle_identifier"]) - return - - if output_dir is None: - output_dir = storage - output_dir = os.path.join(halo.halo_catalog.output_dir, output_dir) - - bin_fields = ensure_list(bin_fields) - my_profile = create_profile(halo.data_object, bin_fields, profile_fields, n_bins=n_bins, - extrema=extrema, logs=logs, units=units, weight_field=weight_field, - accumulation=accumulation, fractional=fractional) - - prof_store = dict([(field, my_profile[field]) \ - for field in my_profile.field_data]) - prof_store[my_profile.x_field] = my_profile.x - if len(bin_fields) > 1: - prof_store[my_profile.y_field] = my_profile.y - if len(bin_fields) > 2: - prof_store[my_profile.z_field] = my_profile.z - if hasattr(halo, storage): - halo_store = getattr(halo, storage) - if "used" in halo_store: - halo_store["used"] &= my_profile.used - else: - halo_store = {"used": my_profile.used} - setattr(halo, storage, halo_store) - halo_store.update(prof_store) - - if my_profile.standard_deviation is not None: - variance_store = dict([(field, my_profile.standard_deviation[field]) \ - for field in my_profile.standard_deviation]) - variance_storage = "%s_variance" % storage - if hasattr(halo, variance_storage): - halo_variance_store = getattr(halo, variance_storage) - else: - halo_variance_store = {} - setattr(halo, variance_storage, halo_variance_store) - halo_variance_store.update(variance_store) - -add_callback("profile", profile) - -@parallel_root_only -def save_profiles(halo, storage="profiles", filename=None, - output_dir="."): - r""" - Save profile data to disk. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - storage : string - Name of the dictionary attribute containing the profile data to be written. - Default: "profiles" - filename : string - The name of the file to be written. The final filename will be - "_.h5". If None, filename is set to the value given - by the storage keyword. - Default: None - output_dir : string - Name of directory where profile data will be written. The full path will be - the output_dir of the halo catalog concatenated with this directory. - Default : "." 
- - """ - - if not hasattr(halo, storage): - return - - if filename is None: - filename = storage - output_file = os.path.join(halo.halo_catalog.output_dir, output_dir, - "%s_%06d.h5" % (filename, - halo.quantities["particle_identifier"])) - mylog.info("Saving halo %d profile data to %s." % - (halo.quantities["particle_identifier"], output_file)) - - fh = h5py.File(output_file, mode="w") - my_profile = getattr(halo, storage) - profile_group = fh.create_group("profiles") - for field in my_profile: - # Don't write code units because we might not know those later. - if isinstance(my_profile[field], YTArray): - my_profile[field].convert_to_cgs() - _yt_array_hdf5(profile_group, str(field), my_profile[field]) - variance_storage = "%s_variance" % storage - if hasattr(halo, variance_storage): - my_profile = getattr(halo, variance_storage) - variance_group = fh.create_group("variance") - for field in my_profile: - # Don't write code units because we might not know those later. - if isinstance(my_profile[field], YTArray): - my_profile[field].convert_to_cgs() - _yt_array_hdf5(variance_group, str(field), my_profile[field]) - fh.close() - -add_callback("save_profiles", save_profiles) - -def load_profiles(halo, storage="profiles", fields=None, - filename=None, output_dir="."): - r""" - Load profile data from disk. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - storage : string - Name of the dictionary attribute to store profile data. - Default: "profiles" - fields : string or list of strings - The fields to be loaded. If None, all fields present will be loaded. - Default : None - filename : string - The name of the file to be loaded. The final filename will be - "_.h5". If None, filename is set to the value given - by the storage keyword. - Default: None - output_dir : string - Name of directory where profile data will be read. The full path will be - the output_dir of the halo catalog concatenated with this directory. - Default : "." - - """ - - if filename is None: - filename = storage - output_file = os.path.join(halo.halo_catalog.output_dir, output_dir, - "%s_%06d.h5" % (filename, - halo.quantities["particle_identifier"])) - if not os.path.exists(output_file): - raise RuntimeError("Profile file not found: %s." % output_file) - mylog.info("Loading halo %d profile data from %s." % - (halo.quantities["particle_identifier"], output_file)) - - fh = h5py.File(output_file, mode="r") - if fields is None: - profile_fields = fh["profiles"].keys() - else: - profile_fields = fields - my_profile = {} - my_group = fh["profiles"] - for field in profile_fields: - if field not in my_group: - raise RuntimeError("%s field not present in %s." % (field, output_file)) - my_profile[field] = _hdf5_yt_array(my_group, field, - ds=halo.halo_catalog.halos_ds) - setattr(halo, storage, my_profile) - - if "variance" in fh: - my_variance = {} - my_group = fh["variance"] - if fields is None: - profile_fields = my_group.keys() - for field in profile_fields: - if field not in my_group: - raise RuntimeError("%s field not present in %s." 
% (field, output_file)) - my_variance[field] = _hdf5_yt_array(my_group, field, - ds=halo.halo_catalog.halos_ds) - setattr(halo, "%s_variance" % storage, my_variance) - - fh.close() - -add_callback("load_profiles", load_profiles) - -def virial_quantities(halo, fields, - overdensity_field=("gas", "overdensity"), - critical_overdensity=200, - profile_storage="profiles"): - r""" - Calculate the value of the given fields at the virial radius defined at - the given critical density by interpolating from radial profiles. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - fields : string or list of strings - The fields whose virial values are to be calculated. - overdensity_field : string or tuple of strings - The field used as the overdensity from which interpolation is done to - calculate virial quantities. - Default: ("gas", "overdensity") - critical_overdensity : float - The value of the overdensity at which to evaluate the virial quantities. - Overdensity is with respect to the critical density. - Default: 200 - profile_storage : string - Name of the halo attribute that holds the profiles to be used. - Default: "profiles" - - """ - - mylog.info("Calculating virial quantities for halo %d." % - halo.quantities["particle_identifier"]) - - fields = ensure_list(fields) - fields = [halo.data_object._determine_fields(field)[0] - for field in fields] - - dds = halo.halo_catalog.data_ds - profile_data = getattr(halo, profile_storage) - - if overdensity_field not in profile_data: - raise RuntimeError("virial_quantities callback requires profile of %s." % - str(overdensity_field)) - - overdensity = profile_data[overdensity_field] - dfilter = np.isfinite(overdensity) & profile_data["used"] & (overdensity > 0) - - v_fields = {} - for field in fields: - if isinstance(field, tuple): - my_field = field[-1] - else: - my_field = field - v_fields[field] = my_field - v_field = "%s_%d" % (my_field, critical_overdensity) - if v_field not in halo.halo_catalog.quantities: - halo.halo_catalog.quantities.append(v_field) - vquantities = dict([("%s_%d" % (v_fields[field], critical_overdensity), - dds.quan(0, profile_data[field].units)) \ - for field in fields]) - - if dfilter.sum() < 2: - halo.quantities.update(vquantities) - return - - # find interpolation index - # require a negative slope, but not monotonicity - vod = overdensity[dfilter].to_ndarray() - if (vod > critical_overdensity).all(): - if vod[-1] < vod[-2]: - index = -2 - else: - halo.quantities.update(vquantities) - return - elif (vod < critical_overdensity).all(): - if vod[0] > vod[1]: - index = 0 - else: - halo.quantities.update(vquantities) - return - else: - # take first instance of downward intersection with critical value - intersections = (vod[:-1] >= critical_overdensity) & \ - (vod[1:] < critical_overdensity) - if not intersections.any(): - halo.quantities.update(vquantities) - return - index = np.where(intersections)[0][0] - - for field in fields: - v_prof = profile_data[field][dfilter].to_ndarray() - slope = np.log(v_prof[index + 1] / v_prof[index]) / \ - np.log(vod[index + 1] / vod[index]) - value = dds.quan(np.exp(slope * np.log(critical_overdensity / - vod[index])) * v_prof[index], - profile_data[field].units).in_cgs() - vquantities["%s_%d" % (v_fields[field], critical_overdensity)] = value - - halo.quantities.update(vquantities) - -add_callback("virial_quantities", virial_quantities) - -def phase_plot(halo, output_dir=".", phase_args=None, phase_kwargs=None): - r""" - Make a phase plot for 
the halo object. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - output_dir : string - Name of directory where profile data will be written. The full path will be - the output_dir of the halo catalog concatenated with this directory. - Default : "." - phase_args : list - List of arguments to be given to PhasePlot. - phase_kwargs : dict - Dictionary of keyword arguments to be given to PhasePlot. - - """ - - if phase_args is None: - phase_args = [] - if phase_kwargs is None: - phase_kwargs = {} - - try: - plot = PhasePlot(halo.data_object, *phase_args, **phase_kwargs) - plot.save(os.path.join(halo.halo_catalog.output_dir, output_dir, - "halo_%06d" % halo.quantities["particle_identifier"])) - except ValueError: - return - -add_callback("phase_plot", phase_plot) - -def delete_attribute(halo, attribute): - r""" - Delete attribute from halo object. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - attribute : string - The attribute to be deleted. - - """ - - if hasattr(halo, attribute): - delattr(halo, attribute) - -add_callback("delete_attribute", delete_attribute) - -def iterative_center_of_mass(halo, radius_field="virial_radius", inner_ratio=0.1, step_ratio=0.9, - units="pc"): - r""" - Adjust halo position by iteratively recalculating the center of mass while - decreasing the radius. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - radius_field : string - The halo quantity to be used as the radius for the sphere. - Default: "virial_radius" - inner_ratio : float - The ratio of the smallest sphere radius used for calculating the center of - mass to the initial radius. The sphere radius is reduced and center of mass - recalculated until the sphere has reached this size. - Default: 0.1 - step_ratio : float - The multiplicative factor used to reduce the radius of the sphere after the - center of mass is calculated. - Default: 0.9 - units : str - The units for printing out the distance between the initial and final centers. - Default : "pc" - - """ - if inner_ratio <= 0.0 or inner_ratio >= 1.0: - raise RuntimeError("iterative_center_of_mass: inner_ratio must be between 0 and 1.") - if step_ratio <= 0.0 or step_ratio >= 1.0: - raise RuntimeError("iterative_center_of_mass: step_ratio must be between 0 and 1.") - - center_orig = halo.halo_catalog.data_ds.arr([halo.quantities["particle_position_%s" % axis] - for axis in "xyz"]) - sphere = halo.halo_catalog.data_ds.sphere(center_orig, halo.quantities[radius_field]) - - while sphere.radius > inner_ratio * halo.quantities[radius_field]: - new_center = sphere.quantities.center_of_mass(use_gas=True, use_particles=True) - sphere = sphere.ds.sphere(new_center, step_ratio * sphere.radius) - - distance = periodic_distance(center_orig.in_units("code_length").to_ndarray(), - new_center.in_units("code_length").to_ndarray()) - distance = halo.halo_catalog.data_ds.quan(distance, "code_length") - mylog.info("Recentering halo %d %f %s away." 
% - (halo.quantities["particle_identifier"], - distance.in_units(units), units)) - - for i, axis in enumerate("xyz"): - halo.quantities["particle_position_%s" % axis] = sphere.center[i] - del sphere - -add_callback("iterative_center_of_mass", iterative_center_of_mass) diff --git a/yt/analysis_modules/halo_analysis/halo_catalog.py b/yt/analysis_modules/halo_analysis/halo_catalog.py deleted file mode 100644 index 0eb40d5d43c..00000000000 --- a/yt/analysis_modules/halo_analysis/halo_catalog.py +++ /dev/null @@ -1,508 +0,0 @@ -""" -HaloCatalog object - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np -import os - -from yt.frontends.ytdata.utilities import \ - save_as_dataset -from yt.funcs import \ - ensure_dir, \ - get_pbar, \ - mylog -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - ParallelAnalysisInterface, \ - parallel_blocking_call, \ - parallel_objects - -from .halo_object import \ - Halo -from .halo_callbacks import \ - callback_registry -from .halo_filters import \ - filter_registry -from .halo_finding_methods import \ - finding_method_registry -from .halo_quantities import \ - quantity_registry -from .halo_recipes import \ - recipe_registry - -class HaloCatalog(ParallelAnalysisInterface): - r"""Create a HaloCatalog: an object that allows for the creation and association - of data with a set of halo objects. - - A HaloCatalog object pairs a simulation dataset and the output from a halo finder, - allowing the user to perform analysis on each of the halos found by the halo finder. - Analysis is performed by providing callbacks: functions that accept a Halo object - and perform independent analysis, return a quantity to be associated with the halo, - or return True or False whether a halo meets various criteria. The resulting set of - quantities associated with each halo is then written out to disk at a "halo catalog." - This halo catalog can then be loaded in with yt as any other simulation dataset. - - Parameters - ---------- - halos_ds : str - Dataset created by a halo finder. If None, a halo finder should be - provided with the finder_method keyword. - data_ds : str - Dataset created by a simulation. - data_source : data container - Data container associated with either the halos_ds or the data_ds. - finder_method : str - Halo finder to be used if no halos_ds is given. - output_dir : str - The top level directory into which analysis output will be written. - Default: "." - finder_kwargs : dict - Arguments to pass to the halo finder if finder_method is given. - - Examples - -------- - - >>> # create profiles or overdensity vs. radius for each halo and save to disk - >>> import yt - >>> from yt.analysis_modules.halo_analysis.api import * - >>> data_ds = yt.load("DD0064/DD0064") - >>> halos_ds = yt.load("rockstar_halos/halos_64.0.bin", - ... 
output_dir="halo_catalogs/catalog_0064") - >>> hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds) - >>> # filter out halos with mass < 1e13 Msun - >>> hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun") - >>> # create a sphere object with radius of 2 times the virial_radius field - >>> hc.add_callback("sphere", factor=2.0, radius_field="virial_radius") - >>> # make radial profiles - >>> hc.add_callback("profile", "radius", [("gas", "overdensity")], - ... weight_field="cell_volume", accumulation=True) - >>> # save the profiles to disk - >>> hc.add_callback("save_profiles", output_dir="profiles") - >>> # create the catalog - >>> hc.create() - - >>> # load in the saved halo catalog and all the profile data - >>> halos_ds = yt.load("halo_catalogs/catalog_0064/catalog_0064.0.h5") - >>> hc = HaloCatalog(halos_ds=halos_ds, - output_dir="halo_catalogs/catalog_0064") - >>> hc.add_callback("load_profiles", output_dir="profiles") - >>> hc.load() - - See Also - -------- - add_callback, add_filter, add_quantity, add_recipe - - """ - - def __init__(self, halos_ds=None, data_ds=None, - data_source=None, finder_method=None, - finder_kwargs=None, - output_dir="halo_catalogs/catalog"): - ParallelAnalysisInterface.__init__(self) - self.halos_ds = halos_ds - self.data_ds = data_ds - self.output_dir = ensure_dir(output_dir) - if os.path.basename(self.output_dir) != ".": - self.output_prefix = os.path.basename(self.output_dir) - else: - self.output_prefix = "catalog" - - if halos_ds is None: - if data_ds is None: - raise RuntimeError("Must specify a halos_ds, data_ds, or both.") - if finder_method is None: - raise RuntimeError("Must specify a halos_ds or a finder_method.") - - if data_source is None: - if halos_ds is not None: - halos_ds.index - data_source = halos_ds.all_data() - else: - data_source = data_ds.all_data() - self.data_source = data_source - - self.finder_method_name = finder_method - if finder_kwargs is None: - finder_kwargs = {} - if finder_method is not None: - finder_method = finding_method_registry.find(finder_method, - **finder_kwargs) - self.finder_method = finder_method - - # all of the analysis actions to be performed: callbacks, filters, and quantities - self.actions = [] - # fields to be written to the halo catalog - self.quantities = [] - if self.halos_ds is not None: - self.add_default_quantities() - - def add_callback(self, callback, *args, **kwargs): - r""" - Add a callback to the halo catalog action list. - - A callback is a function that accepts and operates on a Halo object and - does not return anything. Callbacks must exist within the callback_registry. - Give additional args and kwargs to be passed to the callback here. - - Parameters - ---------- - callback : string - The name of the callback. - - Examples - -------- - - >>> # Here, a callback is defined and added to the registry. - >>> def _say_something(halo, message): - ... my_id = halo.quantities['particle_identifier'] - ... print "Halo %d: here is a message - %s." % (my_id, message) - >>> add_callback("hello_world", _say_something) - - >>> # Now this callback is accessible to the HaloCatalog object - >>> hc.add_callback("hello_world", "this is my message") - - """ - callback = callback_registry.find(callback, *args, **kwargs) - if "output_dir" in kwargs is not None: - ensure_dir(os.path.join(self.output_dir, kwargs["output_dir"])) - self.actions.append(("callback", callback)) - - def add_quantity(self, key, *args, **kwargs): - r""" - Add a quantity to the halo catalog action list. 
- - A quantity is a function that accepts a Halo object and return a value or - values. These values are stored in a "quantities" dictionary associated - with the Halo object. Quantities must exist within the quantity_registry. - Give additional args and kwargs to be passed to the quantity function here. - - Parameters - ---------- - key : string - The name of the callback. - field_type : string - If not None, the quantity is the value of the field provided by the - key parameter, taken from the halo finder dataset. This is the way - one pulls values for the halo from the halo dataset. - Default : None - - Examples - -------- - - >>> # pull the virial radius from the halo finder dataset - >>> hc.add_quantity("virial_radius", field_type="halos") - - >>> # define a custom quantity and add it to the register - >>> def _mass_squared(halo): - ... # assume some entry "particle_mass" exists in the quantities dict - ... return halo.quantities["particle_mass"]**2 - >>> add_quantity("mass_squared", _mass_squared) - - >>> # add it to the halo catalog action list - >>> hc.add_quantity("mass_squared") - - """ - if "field_type" in kwargs: - field_type = kwargs.pop("field_type") - else: - field_type = None - prepend = kwargs.pop("prepend",False) - if field_type is None: - quantity = quantity_registry.find(key, *args, **kwargs) - elif (field_type, key) in self.halos_ds.field_info: - quantity = (field_type, key) - else: - raise RuntimeError("HaloCatalog quantity must be a registered function or a field of a known type.") - self.quantities.append(key) - if prepend: - self.actions.insert(0, ("quantity", (key, quantity))) - else: - self.actions.append(("quantity", (key, quantity))) - - def add_filter(self, halo_filter, *args, **kwargs): - r""" - Add a filter to the halo catalog action list. - - A filter is a function that accepts a Halo object and returns either True - or False. If True, any additional actions added to the list are carried out - and the results are added to the final halo catalog. If False, any further - actions are skipped and the halo will be omitted from the final catalog. - Filters must exist within the filter_registry. Give additional args and kwargs - to be passed to the filter function here. - - Parameters - ---------- - halo_filter : string - The name of the filter. - - Examples - -------- - - >>> # define a filter and add it to the register. - >>> def _my_filter(halo, mass_value): - ... return halo.quantities["particle_mass"] > YTQuantity(mass_value, "Msun") - >>> # add it to the register - >>> add_filter("mass_filter", _my_filter) - - >>> # add the filter to the halo catalog actions - >>> hc.add_filter("mass_value", 1e12) - - """ - - halo_filter = filter_registry.find(halo_filter, *args, **kwargs) - self.actions.append(("filter", halo_filter)) - - def add_recipe(self, recipe, *args, **kwargs): - r""" - Add a recipe to the halo catalog action list. - - A recipe is an operation consisting of a series of callbacks, quantities, - and/or filters called in succession. Recipes can be used to store a more - complex series of analysis tasks as a single entity. - - Currently, the available recipe is ``calculate_virial_quantities``. - - Parameters - ---------- - - halo_recipe : string - The name of the recipe. 
- - Examples - -------- - - >>> import yt - >>> from yt.analysis_modules.halo_analysis.api import HaloCatalog - >>> - >>> data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006') - >>> halos_ds = yt.load('rockstar_halos/halos_0.0.bin') - >>> hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds) - >>> - >>> # Filter out less massive halos - >>> hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun") - >>> - >>> # Calculate virial radii - >>> hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"]) - >>> - >>> hc.create() - - """ - - halo_recipe = recipe_registry.find(recipe, *args, **kwargs) - halo_recipe(self) - - def create(self, save_halos=False, save_catalog=True, njobs=-1, dynamic=False): - r""" - Create the halo catalog given the callbacks, quantities, and filters that - have been provided. - - This is a wrapper around the main _run function with default arguments tuned - for halo catalog creation. By default, halo objects are not saved but the - halo catalog is written, opposite to the behavior of the load function. - - Parameters - ---------- - save_halos : bool - If True, a list of all Halo objects is retained under the "halo_list" - attribute. If False, only the compiles quantities are saved under the - "catalog" attribute. - Default: False - save_catalog : bool - If True, save the final catalog to disk. - Default: True - njobs : int - The number of jobs over which to divide halo analysis. Choose -1 - to allocate one processor per halo. - Default: -1 - dynamic : int - If False, halo analysis is divided evenly between all available processors. - If True, parallelism is performed via a task queue. - Default: False - - See Also - -------- - load - - """ - self._run(save_halos, save_catalog, njobs=njobs, dynamic=dynamic) - - def load(self, save_halos=True, save_catalog=False, njobs=-1, dynamic=False): - r""" - Load a previously created halo catalog. - - This is a wrapper around the main _run function with default arguments tuned - for reloading halo catalogs and associated data. By default, halo objects are - saved and the halo catalog is not written, opposite to the behavior of the - create function. - - Parameters - ---------- - save_halos : bool - If True, a list of all Halo objects is retained under the "halo_list" - attribute. If False, only the compiles quantities are saved under the - "catalog" attribute. - Default: True - save_catalog : bool - If True, save the final catalog to disk. - Default: False - njobs : int - The number of jobs over which to divide halo analysis. Choose -1 - to allocate one processor per halo. - Default: -1 - dynamic : int - If False, halo analysis is divided evenly between all available processors. - If True, parallelism is performed via a task queue. - Default: False - - See Also - -------- - create - - """ - self._run(save_halos, save_catalog, njobs=njobs, dynamic=dynamic) - - @parallel_blocking_call - def _run(self, save_halos, save_catalog, njobs=-1, dynamic=False): - r""" - Run the requested halo analysis. - - Parameters - ---------- - save_halos : bool - If True, a list of all Halo objects is retained under the "halo_list" - attribute. If False, only the compiles quantities are saved under the - "catalog" attribute. - save_catalog : bool - If True, save the final catalog to disk. - njobs : int - The number of jobs over which to divide halo analysis. Choose -1 - to allocate one processor per halo. - Default: -1 - dynamic : int - If False, halo analysis is divided evenly between all available processors. 
- If True, parallelism is performed via a task queue. - Default: False - - See Also - -------- - create, load - - """ - self.catalog = [] - if save_halos: self.halo_list = [] - - if self.halos_ds is None: - # Find the halos and make a dataset of them - self.halos_ds = self.finder_method(self.data_ds) - if self.halos_ds is None: - mylog.warning('No halos were found for {0}'.format(\ - self.data_ds.basename)) - if save_catalog: - self.halos_ds = self.data_ds - self.save_catalog() - self.halos_ds = None - return - self.halos_ds.index - - # Assign ds and data sources appropriately - self.data_source = self.halos_ds.all_data() - - # Add all of the default quantities that all halos must have - self.add_default_quantities('all') - - halo_index = np.argsort(self.data_source["all", "particle_identifier"]) - # If we have just run hop or fof, halos are already divided amongst processors. - if self.finder_method_name in ["hop", "fof"]: - my_index = halo_index - nhalos = self.comm.mpi_allreduce(halo_index.size, op="sum") - else: - my_index = parallel_objects(halo_index, njobs=njobs, dynamic=dynamic) - nhalos = halo_index.size - - my_i = 0 - my_n = self.comm.size - pbar = get_pbar("Creating catalog", nhalos, parallel=True) - for i in my_index: - my_i += min(my_n, nhalos - my_i) - new_halo = Halo(self) - halo_filter = True - for action_type, action in self.actions: - if action_type == "callback": - action(new_halo) - elif action_type == "filter": - halo_filter = action(new_halo) - if not halo_filter: - pbar.update(my_i) - break - elif action_type == "quantity": - key, quantity = action - if quantity in self.halos_ds.field_info: - new_halo.quantities[key] = \ - self.data_source[quantity][int(i)] - elif callable(quantity): - new_halo.quantities[key] = quantity(new_halo) - else: - raise RuntimeError( - "Action must be a callback, filter, or quantity.") - - if halo_filter: - for quantity in new_halo.quantities.values(): - if hasattr(quantity, "units"): - quantity.convert_to_base() - self.catalog.append(new_halo.quantities) - - if save_halos and halo_filter: - self.halo_list.append(new_halo) - else: - del new_halo - - pbar.update(my_i) - - self.catalog.sort(key=lambda a:a['particle_identifier'].to_ndarray()) - if save_catalog: - self.save_catalog() - - def save_catalog(self): - "Write out hdf5 file with all halo quantities." - - filename = os.path.join(self.output_dir, "%s.%d.h5" % - (self.output_prefix, self.comm.rank)) - n_halos = len(self.catalog) - mylog.info("Saving halo catalog (%d halos) to %s." % - (n_halos, os.path.join(self.output_dir, - self.output_prefix))) - extra_attrs = {"data_type": "halo_catalog", - "num_halos": n_halos} - data = {} - ftypes = {} - if n_halos > 0: - for key in self.quantities: - # This sets each field to be saved in the root hdf5 group, - # as per the HaloCatalog format. - ftypes[key] = "." - data[key] = self.halos_ds.arr( - [halo[key] for halo in self.catalog]) - - save_as_dataset(self.halos_ds, filename, data, - field_types=ftypes, extra_attrs=extra_attrs) - - def add_default_quantities(self, field_type='halos'): - for field in ["particle_identifier", "particle_mass", - "particle_position_x", "particle_position_y", - "particle_position_z", "virial_radius"]: - field_name = (field_type, field) - if field_name not in self.halos_ds.field_list: - mylog.warn("Halo dataset %s has no field %s." 
% - (self.halos_ds, str(field_name))) - continue - self.add_quantity(field, field_type=field_type, prepend=True) diff --git a/yt/analysis_modules/halo_analysis/halo_filters.py b/yt/analysis_modules/halo_analysis/halo_filters.py deleted file mode 100644 index 9e5fdc151cc..00000000000 --- a/yt/analysis_modules/halo_analysis/halo_filters.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -Halo filter object - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013-2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np - -from yt.utilities.operator_registry import \ - OperatorRegistry -from yt.utilities.on_demand_imports import \ - _scipy as scipy - -from .halo_callbacks import HaloCallback - -filter_registry = OperatorRegistry() - -def add_filter(name, function): - filter_registry[name] = HaloFilter(function) - -class HaloFilter(HaloCallback): - r""" - A HaloFilter is a function that minimally takes a Halo object, performs - some analysis, and returns either True or False. The return value determines - whether the Halo is added to the final halo catalog being generated by the - HaloCatalog object. - """ - def __init__(self, function, *args, **kwargs): - HaloCallback.__init__(self, function, args, kwargs) - - def __call__(self, halo): - return self.function(halo, *self.args, **self.kwargs) - -def quantity_value(halo, field, operator, value, units): - r""" - Filter based on a value in the halo quantities dictionary. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - field : string - The field used for the evaluation. - operator : string - The comparison operator to be used ("<", "<=", "==", ">=", ">", etc.) - value : numneric - The value to be compared against. - units : string - Units of the value to be compared. - - """ - - if field not in halo.quantities: - raise RuntimeError("Halo object does not contain %s quantity." % field) - - h_value = halo.quantities[field].in_units(units).to_ndarray() - return eval("%s %s %s" % (h_value, operator, value)) - -add_filter("quantity_value", quantity_value) - -def not_subhalo(halo, field_type="halos"): - """ - Only return true if this halo is not a subhalo. - - This is used for halo finders such as Rockstar that output parent - and subhalos together. - """ - - if not hasattr(halo.halo_catalog, "parent_dict"): - halo.halo_catalog.parent_dict = \ - _create_parent_dict(halo.halo_catalog.data_source, ptype=field_type) - return halo.halo_catalog.parent_dict[int(halo.quantities["particle_identifier"])] == -1 -add_filter("not_subhalo", not_subhalo) - -def _create_parent_dict(data_source, ptype="halos"): - """ - Create a dictionary of halo parents to allow for filtering of subhalos. - - For a pair of halos whose distance is smaller than the radius of at least - one of the halos, the parent is defined as the halo with the larger radius. - Parent halos (halos with no parents of their own) have parent index values of -1. 
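# A brief sketch of queueing the subhalo filter registered above, assuming ``hc`` is a
# HaloCatalog built from Rockstar output (parent/subhalo relationships are only
# meaningful for finders that report both, such as Rockstar).
hc.add_filter("not_subhalo")   # keeps only halos whose computed parent index is -1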
- """ - pos = np.rollaxis( - np.array([data_source[ptype, "particle_position_x"].in_units("Mpc"), - data_source[ptype, "particle_position_y"].in_units("Mpc"), - data_source[ptype, "particle_position_z"].in_units("Mpc")]), 1) - rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray() - ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int") - parents = -1 * np.ones_like(ids, dtype="int") - boxsize = data_source.ds.domain_width.in_units('Mpc') - my_tree = scipy.spatial.cKDTree(pos, boxsize=boxsize) - - for i in range(ids.size): - neighbors = np.array( - my_tree.query_ball_point(pos[i], rad[i], p=2)) - if neighbors.size > 1: - parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]] - - parents[ids == parents] = -1 - parent_dict = dict(zip(ids, parents)) - return parent_dict diff --git a/yt/analysis_modules/halo_analysis/halo_finding_methods.py b/yt/analysis_modules/halo_analysis/halo_finding_methods.py deleted file mode 100644 index 4d9c98ad1be..00000000000 --- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py +++ /dev/null @@ -1,152 +0,0 @@ -""" -Halo Finding methods - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np - -from yt.analysis_modules.halo_finding.halo_objects import \ - FOFHaloFinder, HOPHaloFinder -from yt.frontends.stream.data_structures import \ - load_particles -from yt.units.dimensions import length -from yt.utilities.operator_registry import \ - OperatorRegistry - -finding_method_registry = OperatorRegistry() - -def add_finding_method(name, function): - finding_method_registry[name] = HaloFindingMethod(function) - -class HaloFindingMethod(object): - r""" - A halo finding method is a callback that performs halo finding on a - dataset and returns a new dataset that is the loaded halo finder output. - """ - def __init__(self, function, args=None, kwargs=None): - self.function = function - self.args = args - if self.args is None: self.args = [] - self.kwargs = kwargs - if self.kwargs is None: self.kwargs = {} - - def __call__(self, ds): - return self.function(ds, *self.args, **self.kwargs) - -def _hop_method(ds, **finder_kwargs): - r""" - Run the Hop halo finding method. - """ - - halo_list = HOPHaloFinder(ds, **finder_kwargs) - halos_ds = _parse_old_halo_list(ds, halo_list) - return halos_ds -add_finding_method("hop", _hop_method) - -def _fof_method(ds, **finder_kwargs): - r""" - Run the FoF halo finding method. - """ - - halo_list = FOFHaloFinder(ds, **finder_kwargs) - halos_ds = _parse_old_halo_list(ds, halo_list) - return halos_ds -add_finding_method("fof", _fof_method) - -def _rockstar_method(ds, **finder_kwargs): - r""" - Run the Rockstar halo finding method. 
- """ - - from yt.frontends.rockstar.data_structures import \ - RockstarDataset - from yt.analysis_modules.halo_finding.rockstar.api import \ - RockstarHaloFinder - - rh = RockstarHaloFinder(ds, **finder_kwargs) - rh.run() - - if 'outbase' in finder_kwargs: - outbase = finder_kwargs['outbase'] - else: - outbase = "rockstar_halos" - - halos_ds = RockstarDataset(outbase + "/halos_0.0.bin") - try: - halos_ds.create_field_info() - except ValueError: - return None - - return halos_ds -add_finding_method("rockstar", _rockstar_method) - -def _parse_old_halo_list(data_ds, halo_list): - r""" - Convert the halo list into a loaded dataset. - """ - - num_halos = len(halo_list) - - if num_halos == 0: return None - - # Set up fields that we want to pull from identified halos and their units - new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', - 'particle_position_y','particle_position_z', - 'virial_radius'] - new_units = [ '', 'g', 'cm', 'cm','cm','cm'] - - # Set up a dictionary based on those fields - # with empty arrays where we will fill in their values - halo_properties = { f : (np.zeros(num_halos),unit) \ - for f, unit in zip(new_fields,new_units)} - - # Iterate through the halos pulling out their positions and virial quantities - # and filling in the properties dictionary - for i,halo in enumerate(halo_list): - halo_properties['particle_identifier'][0][i] = i - halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs() - halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs() - - com = halo.center_of_mass().in_cgs() - halo_properties['particle_position_x'][0][i] = com[0] - halo_properties['particle_position_y'][0][i] = com[1] - halo_properties['particle_position_z'][0][i] = com[2] - - # Define a bounding box based on original data ds - bbox = np.array([data_ds.domain_left_edge.in_cgs(), - data_ds.domain_right_edge.in_cgs()]).T - - # Create a ds with the halos as particles - particle_ds = load_particles(halo_properties, - bbox=bbox, length_unit = 1, mass_unit=1) - - # Create the field info dictionary so we can reference those fields - particle_ds.create_field_info() - - for attr in ["current_redshift", "current_time", - "domain_dimensions", - "cosmological_simulation", "omega_lambda", - "omega_matter", "hubble_constant"]: - attr_val = getattr(data_ds, attr) - setattr(particle_ds, attr, attr_val) - particle_ds.current_time = particle_ds.current_time.in_cgs() - - particle_ds.unit_registry.modify("h", particle_ds.hubble_constant) - # Comoving lengths - for my_unit in ["m", "pc", "AU", "au"]: - new_unit = "%scm" % my_unit - particle_ds.unit_registry.add(new_unit, particle_ds.unit_registry.lut[my_unit][0] / - (1 + particle_ds.current_redshift), - length, "\\rm{%s}/(1+z)" % my_unit) - - return particle_ds diff --git a/yt/analysis_modules/halo_analysis/halo_object.py b/yt/analysis_modules/halo_analysis/halo_object.py deleted file mode 100644 index ec97a995425..00000000000 --- a/yt/analysis_modules/halo_analysis/halo_object.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Halo object. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -class Halo(object): - particles = None - def __init__(self, halo_catalog): - self.halo_catalog = halo_catalog - self.quantities = {} diff --git a/yt/analysis_modules/halo_analysis/halo_quantities.py b/yt/analysis_modules/halo_analysis/halo_quantities.py deleted file mode 100644 index c10f39ade1e..00000000000 --- a/yt/analysis_modules/halo_analysis/halo_quantities.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Halo quantity object - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013-2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np - -from yt.utilities.operator_registry import \ - OperatorRegistry - -from .halo_callbacks import HaloCallback - -quantity_registry = OperatorRegistry() - -def add_quantity(name, function): - quantity_registry[name] = HaloQuantity(function) - -class HaloQuantity(HaloCallback): - r""" - A HaloQuantity is a function that takes minimally a Halo object, - performs some analysis, and then returns a value that is assigned - to an entry in the Halo.quantities dictionary. - """ - def __init__(self, function, *args, **kwargs): - HaloCallback.__init__(self, function, args, kwargs) - - def __call__(self, halo): - return self.function(halo, *self.args, **self.kwargs) - -def center_of_mass(halo): - if halo.particles is None: - raise RuntimeError("Center of mass requires halo to have particle data.") - return (halo.particles['particle_mass'] * - np.array([halo.particles['particle_position_x'], - halo.particles['particle_position_y'], - halo.particles['particle_position_z']])).sum(axis=1) / \ - halo.particles['particle_mass'].sum() - -add_quantity('center_of_mass', center_of_mass) - -def bulk_velocity(halo): - if halo.particles is None: - raise RuntimeError("Bulk velocity requires halo to have particle data.") - return (halo.particles['particle_mass'] * - np.array([halo.particles['particle_velocity_x'], - halo.particles['particle_velocity_y'], - halo.particles['particle_velocity_z']])).sum(axis=1) / \ - halo.particles['particle_mass'].sum() - -add_quantity('bulk_velocity', bulk_velocity) diff --git a/yt/analysis_modules/halo_analysis/halo_recipes.py b/yt/analysis_modules/halo_analysis/halo_recipes.py deleted file mode 100644 index e01742c0273..00000000000 --- a/yt/analysis_modules/halo_analysis/halo_recipes.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Halo recipe object - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.utilities.operator_registry import \ - OperatorRegistry - -recipe_registry = OperatorRegistry() - -def add_recipe(name, function): - recipe_registry[name] = HaloRecipe(function) - -class HaloRecipe(object): - r""" - A HaloRecipe is a function that minimally takes in a Halo object - and performs some analysis on it. This function may attach attributes - to the Halo object, write out data, etc, but does not return anything. 
- """ - def __init__(self, function, args=None, kwargs=None): - self.function = function - self.args = args - if self.args is None: self.args = [] - self.kwargs = kwargs - if self.kwargs is None: self.kwargs = {} - - def __call__(self, halo_catalog): - return self.function(halo_catalog, *self.args, **self.kwargs) - -def calculate_virial_quantities(hc, fields, - weight_field=None, accumulation=True, - radius_field="virial_radius", factor=2.0, - overdensity_field=("gas", "overdensity"), - critical_overdensity=200): - r""" - Calculate virial quantities with the following procedure: - 1. Create a sphere data container. - 2. Create 1D radial profiles of overdensity and any requested fields. - 3. Call virial_quantities callback to interpolate profiles for value of critical overdensity. - 4. Delete profile and sphere objects from halo. - - Parameters - ---------- - halo : Halo object - The Halo object to be provided by the HaloCatalog. - fields: string or list of strings - The fields for which virial values are to be calculated. - weight_field : string - Weight field for profiling. - Default : "cell_mass" - accumulation : bool or list of bools - If True, the profile values for a bin n are the cumulative sum of - all the values from bin 0 to n. If -True, the sum is reversed so - that the value for bin n is the cumulative sum from bin N (total bins) - to n. If the profile is 2D or 3D, a list of values can be given to - control the summation in each dimension independently. - Default: False. - radius_field : string - Field to be retrieved from the quantities dictionary as - the basis of the halo radius. - Default: "virial_radius". - factor : float - Factor to be multiplied by the base radius for defining - the radius of the sphere. - Default: 2.0. - overdensity_field : string or tuple of strings - The field used as the overdensity from which interpolation is done to - calculate virial quantities. - Default: ("gas", "overdensity") - critical_overdensity : float - The value of the overdensity at which to evaluate the virial quantities. - Overdensity is with respect to the critical density. 
- Default: 200 - - """ - - storage = "virial_quantities_profiles" - pfields = [field for field in fields if field != "radius"] - - hc.add_callback("sphere", factor=factor) - if pfields: - hc.add_callback("profile", ["radius"], pfields, - weight_field=weight_field, - accumulation=accumulation, - storage=storage) - hc.add_callback("profile", ["radius"], [overdensity_field], - weight_field="cell_volume", accumulation=True, - storage=storage) - hc.add_callback("virial_quantities", fields, - overdensity_field=overdensity_field, - critical_overdensity=critical_overdensity, - profile_storage=storage) - hc.add_callback("delete_attribute", storage) - hc.add_callback("delete_attribute", "data_object") - -add_recipe("calculate_virial_quantities", calculate_virial_quantities) diff --git a/yt/analysis_modules/halo_analysis/tests/run_halo_finder.py b/yt/analysis_modules/halo_analysis/tests/run_halo_finder.py deleted file mode 100644 index dd203866773..00000000000 --- a/yt/analysis_modules/halo_analysis/tests/run_halo_finder.py +++ /dev/null @@ -1,32 +0,0 @@ -from mpi4py import MPI -import os -import sys -import yt -from yt.analysis_modules.halo_analysis.api import \ - HaloCatalog -from yt.data_objects.particle_filters import \ - particle_filter -yt.enable_parallelism() - -method = sys.argv[1] -comm = MPI.Comm.Get_parent() - -methods = {"fof": {}, "hop": {}, - "rockstar": {"num_readers":1, - "num_writers":1, - "particle_type":"dark_matter"}} - -@particle_filter("dark_matter", requires=["creation_time"]) -def _dm_filter(pfilter, data): - return data["creation_time"] <= 0.0 - -ds = yt.load("Enzo_64/DD0043/data0043") -ds.add_particle_filter("dark_matter") - -output_dir = os.path.join(os.path.dirname(__file__), - "halo_catalogs", method) -hc = HaloCatalog(data_ds=ds, output_dir=output_dir, - finder_method=method, finder_kwargs=methods[method]) -hc.create() - -comm.Disconnect() diff --git a/yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py b/yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py deleted file mode 100644 index 3c9c6b08510..00000000000 --- a/yt/analysis_modules/halo_analysis/tests/test_halo_catalog.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -HaloCatalog answer tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -import numpy as np -import os -import shutil -import tempfile - -from yt.analysis_modules.halo_analysis.api import \ - HaloCatalog, \ - add_quantity -from yt.convenience import \ - load -from yt.testing import \ - assert_equal -from yt.utilities.answer_testing.framework import \ - AnswerTestingTest, \ - data_dir_load, \ - requires_ds - -def _nstars(halo): - sp = halo.data_object - return (sp["all", "creation_time"] > 0).sum() -add_quantity("nstars", _nstars) - -class HaloQuantityTest(AnswerTestingTest): - _type_name = "HaloQuantity" - _attrs = () - - def __init__(self, data_ds_fn, halos_ds_fn): - self.data_ds_fn = data_ds_fn - self.halos_ds_fn = halos_ds_fn - self.ds = data_dir_load(data_ds_fn) - - def run(self): - curdir = os.getcwd() - tmpdir = tempfile.mkdtemp() - os.chdir(tmpdir) - - dds = data_dir_load(self.data_ds_fn) - hds = data_dir_load(self.halos_ds_fn) - hc = HaloCatalog( - data_ds=dds, halos_ds=hds, - output_dir=os.path.join(tmpdir, str(dds))) - hc.add_callback("sphere") - hc.add_quantity("nstars") - hc.create() - - fn = os.path.join(tmpdir, str(dds), - "%s.0.h5" % str(dds)) - ds = load(fn) - ad = ds.all_data() - mi, ma = ad.quantities.extrema("nstars") - mean = ad.quantities.weighted_average_quantity( - "nstars", "particle_ones") - - os.chdir(curdir) - shutil.rmtree(tmpdir) - - return np.array([mean, mi, ma]) - - def compare(self, new_result, old_result): - assert_equal(new_result, old_result, verbose=True) - -rh0 = "rockstar_halos/halos_0.0.bin" -e64 = "Enzo_64/DD0043/data0043" - -@requires_ds(rh0) -@requires_ds(e64) -def test_halo_quantity(): - yield HaloQuantityTest(e64, rh0) diff --git a/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py b/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py deleted file mode 100644 index 86cada039e9..00000000000 --- a/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import sys - -from yt.convenience import load -from yt.frontends.halo_catalog.data_structures import \ - HaloCatalogDataset -from yt.utilities.answer_testing.framework import \ - FieldValuesTest, \ - requires_ds - -_fields = (("halos", "particle_position_x"), - ("halos", "particle_position_y"), - ("halos", "particle_position_z"), - ("halos", "particle_mass")) - -methods = {"fof": 2, "hop": 2, "rockstar": 3} -decimals = {"fof": 10, "hop": 10, "rockstar": 1} - -e64 = "Enzo_64/DD0043/data0043" -@requires_ds(e64, big_data=True) -def test_halo_finders(): - from mpi4py import MPI - filename = os.path.join(os.path.dirname(__file__), - "run_halo_finder.py") - for method in methods: - comm = MPI.COMM_SELF.Spawn(sys.executable, - args=[filename, method], - maxprocs=methods[method]) - comm.Disconnect() - - fn = os.path.join(os.path.dirname(__file__), - "halo_catalogs", method, - "%s.0.h5" % method) - ds = load(fn) - assert isinstance(ds, HaloCatalogDataset) - for field in _fields: - yield FieldValuesTest(ds, field, particle_type=True, - decimals=decimals[method]) diff --git a/yt/analysis_modules/halo_finding/api.py b/yt/analysis_modules/halo_finding/api.py index 8d6b6a352ec..c0db4ee5e0c 100644 --- a/yt/analysis_modules/halo_finding/api.py +++ b/yt/analysis_modules/halo_finding/api.py @@ -1,40 +1,7 @@ -""" -API for halo_finding +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. 
-# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the halo_finding module has been moved to " - "the yt_astro_analysis package. This version is deprecated " - "and will be removed from yt in a future release. See " - "https://github.com/yt-project/yt_astro_analysis for further " - "information.") - -from .halo_objects import \ - Halo, \ - HOPHalo, \ - LoadedHalo, \ - FOFHalo, \ - HaloList, \ - HOPHaloList, \ - FOFHaloList, \ - LoadedHaloList, \ - GenericHaloFinder, \ - HOPHaloFinder, \ - FOFHaloFinder, \ - HaloFinder, \ - LoadHaloes, \ - LoadTextHalos, \ - LoadTextHaloes +raise YTModuleRemoved( + "halo_finding", + "https://github.com/yt-project/yt_astro_analysis", + "https://yt-astro-analysis.readthedocs.io/") diff --git a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c deleted file mode 100644 index c199a7bb9f2..00000000000 --- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c +++ /dev/null @@ -1,216 +0,0 @@ -/******************************************************************************* -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -*******************************************************************************/ - -// -// EnzoFOF -// A module for running friends-of-friends halo finding on a set of particles -// - -#include "Python.h" -#include -#include -#include -#include -#include "kd.h" -#include "tipsydefs.h" - -#include "numpy/ndarrayobject.h" - - -static PyObject *_FOFerror; - -static PyObject * -Py_EnzoFOF(PyObject *obj, PyObject *args) -{ - PyObject *oxpos, *oypos, *ozpos; - PyArrayObject *xpos, *ypos, *zpos; - float link = 0.2; - float fPeriod[3] = {1.0, 1.0, 1.0}; - int nMembers = 8; - int i, num_particles; - KDFOF kd; - int nBucket,j; - float fEps; - int nGroup,bVerbose=1; - int sec,usec; - PyArrayObject *particle_group_id; - PyObject *return_value; - - xpos=ypos=zpos=NULL; - - if (!PyArg_ParseTuple(args, "OOO|f(fff)i", - &oxpos, &oypos, &ozpos, &link, - &fPeriod[0], &fPeriod[1], &fPeriod[2], - &nMembers)) - return PyErr_Format(_FOFerror, - "EnzoFOF: Invalid parameters."); - - /* First the regular source arrays */ - - xpos = (PyArrayObject *) PyArray_FromAny(oxpos, - PyArray_DescrFromType(NPY_FLOAT64), 1, 1, - NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL); - if(!xpos){ - PyErr_Format(_FOFerror, - "EnzoFOF: xpos didn't work."); - goto _fail; - } - num_particles = PyArray_SIZE(xpos); - - ypos = (PyArrayObject *) PyArray_FromAny(oypos, - PyArray_DescrFromType(NPY_FLOAT64), 1, 1, - NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL); - if((!ypos)||(PyArray_SIZE(ypos) != num_particles)) { - PyErr_Format(_FOFerror, - "EnzoFOF: xpos and ypos must be the same length."); - goto _fail; - } - - zpos = (PyArrayObject *) PyArray_FromAny(ozpos, - PyArray_DescrFromType(NPY_FLOAT64), 1, 1, - NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL); - if((!zpos)||(PyArray_SIZE(zpos) != num_particles)) { - PyErr_Format(_FOFerror, - "EnzoFOF: xpos and zpos must be the same length."); - goto _fail; - } - - /* let's get started with the FOF stuff */ - - /* linking length */ - fprintf(stdout, "Link length is %f\n", link); - fEps = 
link; - - nBucket = 16; - - /* initialize the kd FOF structure */ - - kdInitFoF(&kd,nBucket,fPeriod); - - /* kdReadTipsyFoF(kd,stdin,bDark,bGas,bStar); */ - - /* Copy positions into kd structure. */ - - fprintf(stdout, "Filling in %d particles\n", num_particles); - kd->nActive = num_particles; - kd->p = (PARTICLEFOF *)malloc(kd->nActive*sizeof(PARTICLEFOF)); - assert(kd->p != NULL); - for (i = 0; i < num_particles; i++) { - kd->p[i].iOrder = i; - kd->p[i].r[0] = (float)(*(npy_float64*) PyArray_GETPTR1(xpos, i)); - kd->p[i].r[1] = (float)(*(npy_float64*) PyArray_GETPTR1(ypos, i)); - kd->p[i].r[2] = (float)(*(npy_float64*) PyArray_GETPTR1(zpos, i)); - } - - kdBuildTreeFoF(kd); - kdTimeFoF(kd,&sec,&usec); - nGroup = kdFoF(kd,fEps); - kdTimeFoF(kd,&sec,&usec); - if (bVerbose) printf("Number of initial groups:%d\n",nGroup); - nGroup = kdTooSmallFoF(kd,nMembers); - if (bVerbose) { - printf("Number of groups:%d\n",nGroup); - printf("FOF CPU TIME: %d.%06d secs\n",sec,usec); - } - kdOrderFoF(kd); - - /* kdOutGroupFoF(kd,ach); */ - - // Now we need to get the groupID, realID. - // This will give us the index into the original array. - // Additionally, note that we don't really need to tie the index - // back to the ID in this code, as we can do that back in the python code. - // All we need to do is group information. - - // Tags are in kd->p[i].iGroup - particle_group_id = (PyArrayObject *) - PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos), - PyArray_DescrFromType(NPY_INT32)); - - for (i = 0; i < num_particles; i++) { - // group tag is in kd->p[i].iGroup - *(npy_int32*)(PyArray_GETPTR1(particle_group_id, i)) = - (npy_int32) kd->p[i].iGroup; - } - - kdFinishFoF(kd); - - PyArray_UpdateFlags(particle_group_id, - NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id)); - return_value = Py_BuildValue("N", particle_group_id); - - Py_DECREF(xpos); - Py_DECREF(ypos); - Py_DECREF(zpos); - - /* We don't need this, as it's done in kdFinish - if(kd->p!=NULL)free(kd->p); - */ - - return return_value; - -_fail: - Py_XDECREF(xpos); - Py_XDECREF(ypos); - Py_XDECREF(zpos); - - if(kd->p!=NULL)free(kd->p); - - return NULL; - -} - -static PyMethodDef _FOFMethods[] = { - {"RunFOF", Py_EnzoFOF, METH_VARARGS}, - {NULL, NULL} /* Sentinel */ -}; - -/* platform independent*/ -#ifdef MS_WIN32 -__declspec(dllexport) -#endif - -PyMODINIT_FUNC -#if PY_MAJOR_VERSION >= 3 -#define _RETVAL m -PyInit_EnzoFOF(void) -#else -#define _RETVAL -initEnzoFOF(void) -#endif -{ - PyObject *m, *d; -#if PY_MAJOR_VERSION >= 3 - static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "EnzoFOF", /* m_name */ - "EnzoFOF Module", /* m_doc */ - -1, /* m_size */ - _FOFMethods, /* m_methods */ - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL, /* m_free */ - }; - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("EnzoFOF", _FOFMethods); -#endif - d = PyModule_GetDict(m); - _FOFerror = PyErr_NewException("EnzoFOF.FOFerror", NULL, NULL); - PyDict_SetItemString(d, "error", _FOFerror); - import_array(); - return _RETVAL; -} - -/* - * Local Variables: - * mode: C - * c-file-style: "python" - * End: - */ diff --git a/yt/analysis_modules/halo_finding/fof/README b/yt/analysis_modules/halo_finding/fof/README deleted file mode 100644 index 36f41ef205c..00000000000 --- a/yt/analysis_modules/halo_finding/fof/README +++ /dev/null @@ -1,34 +0,0 @@ - - - FOF v1.1 - - A Group Finder for N-body Simulations - - October 26, 1994 - -Changes from v1.0: - o Fixed bug in tree building, this bug only affected cases where - a very 
small "bucket" size was chosen and the number of particles - was not a power of two. - -Included are: - README - Makefile - cat1/fof.1 - kd.c - kd.h - main.c - man1/fof.1 - tipsydefs.h - -For detailed information read the man page (either cat1/fof.1 or -man1/fof.1). - -To build: - - > make - -To get further information contact: - - hpccsoft@astro.washington.edu - diff --git a/yt/analysis_modules/halo_finding/fof/fof_main.c b/yt/analysis_modules/halo_finding/fof/fof_main.c deleted file mode 100644 index e4e7e4c1b64..00000000000 --- a/yt/analysis_modules/halo_finding/fof/fof_main.c +++ /dev/null @@ -1,134 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include "kd.h" - - -void usage(void) -{ - fprintf(stderr,"USAGE:\n"); - fprintf(stderr,"fof -e \n"); - fprintf(stderr," [-m ] [-dgs] [-v]\n"); - fprintf(stderr," [-o ] [-p ]\n"); - fprintf(stderr," [-px ] [-py ] [-pz ]\n"); - fprintf(stderr,"Input taken from stdin in tipsy binary format.\n"); - fprintf(stderr,"SEE MAN PAGE: fof(1) for more information.\n"); - exit(1); - } - -void main(int argc,char **argv) -{ - KDFOF kd; - int nBucket,i,j; - char ach[80]; - float fPeriod[3],fEps; - int bDark,bGas,bStar; - int nMembers,nGroup,bVerbose; - int sec,usec; - char *p; - - nBucket = 16; - nMembers = 8; - bDark = 1; - bGas = 1; - bStar = 1; - bVerbose = 0; - strcpy(ach,"fof"); - i = 1; - for (j=0;j<3;++j) fPeriod[j] = HUGE; - while (i < argc) { - if (!strcmp(argv[i],"-e")) { - ++i; - fEps = atof(argv[i]); - ++i; - } - else if (!strcmp(argv[i],"-m")) { - ++i; - nMembers = atoi(argv[i]); - ++i; - } - else if (!strcmp(argv[i],"-o")) { - ++i; - strcpy(ach,argv[i]); - ++i; - } - else if (!strcmp(argv[i],"-p")) { - ++i; - fPeriod[0] = atof(argv[i]); - fPeriod[1] = atof(argv[i]); - fPeriod[2] = atof(argv[i]); - ++i; - } - else if (!strcmp(argv[i],"-px")) { - ++i; - fPeriod[0] = atof(argv[i]); - ++i; - } - else if (!strcmp(argv[i],"-py")) { - ++i; - fPeriod[1] = atof(argv[i]); - ++i; - } - else if (!strcmp(argv[i],"-pz")) { - ++i; - fPeriod[2] = atof(argv[i]); - ++i; - } - else if (!strcmp(argv[i],"-v")) { - bVerbose = 1; - ++i; - } - else if (*argv[i] == '-') { - p = argv[i]; - ++p; - if (*p == 'd' || *p == 'g' || *p == 's') { - bDark = 0; - bGas = 0; - bStar = 0; - } - else usage(); - while (isalpha(*p)) { - switch (*p) { - case 'd': - bDark = 1; - break; - case 'g': - bGas = 1; - break; - case 's': - bStar = 1; - break; - default: - usage(); - } - ++p; - } - ++i; - } - else usage(); - } - kdInitFoF(&kd,nBucket,fPeriod); - kdReadTipsyFoF(kd,stdin,bDark,bGas,bStar); - kdBuildTreeFoF(kd); - kdTimeFoF(kd,&sec,&usec); - nGroup = kdFoF(kd,fEps); - kdTimeFoF(kd,&sec,&usec); - if (bVerbose) printf("Number of initial groups:%d\n",nGroup); - nGroup = kdTooSmallFoF(kd,nMembers); - if (bVerbose) { - printf("Number of groups:%d\n",nGroup); - printf("FOF CPU TIME: %d.%06d secs\n",sec,usec); - } - kdOrderFoF(kd); - strcat(ach,".grp"); - kdOutGroupFoF(kd,ach); - kdFinishFoF(kd); - } - - - - - diff --git a/yt/analysis_modules/halo_finding/fof/kd.c b/yt/analysis_modules/halo_finding/fof/kd.c deleted file mode 100644 index 95965837f92..00000000000 --- a/yt/analysis_modules/halo_finding/fof/kd.c +++ /dev/null @@ -1,470 +0,0 @@ -#include -#include -#include -#ifdef _WIN32 -#include -#else -#include -#include -#endif -#include -#include "kd.h" -#include "tipsydefs.h" - - -void kdTimeFoF(KDFOF kd,int *puSecond,int *puMicro) -{ - -#ifdef _WIN32 - int secs, usecs; - HANDLE hProcess = GetCurrentProcess(); - FILETIME ftCreation, ftExit, ftKernel, ftUser; - 
SYSTEMTIME stUser; - GetProcessTimes(hProcess, &ftCreation, &ftExit, - &ftKernel, &ftUser); - FileTimeToSystemTime(&ftUser, &stUser); - secs = (int)((double)stUser.wHour*3600.0 + - (double)stUser.wMinute*60.0 + - (double)stUser.wSecond); - usecs = (int)((double)stUser.wMilliseconds/1000.0); - *puMicro = usecs; - *puSecond = secs; - if (*puMicro < 0) { - *puMicro += 1000000; - *puSecond -= 1; - } - kd->uSecond = secs; - kd->uMicro = usecs; -#else - struct rusage ru; - - getrusage(0,&ru); - *puMicro = ru.ru_utime.tv_usec - kd->uMicro; - *puSecond = ru.ru_utime.tv_sec - kd->uSecond; - if (*puMicro < 0) { - *puMicro += 1000000; - *puSecond -= 1; - } - kd->uSecond = ru.ru_utime.tv_sec; - kd->uMicro = ru.ru_utime.tv_usec; -#endif -} - -int kdInitFoF(KDFOF *pkd,int nBucket,float *fPeriod) -{ - KDFOF kd; - int j; - - kd = (KDFOF)malloc(sizeof(struct kdContext)); - assert(kd != NULL); - kd->nBucket = nBucket; - for (j=0;j<3;++j) kd->fPeriod[j] = fPeriod[j]; - kd->p = NULL; - kd->kdNodes = NULL; - *pkd = kd; - return(1); - } - - -void kdReadTipsyFoF(KDFOF kd,FILE *fp,int bDark,int bGas,int bStar) -{ - int i,j,nCnt; - struct dump h; - struct gas_particle gp; - struct dark_particle dp; - struct star_particle sp; - - fread(&h,sizeof(struct dump),1,fp); - kd->nParticles = h.nbodies; - kd->nDark = h.ndark; - kd->nGas = h.nsph; - kd->nStar = h.nstar; - kd->fTime = h.time; - kd->nActive = 0; - if (bDark) kd->nActive += kd->nDark; - if (bGas) kd->nActive += kd->nGas; - if (bStar) kd->nActive += kd->nStar; - kd->bDark = bDark; - kd->bGas = bGas; - kd->bStar = bStar; - /* - ** Allocate particles. - */ - kd->p = (PARTICLEFOF *)malloc(kd->nActive*sizeof(PARTICLEFOF)); - assert(kd->p != NULL); - /* - ** Read Stuff! - */ - nCnt = 0; - for (i=0;ip[nCnt].iOrder = nCnt; - for (j=0;j<3;++j) kd->p[nCnt].r[j] = gp.pos[j]; - ++nCnt; - } - } - for (i=0;ip[nCnt].iOrder = nCnt; - for (j=0;j<3;++j) kd->p[nCnt].r[j] = dp.pos[j]; - ++nCnt; - } - } - for (i=0;ip[nCnt].iOrder = nCnt; - for (j=0;j<3;++j) kd->p[nCnt].r[j] = sp.pos[j]; - ++nCnt; - } - } - } - - -void kdSelectFoF(KDFOF kd,int d,int k,int l,int r) -{ - PARTICLEFOF *p,t; - double v; - int i,j; - - p = kd->p; - while (r > l) { - v = p[k].r[d]; - t = p[r]; - p[r] = p[k]; - p[k] = t; - i = l - 1; - j = r; - while (1) { - while (i < j) if (p[++i].r[d] >= v) break; - while (i < j) if (p[--j].r[d] <= v) break; - t = p[i]; - p[i] = p[j]; - p[j] = t; - if (j <= i) break; - } - p[j] = p[i]; - p[i] = p[r]; - p[r] = t; - if (i >= k) r = i - 1; - if (i <= k) l = i + 1; - } - } - - -void kdCombineFoF(KDNFOF *p1,KDNFOF *p2,KDNFOF *pOut) -{ - int j; - - /* - ** Combine the bounds. 
- */ - for (j=0;j<3;++j) { - if (p2->bnd.fMin[j] < p1->bnd.fMin[j]) - pOut->bnd.fMin[j] = p2->bnd.fMin[j]; - else - pOut->bnd.fMin[j] = p1->bnd.fMin[j]; - if (p2->bnd.fMax[j] > p1->bnd.fMax[j]) - pOut->bnd.fMax[j] = p2->bnd.fMax[j]; - else - pOut->bnd.fMax[j] = p1->bnd.fMax[j]; - } - } - - -void kdUpPassFoF(KDFOF kd,int iCell) -{ - KDNFOF *c; - int l,u,pj,j; - - c = kd->kdNodes; - if (c[iCell].iDim != -1) { - l = LOWERFOF(iCell); - u = UPPERFOF(iCell); - kdUpPassFoF(kd,l); - kdUpPassFoF(kd,u); - kdCombineFoF(&c[l],&c[u],&c[iCell]); - } - else { - l = c[iCell].pLower; - u = c[iCell].pUpper; - for (j=0;j<3;++j) { - c[iCell].bnd.fMin[j] = kd->p[u].r[j]; - c[iCell].bnd.fMax[j] = kd->p[u].r[j]; - } - for (pj=l;pjp[pj].r[j] < c[iCell].bnd.fMin[j]) - c[iCell].bnd.fMin[j] = kd->p[pj].r[j]; - if (kd->p[pj].r[j] > c[iCell].bnd.fMax[j]) - c[iCell].bnd.fMax[j] = kd->p[pj].r[j]; - } - } - } - } - -void kdBuildTreeFoF(KDFOF kd) -{ - int l,n,i,d,m,j,diff; - KDNFOF *c; - BNDFOF bnd; - - n = kd->nActive; - kd->nLevels = 1; - l = 1; - while (n > kd->nBucket) { - n = n>>1; - l = l<<1; - ++kd->nLevels; - } - kd->nSplit = l; - kd->nNodes = l<<1; - if (kd->kdNodes != NULL) free(kd->kdNodes); - kd->kdNodes = (KDNFOF *)malloc(kd->nNodes*sizeof(KDNFOF)); - assert(kd->kdNodes != NULL); - /* - ** Calculate Bounds. - */ - for (j=0;j<3;++j) { - bnd.fMin[j] = kd->p[0].r[j]; - bnd.fMax[j] = kd->p[0].r[j]; - } - for (i=1;inActive;++i) { - for (j=0;j<3;++j) { - if (bnd.fMin[j] > kd->p[i].r[j]) - bnd.fMin[j] = kd->p[i].r[j]; - else if (bnd.fMax[j] < kd->p[i].r[j]) - bnd.fMax[j] = kd->p[i].r[j]; - } - } - /* - ** Set up ROOTFOF node - */ - c = kd->kdNodes; - c[ROOTFOF].pLower = 0; - c[ROOTFOF].pUpper = kd->nActive-1; - c[ROOTFOF].bnd = bnd; - i = ROOTFOF; - while (1) { - assert(c[i].pUpper - c[i].pLower + 1 > 0); - if (i < kd->nSplit && (c[i].pUpper - c[i].pLower) > 0) { - d = 0; - for (j=1;j<3;++j) { - if (c[i].bnd.fMax[j]-c[i].bnd.fMin[j] > - c[i].bnd.fMax[d]-c[i].bnd.fMin[d]) d = j; - } - c[i].iDim = d; - - m = (c[i].pLower + c[i].pUpper)/2; - kdSelectFoF(kd,d,m,c[i].pLower,c[i].pUpper); - - c[i].fSplit = kd->p[m].r[d]; - c[LOWERFOF(i)].bnd = c[i].bnd; - c[LOWERFOF(i)].bnd.fMax[d] = c[i].fSplit; - c[LOWERFOF(i)].pLower = c[i].pLower; - c[LOWERFOF(i)].pUpper = m; - c[UPPERFOF(i)].bnd = c[i].bnd; - c[UPPERFOF(i)].bnd.fMin[d] = c[i].fSplit; - c[UPPERFOF(i)].pLower = m+1; - c[UPPERFOF(i)].pUpper = c[i].pUpper; - diff = (m-c[i].pLower+1)-(c[i].pUpper-m); - assert(diff == 0 || diff == 1); - i = LOWERFOF(i); - } - else { - c[i].iDim = -1; - SETNEXTFOF(i); - if (i == ROOTFOF) break; - } - } - kdUpPassFoF(kd,ROOTFOF); - } - - -int kdFoF(KDFOF kd,float fEps) -{ - PARTICLEFOF *p; - KDNFOF *c; - int pi,pj,pn,cp; - - int iGroup; - - int *Fifo,iHead,iTail,nFifo; - float fEps2; - float dx,dy,dz,x,y,z,lx,ly,lz,sx,sy,sz,fDist2; - - p = kd->p; - c = kd->kdNodes; - lx = kd->fPeriod[0]; - ly = kd->fPeriod[1]; - lz = kd->fPeriod[2]; - fEps2 = fEps*fEps; - for (pn=0;pnnActive;++pn) p[pn].iGroup = 0; - nFifo = kd->nActive; - Fifo = (int *)malloc(nFifo*sizeof(int)); - assert(Fifo != NULL); - iHead = 0; - iTail = 0; - iGroup = 0; - for (pn=0;pnnActive;++pn) { - if (p[pn].iGroup) continue; - ++iGroup; - /* - ** Mark it and add to the do-fifo. - */ - p[pn].iGroup = iGroup; - Fifo[iTail++] = pn; - if (iTail == nFifo) iTail = 0; - while (iHead != iTail) { - pi = Fifo[iHead++]; - if (iHead == nFifo) iHead = 0; - /* - ** Now do an fEps-Ball Gather! 
- */ - x = p[pi].r[0]; - y = p[pi].r[1]; - z = p[pi].r[2]; - cp = ROOTFOF; - while (1) { - INTERSECTFOF(c,cp,fEps2,lx,ly,lz,x,y,z,sx,sy,sz); - /* - ** We have an intersection to test. - */ - if (c[cp].iDim >= 0) { - cp = LOWERFOF(cp); - continue; - } - else { - for (pj=c[cp].pLower;pj<=c[cp].pUpper;++pj) { - if (p[pj].iGroup) continue; - dx = sx - p[pj].r[0]; - dy = sy - p[pj].r[1]; - dz = sz - p[pj].r[2]; - fDist2 = dx*dx + dy*dy + dz*dz; - if (fDist2 < fEps2) { - /* - ** Mark it and add to the do-fifo. - */ - p[pj].iGroup = iGroup; - Fifo[iTail++] = pj; - if (iTail == nFifo) iTail = 0; - } - } - SETNEXTFOF(cp); - if (cp == ROOTFOF) break; - continue; - } - ContainedCell: - for (pj=c[cp].pLower;pj<=c[cp].pUpper;++pj) { - if (p[pj].iGroup) continue; - /* - ** Mark it and add to the do-fifo. - */ - p[pj].iGroup = iGroup; - Fifo[iTail++] = pj; - if (iTail == nFifo) iTail = 0; - } - GetNextCell: - SETNEXTFOF(cp); - if (cp == ROOTFOF) break; - } - } - } - free(Fifo); - kd->nGroup = iGroup+1; - return(kd->nGroup-1); - } - - -int kdTooSmallFoF(KDFOF kd,int nMembers) -{ - int *pnMembers,*pMap; - int i,pi,nGroup; - - pnMembers = (int *)malloc(kd->nGroup*sizeof(int)); - assert(pnMembers != NULL); - pMap = (int *)malloc(kd->nGroup*sizeof(int)); - assert(pMap != NULL); - for (i=0;inGroup;++i) pnMembers[i] = 0; - for (pi=0;pinActive;++pi) { - ++pnMembers[kd->p[pi].iGroup]; - } - for (i=1;inGroup;++i) { - if (pnMembers[i] < nMembers) { - pnMembers[i] = 0; - } - } - /* - ** Create a remapping! - */ - pMap[0] = 0; - nGroup = 1; - for (i=1;inGroup;++i) { - pMap[i] = nGroup; - if (pnMembers[i] == 0) { - pMap[i] = -1; /* was 0 */ - } - else { - ++nGroup; - } - } - /* - ** Remap the groups. - */ - for (pi=0;pinActive;++pi) { - kd->p[pi].iGroup = pMap[kd->p[pi].iGroup]; - } - free(pMap); - free(pnMembers); - kd->nGroup = nGroup; - return(nGroup-1); - } - - -int CmpParticlesFoF(const void *v1,const void *v2) -{ - PARTICLEFOF *p1 = (PARTICLEFOF *)v1; - PARTICLEFOF *p2 = (PARTICLEFOF *)v2; - return(p1->iOrder - p2->iOrder); - } - -void kdOrderFoF(KDFOF kd) -{ - qsort(kd->p,kd->nActive,sizeof(PARTICLEFOF),CmpParticlesFoF); - } - - -void kdOutGroupFoF(KDFOF kd,char *pszFile) -{ - FILE *fp; - int i,iCnt; - - fp = fopen(pszFile,"w"); - assert(fp != NULL); - fprintf(fp,"%d\n",kd->nParticles); - iCnt = 0; - for (i=0;inGas;++i) { - if (kd->bGas) fprintf(fp,"%d\n",kd->p[iCnt++].iGroup); - else fprintf(fp,"0\n"); - } - for (i=0;inDark;++i) { - if (kd->bDark) fprintf(fp,"%d\n",kd->p[iCnt++].iGroup); - else fprintf(fp,"0\n"); - } - for (i=0;inStar;++i) { - if (kd->bStar) fprintf(fp,"%d\n",kd->p[iCnt++].iGroup); - else fprintf(fp,"0\n"); - } - fclose(fp); - } - - -void kdFinishFoF(KDFOF kd) -{ - free(kd->p); - free(kd->kdNodes); - free(kd); - } - diff --git a/yt/analysis_modules/halo_finding/fof/kd.h b/yt/analysis_modules/halo_finding/fof/kd.h deleted file mode 100644 index ded81c32f8d..00000000000 --- a/yt/analysis_modules/halo_finding/fof/kd.h +++ /dev/null @@ -1,203 +0,0 @@ -#ifndef KDFOF_HINCLUDED -#define KDFOF_HINCLUDED - -#define ROOTFOF 1 -#define LOWERFOF(i) (i<<1) -#define UPPERFOF(i) ((i<<1)+1) -#define PARENTFOF(i) (i>>1) -#define SIBLINGFOF(i) ((i&1)?i-1:i+1) -#define SETNEXTFOF(i)\ -{\ - while (i&1) i=i>>1;\ - ++i;\ - } - -#define DARKFOF 1 -#define GASFOF 2 -#define STARFOF 4 - -#define KDFOF_ORDERTEMP 256 - -typedef struct Particle { - float r[3]; - int iGroup; - int iOrder; - } PARTICLEFOF; - -typedef struct bndBound { - float fMin[3]; - float fMax[3]; - } BNDFOF; - -typedef struct kdNode { - float fSplit; 
- BNDFOF bnd; - int iDim; - int pLower; - int pUpper; - } KDNFOF; - -typedef struct kdContext { - int nBucket; - int nParticles; - int nDark; - int nGas; - int nStar; - int bDark; - int bGas; - int bStar; - int nActive; - float fTime; - float fPeriod[3]; - int nLevels; - int nNodes; - int nSplit; - PARTICLEFOF *p; - KDNFOF *kdNodes; - int nGroup; - int uSecond; - int uMicro; - } * KDFOF; - - -#define INTERSECTFOF(c,cp,fBall2,lx,ly,lz,x,y,z,sx,sy,sz)\ -{\ - float dx,dy,dz,dx1,dy1,dz1,fDist2,fMax2;\ - dx = c[cp].bnd.fMin[0]-x;\ - dx1 = x-c[cp].bnd.fMax[0];\ - dy = c[cp].bnd.fMin[1]-y;\ - dy1 = y-c[cp].bnd.fMax[1];\ - dz = c[cp].bnd.fMin[2]-z;\ - dz1 = z-c[cp].bnd.fMax[2];\ - if (dx > 0.0) {\ - if (dx1+lx < dx) {\ - dx1 += lx;\ - dx -= lx;\ - sx = x+lx;\ - fDist2 = dx1*dx1;\ - fMax2 = dx*dx;\ - }\ - else {\ - sx = x;\ - fDist2 = dx*dx;\ - fMax2 = dx1*dx1;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else if (dx1 > 0.0) {\ - if (dx+lx < dx1) {\ - dx += lx;\ - dx1 -= lx;\ - sx = x-lx;\ - fDist2 = dx*dx;\ - fMax2 = dx1*dx1;\ - }\ - else {\ - sx = x;\ - fDist2 = dx1*dx1;\ - fMax2 = dx*dx;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else {\ - sx = x;\ - fDist2 = 0.0;\ - if (dx < dx1) fMax2 = dx*dx;\ - else fMax2 = dx1*dx1;\ - }\ - if (dy > 0.0) {\ - if (dy1+ly < dy) {\ - dy1 += ly;\ - dy -= ly;\ - sy = y+ly;\ - fDist2 += dy1*dy1;\ - fMax2 += dy*dy;\ - }\ - else {\ - sy = y;\ - fDist2 += dy*dy;\ - fMax2 += dy1*dy1;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else if (dy1 > 0.0) {\ - if (dy+ly < dy1) {\ - dy += ly;\ - dy1 -= ly;\ - sy = y-ly;\ - fDist2 += dy*dy;\ - fMax2 += dy1*dy1;\ - }\ - else {\ - sy = y;\ - fDist2 += dy1*dy1;\ - fMax2 += dy*dy;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else {\ - sy = y;\ - if (dy < dy1) fMax2 += dy*dy;\ - else fMax2 += dy1*dy1;\ - }\ - if (dz > 0.0) {\ - if (dz1+lz < dz) {\ - dz1 += lz;\ - dz -= lz;\ - sz = z+lz;\ - fDist2 += dz1*dz1;\ - fMax2 += dz*dz;\ - }\ - else {\ - sz = z;\ - fDist2 += dz*dz;\ - fMax2 += dz1*dz1;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else if (dz1 > 0.0) {\ - if (dz+lz < dz1) {\ - dz += lz;\ - dz1 -= lz;\ - sz = z-lz;\ - fDist2 += dz*dz;\ - fMax2 += dz1*dz1;\ - }\ - else {\ - sz = z;\ - fDist2 += dz1*dz1;\ - fMax2 += dz*dz;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else {\ - sz = z;\ - if (dz < dz1) fMax2 += dz*dz;\ - else fMax2 += dz1*dz1;\ - }\ - if (fMax2 < fBall2) goto ContainedCell;\ - } - - -void kdTimeFoF(KDFOF,int *,int *); -int kdInitFoF(KDFOF *,int,float *); -void kdReadTipsyFoF(KDFOF,FILE *,int,int,int); -void kdBuildTreeFoF(KDFOF); -int kdFoF(KDFOF,float); -int kdTooSmallFoF(KDFOF,int); -void kdOrderFoF(KDFOF); -void kdOutGroupFoF(KDFOF,char *); -void kdFinishFoF(KDFOF); - -#endif - - - - - - - - - - - diff --git a/yt/analysis_modules/halo_finding/fof/tipsydefs.h b/yt/analysis_modules/halo_finding/fof/tipsydefs.h deleted file mode 100644 index 290be611b78..00000000000 --- a/yt/analysis_modules/halo_finding/fof/tipsydefs.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef _TIPSYDEFS_H -#define _TIPSYDEFS_H -#define MAXDIM 3 -#define forever for(;;) - -typedef float Real; - -struct gas_particle { - Real mass; - Real pos[MAXDIM]; - Real vel[MAXDIM]; - Real rho; - Real temp; - Real hsmooth; - Real metals ; - Real phi ; -} ; - -struct dark_particle { - Real mass; - Real pos[MAXDIM]; - Real vel[MAXDIM]; - Real eps; - Real phi ; -} ; - -struct star_particle { - Real mass; - Real pos[MAXDIM]; - Real vel[MAXDIM]; - Real metals ; - Real tform ; - Real eps; - Real phi ; -} ; 
- -struct dump { - double time ; - int nbodies ; - int ndim ; - int nsph ; - int ndark ; - int nstar ; -} ; - -#endif diff --git a/yt/analysis_modules/halo_finding/halo_objects.py b/yt/analysis_modules/halo_finding/halo_objects.py deleted file mode 100644 index a07df87ca73..00000000000 --- a/yt/analysis_modules/halo_finding/halo_objects.py +++ /dev/null @@ -1,1708 +0,0 @@ -""" -HOP-output data handling - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import gc -from yt.utilities.on_demand_imports import _h5py as h5py -import math -import numpy as np -import os.path as path -from functools import cmp_to_key -from yt.extern.six.moves import zip as izip - -from yt.config import ytcfg -from yt.funcs import mylog, ensure_dir_exists -from yt.utilities.math_utils import \ - get_rotation_matrix, \ - periodic_dist -from yt.utilities.physical_constants import \ - mass_sun_cgs -from yt.utilities.physical_ratios import \ - rho_crit_g_cm3_h2, \ - TINY - -from .hop.EnzoHop import RunHOP -from .fof.EnzoFOF import RunFOF - -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - ParallelAnalysisInterface, \ - parallel_blocking_call - - -class Halo(object): - """ - A data source that returns particle information about the members of a - HOP-identified halo. - """ - _distributed = False - _processing = False - _owner = 0 - indices = None - dont_wrap = ["get_sphere", "write_particle_list"] - extra_wrap = ["__getitem__"] - - def __init__(self, halo_list, id, indices=None, size=None, CoM=None, - max_dens_point=None, group_total_mass=None, max_radius=None, - bulk_vel=None, tasks=None, rms_vel=None, supp=None, ptype=None): - if ptype is None: - ptype = "all" - self.ptype = ptype - self.halo_list = halo_list - self._max_dens = halo_list._max_dens - self.id = id - self.data = halo_list._data_source - self.ds = self.data.ds - self.gridsize = (self.ds.domain_right_edge - \ - self.ds.domain_left_edge) - if indices is not None: - self.indices = halo_list._base_indices[indices] - else: - self.indices = None - # We assume that if indices = None, the instantiator has OTHER plans - # for us -- i.e., setting it somehow else - self.size = size - self.CoM = CoM - self.max_dens_point = max_dens_point - self.group_total_mass = group_total_mass - self.max_radius = max_radius - self.bulk_vel = bulk_vel - self.tasks = tasks - self.rms_vel = rms_vel - self.bin_count = None - self.overdensity = None - # A supplementary data dict. - if supp is None: - self.supp = {} - else: - self.supp = supp - self._saved_fields = {} - self._ds_sort = None - self._particle_mask = None - - @property - def particle_mask(self): - # Dynamically create the masking array for particles, and get - # the data using standard yt methods. - if self._particle_mask is not None: - return self._particle_mask - # This is from disk. - pid = self.__getitem__('particle_index') - # This is from the sphere. 
- if self._name == "RockstarHalo": - ds = self.ds.sphere(self.CoM, self._radjust * self.max_radius) - elif self._name == "LoadedHalo": - ds = self.ds.sphere(self.CoM, np.maximum(self._radjust * \ - self.ds.quan(self.max_radius, 'code_length'), \ - self.ds.index.get_smallest_dx())) - sp_pid = ds['particle_index'] - self._ds_sort = sp_pid.argsort() - sp_pid = sp_pid[self._ds_sort] - # This matches them up. - self._particle_mask = np.in1d(sp_pid, pid) - return self._particle_mask - - def center_of_mass(self): - r"""Calculate and return the center of mass. - - The center of mass of the halo is directly calculated and returned. - - Examples - -------- - >>> com = halos[0].center_of_mass() - """ - if self.CoM is not None: - return self.CoM - pm = self["particle_mass"].in_units('Msun') - c = {} - # We shift into a box where the origin is the left edge - c[0] = self["particle_position_x"] - self.ds.domain_left_edge[0] - c[1] = self["particle_position_y"] - self.ds.domain_left_edge[1] - c[2] = self["particle_position_z"] - self.ds.domain_left_edge[2] - com = [] - for i in range(3): - # A halo is likely periodic around a boundary if the distance - # between the max and min particle - # positions are larger than half the box. - # So skip the rest if the converse is true. - # Note we might make a change here when periodicity-handling is - # fully implemented. - if (c[i].max() - c[i].min()) < (self.ds.domain_width[i] / 2.): - com.append(c[i]) - continue - # Now we want to flip around only those close to the left boundary. - sel = (c[i] <= (self.ds.domain_width[i]/2)) - c[i][sel] += self.ds.domain_width[i] - com.append(c[i]) - - c = (com * pm).sum(axis=1) / pm.sum() - c = self.ds.arr(c, 'code_length') - - return c%self.ds.domain_width + self.ds.domain_left_edge - - def maximum_density(self): - r"""Return the HOP-identified maximum density. Not applicable to - FOF halos. - - Return the HOP-identified maximum density. Not applicable to FOF halos. - - Examples - -------- - >>> max_dens = halos[0].maximum_density() - """ - if self.max_dens_point is not None: - return self.max_dens_point[0] - return self._max_dens[self.id][0] - - def maximum_density_location(self): - r"""Return the location HOP identified as maximally dense. Not - applicable to FOF halos. - - Return the location HOP identified as maximally dense. - - Examples - -------- - >>> max_dens_loc = halos[0].maximum_density_location() - """ - if self.max_dens_point is not None: - return self.max_dens_point[1:] - return np.array([ - self._max_dens[self.id][1], - self._max_dens[self.id][2], - self._max_dens[self.id][3]]) - - def total_mass(self): - r"""Returns the total mass in solar masses of the halo. - - Returns the total mass in solar masses of just the particles in the - halo. - - Examples - -------- - >>> halos[0].total_mass() - """ - if self.group_total_mass is not None: - return self.group_total_mass - return self["particle_mass"].in_units('Msun').sum() - - def bulk_velocity(self): - r"""Returns the mass-weighted average velocity in cm/s. - - This calculates and returns the mass-weighted average velocity of just - the particles in the halo in cm/s. 
- - Examples - -------- - >>> bv = halos[0].bulk_velocity() - """ - if self.bulk_vel is not None: - return self.bulk_vel - pm = self["particle_mass"].in_units('Msun') - vx = (self["particle_velocity_x"] * pm).sum() - vy = (self["particle_velocity_y"] * pm).sum() - vz = (self["particle_velocity_z"] * pm).sum() - return self.ds.arr([vx, vy, vz], vx.units) / pm.sum() - - def rms_velocity(self): - r"""Returns the mass-weighted RMS velocity for the halo - particles in cgs units. - - Calculate and return the mass-weighted RMS velocity for just the - particles in the halo. The bulk velocity of the halo is subtracted - before computation. - - Examples - -------- - >>> rms_vel = halos[0].rms_velocity() - """ - if self.rms_vel is not None: - return self.rms_vel - bv = self.bulk_velocity() - pm = self["particle_mass"].in_units('Msun') - sm = pm.sum() - vx = (self["particle_velocity_x"] - bv[0]) * pm / sm - vy = (self["particle_velocity_y"] - bv[1]) * pm / sm - vz = (self["particle_velocity_z"] - bv[2]) * pm / sm - s = vx ** 2. + vy ** 2. + vz ** 2. - ms = np.mean(s) - return np.sqrt(ms) * pm.size - - def maximum_radius(self, center_of_mass=True): - r"""Returns the maximum radius in the halo for all particles, - either from the point of maximum density or from the - center of mass. - - The maximum radius from the most dense point is calculated. This - accounts for periodicity. - - Parameters - ---------- - center_of_mass : bool - True chooses the center of mass when - calculating the maximum radius. - False chooses from the maximum density location for HOP halos - (it has no effect for FOF halos). - Default = True. - - Examples - -------- - >>> radius = halos[0].maximum_radius() - """ - if self.max_radius is not None: - return self.max_radius - if center_of_mass: - center = self.center_of_mass() - else: - center = self.maximum_density_location() - rx = np.abs(self["particle_position_x"] - center[0]) - ry = np.abs(self["particle_position_y"] - center[1]) - rz = np.abs(self["particle_position_z"] - center[2]) - DW = self.data.ds.domain_right_edge - self.data.ds.domain_left_edge - r = np.sqrt(np.minimum(rx, DW[0] - rx) ** 2.0 - + np.minimum(ry, DW[1] - ry) ** 2.0 - + np.minimum(rz, DW[2] - rz) ** 2.0) - return r.max() - - def __getitem__(self, key): - return self.data[(self.ptype, key)][self.indices] - - def get_sphere(self, center_of_mass=True): - r"""Returns a sphere source. - - This will generate a new, empty sphere source centered on this halo, - with the maximum radius of the halo. This can be used like any other - data container in yt. - - Parameters - ---------- - center_of_mass : bool, optional - True chooses the center of mass when - calculating the maximum radius. - False chooses from the maximum density location for HOP halos - (it has no effect for FOF halos). - Default = True. - - Returns - ------- - sphere : `yt.data_objects.api.YTSphere` - The empty data source. - - Examples - -------- - >>> sp = halos[0].get_sphere() - """ - if center_of_mass: - center = self.center_of_mass() - else: - center = self.maximum_density_location() - radius = self.maximum_radius() - # A bit of a long-reach here... 
- sphere = self.data.ds.sphere(center, radius=radius) - return sphere - - def get_size(self): - if self.size is not None: - return self.size - return self.indices.size - - def write_particle_list(self, handle): - self._processing = True - gn = "Halo%08i" % (self.id) - handle.create_group("/%s" % gn) - for field in ["particle_position_%s" % ax for ax in 'xyz'] \ - + ["particle_velocity_%s" % ax for ax in 'xyz'] \ - + ["particle_index"]: - handle.create_dataset("/%s/%s" % (gn, field), data=self[field]) - handle.create_dataset("/%s/particle_mass" % gn, - data=self["particle_mass"].in_units('Msun')) - if ('io','creation_time') in self.data.ds.field_list: - handle.create_dataset("/%s/creation_time" % gn, - data=self['creation_time']) - self._processing = False - - def virial_mass(self, virial_overdensity=200., bins=300): - r"""Return the virial mass of the halo in Msun, - using only the particles - in the halo (no baryonic information used). - - The virial mass is calculated, using the built in `Halo.virial_info` - functionality. The mass is then returned. - - Parameters - ---------- - virial_overdensity : float - The overdensity threshold compared to the universal average when - calculating the virial mass. Default = 200. - bins : int - The number of spherical bins used to calculate overdensities. - Default = 300. - - Returns - ------- - mass : float - The virial mass in solar masses of the particles in the halo. -1 - if not virialized. - - Examples - -------- - >>> vm = halos[0].virial_mass() - """ - self.virial_info(bins=bins) - vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, - bins=bins) - if vir_bin != -1: - return self.mass_bins[vir_bin] - else: - return -1 - - def virial_radius(self, virial_overdensity=200., bins=300): - r"""Return the virial radius of the halo in code units. - - The virial radius of the halo is calculated, using only the particles - in the halo (no baryonic information used). Returns -1 if the halo is - not virialized. - - Parameters - ---------- - virial_overdensity : float - The overdensity threshold compared to the universal average when - calculating the virial radius. Default = 200. - bins : integer - The number of spherical bins used to calculate overdensities. - Default = 300. - - Returns - ------- - radius : float - The virial radius in code units of the particles in the halo. -1 - if not virialized. - - Examples - -------- - >>> vr = halos[0].virial_radius() - """ - self.virial_info(bins=bins) - vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, - bins=bins) - if vir_bin != -1: - return self.radial_bins[vir_bin] - else: - return -1 - - def virial_bin(self, virial_overdensity=200., bins=300): - r"""Returns the bin index of the virial radius of the halo. Generally, - it is better to call virial_radius instead, which calls this function - automatically. - """ - self.virial_info(bins=bins) - over = (self.overdensity > virial_overdensity) - if over.any(): - vir_bin = max(np.arange(bins + 1)[over]) - return vir_bin - else: - return -1 - - def virial_info(self, bins=300): - r"""Calculates the virial information for the halo. Generally, it is - better to call virial_radius or virial_mass instead, which calls this - function automatically. - """ - # Skip if we've already calculated for this number of bins. 
- if self.bin_count == bins and self.overdensity is not None: - return None - self.bin_count = bins - # Cosmology - h = self.ds.hubble_constant - Om_matter = self.ds.omega_matter - z = self.ds.current_redshift - period = self.ds.domain_right_edge - \ - self.ds.domain_left_edge - thissize = self.get_size() - rho_crit = rho_crit_g_cm3_h2 * h ** 2.0 * Om_matter # g cm^-3 - Msun2g = mass_sun_cgs - rho_crit = rho_crit * ((1.0 + z) ** 3.0) - # Get some pertinent information about the halo. - self.mass_bins = self.ds.arr(np.zeros(self.bin_count + 1, - dtype='float64'),'Msun') - dist = np.empty(thissize, dtype='float64') - cen = self.center_of_mass() - mark = 0 - # Find the distances to the particles. I don't like this much, but I - # can't see a way to eliminate a loop like this, either here or in - # yt.math. - for pos in izip(self["particle_position_x"], - self["particle_position_y"], self["particle_position_z"]): - dist[mark] = periodic_dist(cen, pos, period) - mark += 1 - # Set up the radial bins. - # Multiply min and max to prevent issues with digitize below. - self.radial_bins = np.logspace(math.log10(min(dist) * .99 + TINY), - math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1) - self.radial_bins = self.ds.arr(self.radial_bins,'code_length') - # Find out which bin each particle goes into, and add the particle - # mass to that bin. - inds = np.digitize(dist, self.radial_bins) - 1 - if self["particle_position_x"].size > 1: - for index in np.unique(inds): - self.mass_bins[index] += \ - np.sum(self["particle_mass"][inds == index]).in_units('Msun') - # Now forward sum the masses in the bins. - for i in range(self.bin_count): - self.mass_bins[i + 1] += self.mass_bins[i] - # Calculate the over densities in the bins. - self.overdensity = self.mass_bins * Msun2g / \ - (4./3. * math.pi * rho_crit * \ - (self.radial_bins )**3.0) - - def _get_ellipsoid_parameters_basic(self): - np.seterr(all='ignore') - # check if there are 4 particles to form an ellipsoid - # neglecting to check if the 4 particles in the same plane, - # that is almost certainly never to occur, - # will deal with it later if it ever comes up - if np.size(self["particle_position_x"]) < 4: - mylog.warning("Too few particles for ellipsoid parameters.") - return (0, 0, 0, 0, 0, 0, 0) - # Calculate the parameters that describe the ellipsoid of - # the particles that constitute the halo. This function returns - # all the parameters except for the center of mass. 
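``virial_info`` above bins particle distances from the center of mass into logarithmic radial shells, forward-sums the shell masses into an enclosed-mass profile, and converts that to a mean enclosed overdensity; ``virial_bin`` then picks the outermost bin still exceeding the requested threshold. A compressed sketch of the same calculation (names are illustrative, and the comparison density, the ``rho_crit_g_cm3_h2 * h**2 * Om_matter * (1+z)**3`` quantity above, is assumed to be supplied by the caller in units consistent with ``dist`` and ``mass``):

    import numpy as np

    def virial_radius_from_profile(dist, mass, rho_crit, overdensity=200.0, bins=300):
        tiny = 1e-30
        edges = np.logspace(np.log10(dist.min() * 0.99 + tiny),
                            np.log10(dist.max() * 1.01 + 2 * tiny), bins + 1)
        shell = np.digitize(dist, edges) - 1      # shell index for each particle
        mass_bins = np.zeros(bins + 1)
        np.add.at(mass_bins, shell, mass)         # mass deposited per shell
        enclosed = np.cumsum(mass_bins)           # enclosed-mass profile
        over = enclosed / (4.0 / 3.0 * np.pi * rho_crit * edges ** 3)
        above = np.nonzero(over > overdensity)[0]
        return edges[above.max()] if above.size else -1.0

The bin-edge convention (including the original's one-shell offset between enclosed mass and shell radius) is kept as-is rather than refined.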
- com = self.center_of_mass() - position = [self["particle_position_x"], - self["particle_position_y"], - self["particle_position_z"]] - # Locate the furthest particle from com, its vector length and index - DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]]) - position = [position[0] - com[0], - position[1] - com[1], - position[2] - com[2]] - # different cases of particles being on other side of boundary - for axis in range(np.size(DW)): - cases = np.array([position[axis], - position[axis] + DW[axis], - position[axis] - DW[axis]]) - # pick out the smallest absolute distance from com - position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases) - # find the furthest particle's index - r = np.sqrt(position[0]**2 + - position[1]**2 + - position[2]**2) - A_index = r.argmax() - mag_A = r.max() - # designate the A vector - A_vector = (position[0][A_index], - position[1][A_index], - position[2][A_index]) - # designate the e0 unit vector - e0_vector = A_vector / mag_A - # locate the tB particle position by finding the max B - e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64') - for i in range(3): - e0_vector_copy[:, i] = e0_vector[i] - rr = np.array([position[0], - position[1], - position[2]]).T # Similar to tB_vector in old code. - tC_vector = np.cross(e0_vector_copy, rr) - te2 = tC_vector.copy() - for dim in range(3): - te2[:,dim] *= np.sum(tC_vector**2., axis = 1)**(-0.5) - te1 = np.cross(te2, e0_vector_copy) - length = np.abs(-np.sum(rr * te1, axis = 1) * \ - (1. - np.sum(rr * e0_vector_copy, axis = 1)**2. * \ - mag_A**-2.)**(-0.5)) - # This problem apparently happens sometimes, that the NaNs are turned - # into infs, which messes up the nanargmax below. - length[length == np.inf] = 0. - tB_index = np.nanargmax(length) # ignores NaNs created above. - mag_B = length[tB_index] - e1_vector = te1[tB_index] - e2_vector = te2[tB_index] - temp_e0 = rr.copy() - temp_e1 = rr.copy() - temp_e2 = rr.copy() - for dim in range(3): - temp_e0[:,dim] = e0_vector[dim] - temp_e1[:,dim] = e1_vector[dim] - temp_e2[:,dim] = e2_vector[dim] - length = np.abs(np.sum(rr * temp_e2, axis = 1) * (1 - \ - np.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \ - np.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2.)**(-0.5)) - length[length == np.inf] = 0. 
- tC_index = np.nanargmax(length) - mag_C = length[tC_index] - # tilt is calculated from the rotation about x axis - # needed to align e1 vector with the y axis - # after e0 is aligned with x axis - # find the t1 angle needed to rotate about z axis to align e0 onto x-z plane - t1 = np.arctan(-e0_vector[1] / e0_vector[0]) - RZ = get_rotation_matrix(t1, (0, 0, 1)) - r1 = np.dot(RZ, e0_vector) - # find the t2 angle needed to rotate about y axis to align e0 to x - t2 = np.arctan(r1[2] / r1[0]) - RY = get_rotation_matrix(t2, (0, 1, 0)) - r2 = np.dot(RY, np.dot(RZ, e1_vector)) - # find the tilt angle needed to rotate about x axis to align e1 to y and e2 to z - tilt = np.arctan(-r2[2] / r2[1]) - return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1], - e0_vector[2], tilt) - -class HOPHalo(Halo): - _name = "HOPHalo" - pass - - -class FOFHalo(Halo): - - def maximum_density(self): - r"""Not implemented.""" - return -1 - - def maximum_density_location(self): - r"""Not implemented.""" - return self.center_of_mass() - - -class LoadedHalo(Halo): - _name = "LoadedHalo" - # See particle_mask - _radjust = 1.05 - - def __init__(self, ds, id, size=None, CoM=None, - max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None, - rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None, - e0_vec=None, tilt=None, supp=None): - - self.ds = ds - self.gridsize = (self.ds.domain_right_edge - \ - self.ds.domain_left_edge) - self.id = id - self.size = size - self.CoM = CoM - self.max_dens_point = max_dens_point - self.group_total_mass = group_total_mass - self.max_radius = max_radius - self.bulk_vel = bulk_vel - self.rms_vel = rms_vel - self.mag_A = mag_A - self.mag_B = mag_B - self.mag_C = mag_C - self.e0_vec = e0_vec - self.tilt = tilt - # locs=the names of the h5 files that have particle data for this halo - self.fnames = fnames - self.bin_count = None - self.overdensity = None - self.indices = np.array([]) # Never used for a LoadedHalo. - self._saved_fields = {} - self._ds_sort = None - self._particle_mask = None - # A supplementary data dict. - if supp is None: - self.supp = {} - else: - self.supp = supp - self._saved_fields = {} - self._ds_sort = None - self._particle_mask = None - self._pid_sort = None - - - def __getitem__(self, key): - # This function will try to get particle data in one of three ways, - # in descending preference. - # 1. From saved_fields, e.g. we've already got it. - # 2. From the halo h5 files off disk. - # 3. Use the unique particle indexes of the halo to select a missing - # field from a Sphere. - if key in self._saved_fields: - # We've already got it. - return self._saved_fields[key] - # Gotta go get it from the halo h5 files. - field_data = self._get_particle_data(self.id, self.fnames, - self.size, key) - if field_data is not None: - if key == 'particle_index': - #this is an index for turning data sorted by particle index - #into the same order as the fields on disk - self._pid_sort = field_data.argsort().argsort() - #convert to YTArray using the data from disk - if key == 'particle_mass': - field_data = self.ds.arr(field_data, 'Msun') - else: - field_data = self.ds.arr(field_data, - self.ds._get_field_info('unknown',key).units) - self._saved_fields[key] = field_data - return self._saved_fields[key] - # We won't store this field below in saved_fields because - # that would mean keeping two copies of it, one in the yt - # machinery and one here. 
- ds = self.ds.sphere(self.CoM, np.maximum(self._radjust * \ - self.ds.quan(self.max_radius, 'code_length'), \ - self.ds.index.get_smallest_dx())) - # If particle_mask hasn't been called once then _ds_sort won't have - # the proper values set yet - if self._particle_mask is None: - self.particle_mask - return ds[key][self._ds_sort][self.particle_mask][self._pid_sort] - - def _get_particle_data(self, halo, fnames, size, field): - # Given a list of file names, a halo, its size, and the desired field, - # this returns the particle data for that halo. - # First get the list of fields from the first file. Not all fields - # are saved all the time (e.g. creation_time, particle_type). - mylog.info("Getting field %s from hdf5 halo particle files." % field) - f = h5py.File(fnames[0], mode='r') - fields = f["Halo%08d" % halo].keys() - # If we dont have this field, we can give up right now. - if field not in fields: - return None - elif field == 'particle_index' or field == 'particle_type': - # the only integer field - field_data = np.empty(size, dtype='int64') - else: - field_data = np.empty(size, dtype='float64') - f.close() - # Apparently, there's a bug in h5py that was keeping the file pointer - # f closed, even though it's re-opened below. This del seems to fix - # that. - del f - offset = 0 - for fname in fnames: - f = h5py.File(fname, mode='r') - this = f["Halo%08d" % halo][field][:] - s = this.size - field_data[offset:offset + s] = this - offset += s - f.close() - del f - return field_data - - def _get_ellipsoid_parameters_basic_loadedhalo(self): - if self.mag_A is not None: - return (self.mag_A, self.mag_B, self.mag_C, self.e0_vec[0], - self.e0_vec[1], self.e0_vec[2], self.tilt) - else: - return self._get_ellipsoid_parameters_basic() - - def get_ellipsoid_parameters(self): - r"""Calculate the parameters that describe the ellipsoid of - the particles that constitute the halo. - - Parameters - ---------- - None - - Returns - ------- - tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt) - The 6-tuple has in order: - #. The center of mass as an array. - #. mag_A as a float. - #. mag_B as a float. - #. mag_C as a float. - #. e0_vector as an array. - #. tilt as a float. - - Examples - -------- - >>> params = halos[0].get_ellipsoid_parameters() - """ - - basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo() - toreturn = [self.center_of_mass()] - updated = [basic_parameters[0], basic_parameters[1], - basic_parameters[2], np.array([basic_parameters[3], - basic_parameters[4], basic_parameters[5]]), basic_parameters[6]] - toreturn.extend(updated) - return tuple(toreturn) - - def get_ellipsoid(self): - r"""Returns an ellipsoidal data object. - This will generate a new, empty ellipsoidal data object for this - halo. - - Parameters - ---------- - None. - - Returns - ------- - ellipsoid : `yt.data_objects.data_containers.YTEllipsoid` - The ellipsoidal data object. - - Examples - -------- - >>> ell = halos[0].get_ellipsoid() - """ - ep = self.get_ellipsoid_parameters() - ell = self.ds.ellipsoid(ep[0], ep[1], ep[2], ep[3], ep[4], ep[5]) - return ell - - def get_sphere(self): - r"""Returns a sphere source. - - This will generate a new, empty sphere source centered on this halo, - with the maximum radius of the halo. This can be used like any other - data container in yt. - - Parameters - ---------- - center_of_mass : bool, optional - True chooses the center of mass when - calculating the maximum radius. 
- False chooses from the maximum density location for HOP halos - (it has no effect for FOF halos). - Default = True. - - Returns - ------- - sphere : `yt.data_objects.api.YTSphere` - The empty data source. - - Examples - -------- - >>> sp = halos[0].get_sphere() - """ - cen = self.center_of_mass() - r = self.maximum_radius() - return self.ds.sphere(cen, r) - -class TextHalo(LoadedHalo): - def __init__(self, ds, id, size=None, CoM=None, - - max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None, - rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None, - e0_vec=None, tilt=None, supp=None): - - self.ds = ds - self.gridsize = (self.ds.domain_right_edge - \ - self.ds.domain_left_edge) - self.id = id - self.size = size - self.CoM = CoM - self.max_dens_point = max_dens_point - self.group_total_mass = group_total_mass - self.max_radius = max_radius - self.bulk_vel = bulk_vel - self.rms_vel = rms_vel - self.mag_A = mag_A - self.mag_B = mag_B - self.mag_C = mag_C - self.e0_vec = e0_vec - self.tilt = tilt - self.bin_count = None - self.overdensity = None - self.indices = np.array([]) # Never used for a LoadedHalo. - # A supplementary data dict. - if supp is None: - self.supp = {} - else: - self.supp = supp - - def __getitem__(self, key): - # We'll just pull it from the sphere. - return self.get_sphere()[key] - - def maximum_density(self): - r"""Undefined for text halos.""" - return -1 - - def maximum_density_location(self): - r"""Undefined, default to CoM""" - return self.center_of_mass() - - def get_size(self): - # Have to just get it from the sphere. - return self["particle_position_x"].size - - -class HaloList(object): - - _fields = ["particle_position_%s" % ax for ax in 'xyz'] - - def __init__(self, data_source, dm_only=True, redshift=-1, - ptype=None): - """ - Run hop on *data_source* with a given density *threshold*. If - *dm_only* is True (default), only run it on the dark matter particles, - otherwise on all particles. Returns an iterable collection of - *HopGroup* items. - """ - self._data_source = data_source - self.dm_only = dm_only - if ptype is None: - ptype = "all" - self.ptype = ptype - self._groups = [] - self._max_dens = {} - self.__obtain_particles() - self._run_finder() - mylog.info("Parsing outputs") - self._parse_output() - mylog.debug("Finished. 
(%s)", len(self)) - self.redshift = redshift - - def __obtain_particles(self): - if self.dm_only: - ii = self._get_dm_indices() - else: - ii = slice(None) - self.particle_fields = {} - for field in self._fields: - tot_part = self._data_source[(self.ptype, field)].size - if field == "particle_index": - self.particle_fields[field] = \ - self._data_source[(self.ptype, field)][ii].astype('int64') - else: - self.particle_fields[field] = \ - self._data_source[(self.ptype, field)][ii].astype('float64') - del self._data_source[(self.ptype, field)] - self._base_indices = np.arange(tot_part)[ii] - gc.collect() - - def _get_dm_indices(self): - if ('io','creation_time') in self._data_source.index.field_list: - mylog.debug("Differentiating based on creation time") - return (self._data_source["creation_time"] <= 0) - elif ('io','particle_type') in self._data_source.index.field_list: - mylog.debug("Differentiating based on particle type") - return (self._data_source["particle_type"] == 1) - else: - mylog.warning("No particle_type, no creation_time, so not distinguishing.") - return slice(None) - - def _parse_output(self): - unique_ids = np.unique(self.tags) - counts = np.bincount(self.tags + 1) - sort_indices = np.argsort(self.tags) - grab_indices = np.indices(self.tags.shape).ravel()[sort_indices] - dens = self.densities[sort_indices] - cp = 0 - for i in unique_ids: - cp_c = cp + counts[i + 1] - if i == -1: - cp += counts[i + 1] - continue - group_indices = grab_indices[cp:cp_c] - self._groups.append(self._halo_class(self, i, group_indices, - ptype=self.ptype)) - md_i = np.argmax(dens[cp:cp_c]) - px, py, pz = \ - [self.particle_fields['particle_position_%s' % ax][group_indices] - for ax in 'xyz'] - self._max_dens[i] = (dens[cp:cp_c][md_i], px[md_i], - py[md_i], pz[md_i]) - cp += counts[i + 1] - - def __len__(self): - return len(self._groups) - - def __iter__(self): - for i in self._groups: - yield i - - def __getitem__(self, key): - return self._groups[key] - - def write_out(self, filename, ellipsoid_data=False): - r"""Write out standard halo information to a text file. - - Parameters - ---------- - filename : String - The name of the file to write to. - - ellipsoid_data : bool. - Whether to print the ellipsoidal information to the file. - Default = False. 
- - Examples - -------- - >>> halos.write_out("HopAnalysis.out") - """ - if hasattr(filename, 'write'): - f = filename - else: - f = open(filename, "w") - f.write("# HALOS FOUND WITH %s\n" % (self._name)) - f.write("# REDSHIFT OF OUTPUT = %f\n" % (self.redshift)) - - if not ellipsoid_data: - f.write("\t".join(["# Group","Mass","# part","max dens" - "x","y","z", "center-of-mass", - "x","y","z", - "vx","vy","vz","max_r","rms_v","\n"])) - else: - f.write("\t".join(["# Group","Mass","# part","max dens" - "x","y","z", "center-of-mass", - "x","y","z", - "vx","vy","vz","max_r","rms_v", - "mag_A", "mag_B", "mag_C", "e0_vec0", - "e0_vec1", "e0_vec2", "tilt", "\n"])) - - for group in self: - f.write("%10i\t" % group.id) - f.write("%0.9e\t" % group.total_mass()) - f.write("%10i\t" % group.get_size()) - f.write("%0.9e\t" % group.maximum_density()) - f.write("\t".join(["%0.9e" % v for v in \ - group.maximum_density_location()])) - f.write("\t") - f.write("\t".join(["%0.9e" % v for v in group.center_of_mass()])) - f.write("\t") - f.write("\t".join(["%0.9e" % v for v in group.bulk_velocity()])) - f.write("\t") - f.write("%0.9e\t" % group.maximum_radius()) - f.write("%0.9e\t" % group.rms_velocity()) - if ellipsoid_data: - f.write("\t".join(["%0.9e" % v for v in group._get_ellipsoid_parameters_basic()])) - f.write("\n") - f.flush() - f.close() - - def write_particle_lists_txt(self, prefix, fp=None): - r"""Write out the names of the HDF5 files containing halo particle data - to a text file. Needed in particular for parallel analysis output. - - Parameters - ---------- - prefix : String - The prefix for the name of the file. - - Examples - -------- - >>> halos.write_particle_lists_txt("halo-parts") - """ - if hasattr(fp, 'write'): - f = fp - else: - f = open("%s.txt" % prefix, "w") - for group in self: - if group.tasks is not None: - fn = "" - for task in group.tasks: - fn += "%s.h5 " % self.comm.get_filename(prefix, rank=task) - elif self._distributed: - fn = "%s.h5" % self.comm.get_filename(prefix, - rank=group._owner) - else: - fn = "%s.h5" % self.comm.get_filename(prefix) - gn = "Halo%08i" % (group.id) - f.write("%s %s\n" % (gn, fn)) - f.flush() - f.close() - -class HOPHaloList(HaloList): - """ - Run hop on *data_source* with a given density *threshold*. If - *dm_only* is True (default), only run it on the dark matter particles, otherwise - on all particles. Returns an iterable collection of *HopGroup* items. - """ - _name = "HOP" - _halo_class = HOPHalo - _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \ - ["particle_mass"] - - def __init__(self, data_source, threshold=160.0, dm_only=True, - ptype=None): - self.threshold = threshold - mylog.info("Initializing HOP") - HaloList.__init__(self, data_source, dm_only, ptype=ptype) - - def _run_finder(self): - self.densities, self.tags = \ - RunHOP(self.particle_fields["particle_position_x"] / self.period[0], - self.particle_fields["particle_position_y"] / self.period[1], - self.particle_fields["particle_position_z"] / self.period[2], - self.particle_fields["particle_mass"].in_units('Msun'), - self.threshold) - self.particle_fields["densities"] = self.densities - self.particle_fields["tags"] = self.tags - - def write_out(self, filename="HopAnalysis.out", ellipsoid_data=False): - r"""Write out standard halo information to a text file. - - Parameters - ---------- - filename : String - The name of the file to write to. Default = "HopAnalysis.out". - - ellipsoid_data : bool. - Whether to print the ellipsoidal information to the file. - Default = False. 
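The ``write_out`` method above emits one tab-separated row per halo: group id, total mass, particle count, the maximum-density value and its location, the center of mass, the bulk velocity, the maximum radius and the RMS velocity (plus seven ellipsoid columns when requested). The loader that reads these catalogs back appears further down in this diff; a minimal sketch of parsing one 15-column row under that layout:

    import numpy as np

    def parse_catalog_row(line):
        cols = line.split()
        return {
            "id": int(cols[0]),
            "total_mass": float(cols[1]),                      # Msun
            "n_particles": int(cols[2]),
            "max_dens": float(cols[3]),
            "max_dens_location": np.array(cols[4:7], dtype="float64"),
            "center_of_mass": np.array(cols[7:10], dtype="float64"),
            "bulk_velocity": np.array(cols[10:13], dtype="float64"),
            "max_radius": float(cols[13]),
            "rms_velocity": float(cols[14]),
        }

Rows whose first character is ``#`` are header comments and should be skipped, as ``LoadedHaloList._retrieve_halos`` below does.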
- - Examples - -------- - >>> halos.write_out("HopAnalysis.out") - """ - HaloList.write_out(self, filename, ellipsoid_data) - - -class FOFHaloList(HaloList): - _name = "FOF" - _halo_class = FOFHalo - - def __init__(self, data_source, link=0.2, dm_only=True, redshift=-1, - ptype=None): - self.link = link - mylog.info("Initializing FOF") - HaloList.__init__(self, data_source, dm_only, redshift=redshift, - ptype=ptype) - - def _run_finder(self): - self.tags = \ - RunFOF(self.particle_fields["particle_position_x"] / self.period[0], - self.particle_fields["particle_position_y"] / self.period[1], - self.particle_fields["particle_position_z"] / self.period[2], - self.link) - self.densities = np.ones(self.tags.size, dtype='float64') * -1 - self.particle_fields["densities"] = self.densities - self.particle_fields["tags"] = self.tags - - def write_out(self, filename="FOFAnalysis.out", ellipsoid_data=False): - r"""Write out standard halo information to a text file. - - Parameters - ---------- - filename : String - The name of the file to write to. Default = "FOFAnalysis.out". - - ellipsoid_data : bool. - Whether to print the ellipsoidal information to the file. - Default = False. - - Examples - -------- - >>> halos.write_out("FOFAnalysis.out") - """ - HaloList.write_out(self, filename, ellipsoid_data) - - -class LoadedHaloList(HaloList): - _name = "Loaded" - - def __init__(self, ds, basename): - ParallelAnalysisInterface.__init__(self) - self.ds = ds - self._groups = [] - self.basename = basename - self._retrieve_halos() - - def _retrieve_halos(self): - # First get the halo particulars. - with open("%s.out" % self.basename, 'r') as fh: - lines = fh.readlines() - # The location of particle data for each halo. - locations = self._collect_halo_data_locations() - halo = 0 - for line in lines: - orig = line - # Skip the comment lines at top. - if line[0] == "#": continue - line = line.split() - # get the particle data - size = int(line[2]) - fnames = locations[halo] - # Everything else - CoM = np.array([float(line[7]), float(line[8]), float(line[9])]) - max_dens_point = np.array([float(line[3]), float(line[4]), - float(line[5]), float(line[6])]) - group_total_mass = float(line[1]) - max_radius = float(line[13]) - bulk_vel = np.array([float(line[10]), float(line[11]), - float(line[12])]) - rms_vel = float(line[14]) - if len(line) == 15: - # No ellipsoid information - self._groups.append(LoadedHalo(self.ds, halo, size = size, - CoM = CoM, - max_dens_point = max_dens_point, - group_total_mass = group_total_mass, max_radius = max_radius, - bulk_vel = bulk_vel, rms_vel = rms_vel, fnames = fnames)) - elif len(line) == 22: - # Ellipsoid information - mag_A = float(line[15]) - mag_B = float(line[16]) - mag_C = float(line[17]) - e0_vec0 = float(line[18]) - e0_vec1 = float(line[19]) - e0_vec2 = float(line[20]) - e0_vec = np.array([e0_vec0, e0_vec1, e0_vec2]) - tilt = float(line[21]) - self._groups.append(LoadedHalo(self.ds, halo, size = size, - CoM = CoM, - max_dens_point = max_dens_point, - group_total_mass = group_total_mass, max_radius = max_radius, - bulk_vel = bulk_vel, rms_vel = rms_vel, fnames = fnames, - mag_A = mag_A, mag_B = mag_B, mag_C = mag_C, e0_vec = e0_vec, - tilt = tilt)) - else: - mylog.error("I am unable to parse this line. Too many or too few items. %s" % orig) - halo += 1 - - def _collect_halo_data_locations(self): - # The halos are listed in order in the file. 
- with open("%s.txt" % self.basename, 'r') as fh: - lines = fh.readlines() - locations = [] - realpath = path.realpath("%s.txt" % self.basename) - for line in lines: - line = line.split() - # Prepend the hdf5 file names with the full path. - temp = [] - for item in line[1:]: - # This assumes that the .txt is in the same place as - # the h5 files, which is a good one I think. - item = item.split("/") - temp.append(path.join(path.dirname(realpath), item[-1])) - locations.append(temp) - return locations - -class TextHaloList(HaloList): - _name = "Text" - - def __init__(self, ds, fname, columns, comment): - ParallelAnalysisInterface.__init__(self) - self.ds = ds - self._groups = [] - self._retrieve_halos(fname, columns, comment) - - def _retrieve_halos(self, fname, columns, comment): - # First get the halo particulars. - with open(fname, 'r') as fh: - lines = fh.readlines() - halo = 0 - base_set = ['x', 'y', 'z', 'r'] - keys = columns.keys() - extra = (len(keys) > 4) - for line in lines: - # Skip commented lines. - if line[0] == comment: continue - line = line.split() - x = float(line[columns['x']]) - y = float(line[columns['y']]) - z = float(line[columns['z']]) - r = float(line[columns['r']]) - cen = np.array([x, y, z]) - # Now we see if there's anything else. - if extra: - temp_dict = {} - for key in columns: - if key not in base_set: - val = float(line[columns[key]]) - temp_dict[key] = val - self._groups.append(TextHalo(self.ds, halo, - CoM = cen, max_radius = r, supp = temp_dict)) - halo += 1 - -class GenericHaloFinder(HaloList, ParallelAnalysisInterface): - def __init__(self, ds, data_source, padding=0.0, ptype=None): - ParallelAnalysisInterface.__init__(self) - self.ds = ds - self.index = ds.index - self.center = (np.array(data_source.right_edge) + - np.array(data_source.left_edge)) / 2.0 - if ptype is None: - ptype = "all" - self.ptype = ptype - - def _parse_halolist(self, threshold_adjustment): - groups = [] - max_dens = {} - hi = 0 - LE, RE = self.bounds - for halo in self._groups: - this_max_dens = halo.maximum_density_location() - # if the most dense particle is in the box, keep it - if np.all((this_max_dens >= LE) & (this_max_dens <= RE)): - # Now we add the halo information to OURSELVES, taken from the - # self.hop_list - # We need to mock up the HOPHaloList thingie, so we need to - # set self._max_dens - max_dens_temp = list(self._max_dens[halo.id])[0] / \ - threshold_adjustment - max_dens[hi] = [max_dens_temp] + \ - list(self._max_dens[halo.id])[1:4] - groups.append(self._halo_class(self, hi, ptype=self.ptype)) - groups[-1].indices = halo.indices - self.comm.claim_object(groups[-1]) - hi += 1 - del self._groups, self._max_dens # explicit >> implicit - self._groups = groups - self._max_dens = max_dens - - def _join_halolists(self): - # First we get the total number of halos the entire collection - # has identified - # Note I have added a new method here to help us get information - # about processors and ownership and so forth. - # _mpi_info_dict returns a dict of {proc: whatever} where whatever is - # what is fed in on each proc. 
- mine, halo_info = self.comm.mpi_info_dict(len(self)) - nhalos = sum(halo_info.values()) - # Figure out our offset - my_first_id = sum([v for k, v in halo_info.items() if k < mine]) - # Fix our max_dens - max_dens = {} - for i, m in self._max_dens.items(): - max_dens[i + my_first_id] = m - self._max_dens = max_dens - for halo in self._groups: - halo._max_dens = self._max_dens - # sort the list by the size of the groups - # Now we add ghost halos and reassign all the IDs - # Note: we already know which halos we own! - after = my_first_id + len(self._groups) - # One single fake halo, not owned, does the trick - self._groups = [self._halo_class(self, i, ptype=self.ptype) - for i in range(my_first_id)] + \ - self._groups + \ - [self._halo_class(self, i, ptype=self.ptype) - for i in range(after, nhalos)] - id = 0 - for proc in sorted(halo_info.keys()): - for halo in self._groups[id:id + halo_info[proc]]: - halo.id = id - halo._distributed = self._distributed - halo._owner = proc - id += 1 - - def haloCmp(h1, h2): - def cmp(a, b): - return (a > b) ^ (a < b) - c = cmp(h1.total_mass(), h2.total_mass()) - if c != 0: - return -1 * c - if c == 0: - return cmp(h1.center_of_mass()[0], h2.center_of_mass()[0]) - self._groups.sort(key=cmp_to_key(haloCmp)) - sorted_max_dens = {} - for i, halo in enumerate(self._groups): - if halo.id in self._max_dens: - sorted_max_dens[i] = self._max_dens[halo.id] - halo.id = i - self._max_dens = sorted_max_dens - for i, halo in enumerate(self._groups): - halo._max_dens = self._max_dens - - def _reposition_particles(self, bounds): - # This only does periodicity. We do NOT want to deal with anything - # else. The only reason we even do periodicity is the - LE, RE = bounds - dw = self.ds.domain_right_edge - self.ds.domain_left_edge - for i, ax in enumerate('xyz'): - arr = self._data_source[self.ptype, "particle_position_%s" % ax] - arr[arr < LE[i] - self.padding] += dw[i] - arr[arr > RE[i] + self.padding] -= dw[i] - - def write_out(self, filename, ellipsoid_data=False): - r"""Write out standard halo information to a text file. - - Parameters - ---------- - filename : String - The name of the file to write to. - - ellipsoid_data : bool. - Whether to print the ellipsoidal information to the file. - Default = False. - - Examples - -------- - >>> halos.write_out("HopAnalysis.out") - """ - ensure_dir_exists(filename) - f = self.comm.write_on_root(filename) - HaloList.write_out(self, f, ellipsoid_data) - - - def write_particle_lists_txt(self, prefix): - r"""Write out the names of the HDF5 files containing halo particle data - to a text file. - - This function writes out the names of all the HDF5 files that would - contain halo particle data. Only the root processor writes out. - - Parameters - ---------- - prefix : String - The prefix for the name of the file. - - Examples - -------- - >>> halos.write_particle_lists_txt("halo-parts") - """ - ensure_dir_exists(prefix) - f = self.comm.write_on_root("%s.txt" % prefix) - HaloList.write_particle_lists_txt(self, prefix, fp=f) - - - @parallel_blocking_call - def write_particle_lists(self, prefix): - r"""Write out the particle data for halos to HDF5 files. - - This function will accept a filename prefix, and for every halo it will - write out an HDF5 file containing the positions, velocities, indices - and masses of the constituent particles. However, if the halo finder - is run in parallel, halos will only be written out on the processors to - which they belong. 
See `Halo.write_particle_lists_txt` for how to - track these halos globally across files. - - Parameters - ---------- - prefix : String - The prefix for the name(s) of the HDF5 files. - - Examples - -------- - >>> halos.write_particle_lists("halo-parts") - """ - ensure_dir_exists(prefix) - fn = "%s.h5" % self.comm.get_filename(prefix) - f = h5py.File(fn, mode="w") - for halo in self._groups: - if not self.comm.is_mine(halo): continue - halo.write_particle_list(f) - f.close() - - def dump(self, basename="HopAnalysis", ellipsoid_data=False): - r"""Save the full halo data to disk. - - This function will save the halo data in such a manner that it can be - easily re-loaded later using `GenericHaloFinder.load`. - This is similar in concept to - pickling the data, but outputs the data in the already-established - data formats. The simple halo data is written to a text file - (e.g. "HopAnalysis.out") using write_out(), and the particle data - to hdf5 files (e.g. "HopAnalysis.h5") - using write_particle_lists(). - - Parameters - ---------- - basename : String - The base name for the files the data will be written to. Default = - "HopAnalysis". - - ellipsoid_data : bool. - Whether to save the ellipsoidal information to the files. - Default = False. - - Examples - -------- - >>> halos.dump("MyHalos") - """ - ensure_dir_exists(basename) - self.write_out("%s.out" % basename, ellipsoid_data) - self.write_particle_lists(basename) - self.write_particle_lists_txt(basename) - -class HOPHaloFinder(GenericHaloFinder, HOPHaloList): - r"""HOP halo finder. - - Halos are built by: - 1. Calculating a density for each particle based on a smoothing kernel. - 2. Recursively linking particles to other particles from lower density - particles to higher. - 3. Geometrically proximate chains are identified and - 4. merged into final halos following merging rules. - - Lower thresholds generally produce more halos, and the largest halos - become larger. Also, halos become more filamentary and over-connected. - - Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body - Simulations." ApJ (1998) vol. 498 pp. 137-142 - - Parameters - ---------- - ds : `Dataset` - The dataset on which halo finding will be conducted. - subvolume : `yt.data_objects.data_containers.YTSelectionContainer`, optional - A region over which HOP will be run, which can be used to run HOP - on a subvolume of the full volume. Default = None, which defaults - to the full volume automatically. - threshold : float - The density threshold used when building halos. Default = 160.0. - dm_only : bool (deprecated) - If True, only dark matter particles are used when building halos. - This has been deprecated. Instead, the ptype keyword should be - used to specify a particle type. - Default = True. - ptype : string - When dm_only is set to False, this sets the type of particle to be - used for halo finding, with a default of "all". This should not be - used when dm_only is set to True. - padding : float - When run in parallel, the finder needs to surround each subvolume - with duplicated particles for halo finding to work. This number - must be no smaller than the radius of the largest halo in the box - in code units. Default = 0.02. - total_mass : float - If HOP is run on the same dataset mulitple times, the total mass - of particles in Msun units in the full volume can be supplied here - to save time. 
- This must correspond to the particles being operated on, meaning - if stars are included in the halo finding, they must be included - in this mass as well, and visa-versa. - If halo finding on a subvolume, this still corresponds with the - mass in the entire volume. - Default = None, which means the total mass is automatically - calculated. - - Examples - -------- - >>> ds = load("RedshiftOutput0000") - >>> halos = HaloFinder(ds) - """ - def __init__(self, ds, subvolume=None, threshold=160, dm_only=True, - ptype=None, padding=0.02, total_mass=None): - if subvolume is not None: - ds_LE = np.array(subvolume.left_edge) - ds_RE = np.array(subvolume.right_edge) - self.period = ds.domain_right_edge - ds.domain_left_edge - self._data_source = ds.all_data() - GenericHaloFinder.__init__(self, ds, self._data_source, padding, - ptype=ptype) - # do it once with no padding so the total_mass is correct - # (no duplicated particles), and on the entire volume, even if only - # a small part is actually going to be used. - self.padding = 0.0 - padded, LE, RE, self._data_source = \ - self.partition_index_3d(ds=self._data_source, - padding=self.padding) - - if dm_only: - mylog.warn("dm_only is deprecated. " + - "Use ptype to specify a particle type, instead.") - - # Don't allow dm_only=True and setting a ptype. - if dm_only and ptype is not None: - raise RuntimeError( - "If dm_only is True, ptype must be None. " + \ - "dm_only must be False if ptype is set.") - - if ptype is None: - ptype = "all" - self.ptype = ptype - - # For scaling the threshold, note that it's a passthrough - if total_mass is None: - if dm_only: - select = self._get_dm_indices() - total_mass = \ - self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum') - else: - total_mass = self.comm.mpi_allreduce( - self._data_source.quantities.total_quantity( - (self.ptype, "particle_mass")).in_units('Msun'), op='sum') - # MJT: Note that instead of this, if we are assuming that the particles - # are all on different processors, we should instead construct an - # object representing the entire domain and sum it "lazily" with - # Derived Quantities. - if subvolume is not None: - self._data_source = ds.region([0.] * 3, ds_LE, ds_RE) - else: - self._data_source = ds.all_data() - self.padding = padding # * ds["unitary"] # This should be clevererer - padded, LE, RE, self._data_source = \ - self.partition_index_3d(ds=self._data_source, - padding=self.padding) - self.bounds = (LE, RE) - # sub_mass can be skipped if subvolume is not used and this is not - # parallel. - if subvolume is None and \ - ytcfg.getint("yt", "__topcomm_parallel_size") == 1: - sub_mass = total_mass - elif dm_only: - select = self._get_dm_indices() - sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64') - else: - sub_mass = \ - self._data_source.quantities.total_quantity( - (self.ptype, "particle_mass")).in_units('Msun') - HOPHaloList.__init__(self, self._data_source, - threshold * total_mass / sub_mass, dm_only, ptype=self.ptype) - self._parse_halolist(total_mass / sub_mass) - - -class FOFHaloFinder(GenericHaloFinder, FOFHaloList): - r"""Friends-of-friends halo finder. - - Halos are found by linking together all pairs of particles closer than - some distance from each other. Particles may have multiple links, - and halos are found by recursively linking together all such pairs. - - Larger linking lengths produce more halos, and the largest halos - become larger. 
Also, halos become more filamentary and over-connected. - - Davis et al. "The evolution of large-scale structure in a universe - dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394 - - Parameters - ---------- - ds : `Dataset` - The dataset on which halo finding will be conducted. - subvolume : `yt.data_objects.data_containers.YTSelectionContainer`, optional - A region over which HOP will be run, which can be used to run HOP - on a subvolume of the full volume. Default = None, which defaults - to the full volume automatically. - link : float - If positive, the interparticle distance (compared to the overall - average) used to build the halos. If negative, this is taken to be - the *actual* linking length, and no other calculations will be - applied. Default = 0.2. - dm_only : bool (deprecated) - If True, only dark matter particles are used when building halos. - This has been deprecated. Instead, the ptype keyword should be - used to specify a particle type. - Default = True. - ptype : string - When dm_only is set to False, this sets the type of particle to be - used for halo finding, with a default of "all". This should not be - used when dm_only is set to True. - padding : float - When run in parallel, the finder needs to surround each subvolume - with duplicated particles for halo finding to work. This number - must be no smaller than the radius of the largest halo in the box - in code units. Default = 0.02. - - Examples - -------- - >>> ds = load("RedshiftOutput0000") - >>> halos = FOFHaloFinder(ds) - """ - def __init__(self, ds, subvolume=None, link=0.2, dm_only=True, - ptype=None, padding=0.02): - if subvolume is not None: - ds_LE = np.array(subvolume.left_edge) - ds_RE = np.array(subvolume.right_edge) - self.period = ds.domain_right_edge - ds.domain_left_edge - self.ds = ds - self.index = ds.index - self.redshift = ds.current_redshift - self._data_source = ds.all_data() - GenericHaloFinder.__init__(self, ds, self._data_source, padding) - self.padding = 0.0 # * ds["unitary"] # This should be clevererer - # get the total number of particles across all procs, with no padding - padded, LE, RE, self._data_source = \ - self.partition_index_3d(ds=self._data_source, - padding=self.padding) - - if dm_only: - mylog.warn("dm_only is deprecated. " + - "Use ptype to specify a particle type, instead.") - - # Don't allow dm_only=True and setting a ptype. - if dm_only and ptype is not None: - raise RuntimeError( - "If dm_only is True, ptype must be None. " + \ - "dm_only must be False if ptype is set.") - - if ptype is None: - ptype = "all" - self.ptype = ptype - - if link > 0.0: - n_parts = self.comm.mpi_allreduce(self._data_source["particle_position_x"].size, op='sum') - # get the average spacing between particles - #l = ds.domain_right_edge - ds.domain_left_edge - #vol = l[0] * l[1] * l[2] - # Because we are now allowing for datasets with non 1-periodicity, - # but symmetric, vol is always 1. - vol = 1. - avg_spacing = (float(vol) / n_parts) ** (1. / 3.) - linking_length = link * avg_spacing - else: - linking_length = np.abs(link) - self.padding = padding - if subvolume is not None: - self._data_source = ds.region([0.] 
* 3, ds_LE, - ds_RE) - else: - self._data_source = ds.all_data() - padded, LE, RE, self._data_source = \ - self.partition_index_3d(ds=self._data_source, - padding=self.padding) - self.bounds = (LE, RE) - # reflect particles around the periodic boundary - #self._reposition_particles((LE, RE)) - # here is where the FOF halo finder is run - mylog.info("Using a linking length of %0.3e", linking_length) - FOFHaloList.__init__(self, self._data_source, linking_length, dm_only, - redshift=self.redshift, ptype=self.ptype) - self._parse_halolist(1.) - -HaloFinder = HOPHaloFinder - - -class LoadHaloes(GenericHaloFinder, LoadedHaloList): - r"""Load the full halo data into memory. - - This function takes the output of `GenericHaloFinder.dump` and - re-establishes the list of halos in memory. This enables the full set - of halo analysis features without running the halo finder again. To - be precise, the particle data for each halo is only read in when - necessary, so examining a single halo will not require as much memory - as is required for halo finding. - - Parameters - ---------- - basename : String - The base name of the files that will be read in. This should match - what was used when `GenericHaloFinder.dump` was called. Default = - "HopAnalysis". - - Examples - -------- - >>> ds = load("data0005") - >>> halos = LoadHaloes(ds, "HopAnalysis") - """ - def __init__(self, ds, basename): - self.basename = basename - LoadedHaloList.__init__(self, ds, self.basename) - -class LoadTextHaloes(GenericHaloFinder, TextHaloList): - r"""Load a text file of halos. - - Like LoadHaloes, but when all that is available is a plain - text file. This assumes the text file has the 3-positions of halos - along with a radius. The halo objects created are spheres. - - Parameters - ---------- - fname : String - The name of the text file to read in. - - columns : dict - A dict listing the column name : column number pairs for data - in the text file. It is zero-based (like Python). - An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}. - Any column name outside of ['x', 'y', 'z', 'r'] will be attached - to each halo object in the supplementary dict 'supp'. See - example. - - comment : String - If the first character of a line is equal to this, the line is - skipped. Default = "#". - - Examples - -------- - >>> ds = load("data0005") - >>> halos = LoadTextHaloes(ds, "list.txt", - {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}, - comment = ";") - >>> halos[0].supp['m'] - 3.28392048e14 - """ - def __init__(self, ds, filename, columns, comment = "#"): - TextHaloList.__init__(self, ds, filename, columns, comment) - -LoadTextHalos = LoadTextHaloes diff --git a/yt/analysis_modules/halo_finding/hop/EnzoHop.c b/yt/analysis_modules/halo_finding/hop/EnzoHop.c deleted file mode 100644 index 34323b3ed3b..00000000000 --- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c +++ /dev/null @@ -1,472 +0,0 @@ -/******************************************************************************* -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-*******************************************************************************/ - -// -// EnzoHop -// A module for running HOP halo finding on a set of particles -// - -#include "Python.h" -#include "structmember.h" -#include -#include -#include -#include -#include "kd.h" -#include "hop.h" -#include "hop_numpy.h" - -#include "numpy/ndarrayobject.h" - -#ifndef Py_TYPE - #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) -#endif - -void PrepareKD(KD kd); -int kdMedianJst(KD kd, int d, int l, int u); -void kdUpPass(KD kd, int iCell); -void initgrouplist(Grouplist *g); -void hop_main(KD kd, HC *my_comm, float densthresh); -void regroup_main(float dens_outer, HC *my_comm); -static PyObject *_HOPerror; - -int convert_particle_arrays( - PyObject *oxpos, PyObject *oypos, PyObject *ozpos, PyObject *omass, - PyArrayObject **xpos, PyArrayObject **ypos, PyArrayObject **zpos, - PyArrayObject **mass) -{ - int num_particles; - - /* First the regular source arrays */ - - *xpos = (PyArrayObject *) PyArray_FromAny(oxpos, - PyArray_DescrFromType(NPY_FLOAT64), 1, 1, - NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL); - if(!*xpos){ - PyErr_Format(_HOPerror, - "EnzoHop: xpos didn't work."); - return -1; - } - num_particles = PyArray_SIZE(*xpos); - - *ypos = (PyArrayObject *) PyArray_FromAny(oypos, - PyArray_DescrFromType(NPY_FLOAT64), 1, 1, - NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL); - if((!*ypos)||(PyArray_SIZE(*ypos) != num_particles)) { - PyErr_Format(_HOPerror, - "EnzoHop: xpos and ypos must be the same length."); - return -1; - } - - *zpos = (PyArrayObject *) PyArray_FromAny(ozpos, - PyArray_DescrFromType(NPY_FLOAT64), 1, 1, - NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL); - if((!*zpos)||(PyArray_SIZE(*zpos) != num_particles)) { - PyErr_Format(_HOPerror, - "EnzoHop: xpos and zpos must be the same length."); - return -1; - } - - *mass = (PyArrayObject *) PyArray_FromAny(omass, - PyArray_DescrFromType(NPY_FLOAT64), 1, 1, - NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL); - if((!*mass)||(PyArray_SIZE(*mass) != num_particles)) { - PyErr_Format(_HOPerror, - "EnzoHop: xpos and mass must be the same length."); - return -1; - } - - return num_particles; - -} - - -static PyObject * -Py_EnzoHop(PyObject *obj, PyObject *args) -{ - PyObject *oxpos, *oypos, *ozpos, - *omass; - - PyArrayObject *xpos, *ypos, *zpos, - *mass; - npy_float64 totalmass = 0.0; - float normalize_to = 1.0; - float thresh = 160.0; - int i, num_particles; - KD kd; - int nBucket = 16, kdcount = 0; - PyArrayObject *particle_density; - HC my_comm; - PyArrayObject *particle_group_id; - PyObject *return_value; - - xpos=ypos=zpos=mass=NULL; - - if (!PyArg_ParseTuple(args, "OOOO|ff", - &oxpos, &oypos, &ozpos, &omass, &thresh, &normalize_to)) - return PyErr_Format(_HOPerror, - "EnzoHop: Invalid parameters."); - - num_particles = convert_particle_arrays( - oxpos, oypos, ozpos, omass, - &xpos, &ypos, &zpos, &mass); - if (num_particles < 0) goto _fail; - - for(i = 0; i < num_particles; i++) - totalmass+=*(npy_float64*)PyArray_GETPTR1(mass,i); - totalmass /= normalize_to; - - /* initialize the kd hop structure */ - - kdInit(&kd, nBucket); - kd->nActive = num_particles; - kd->p = malloc(sizeof(PARTICLE)*num_particles); - if (kd->p == NULL) { - fprintf(stderr, "failed allocating particles.\n"); - goto _fail; - } - - /* Copy positions into kd structure. 
*/ - particle_density = (PyArrayObject *) - PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos), - PyArray_DescrFromType(NPY_FLOAT64)); - - fprintf(stdout, "Copying arrays for %d particles\n", num_particles); - kd->np_masses = (npy_float64*) PyArray_DATA(mass); - kd->np_pos[0] = (npy_float64*) PyArray_DATA(xpos); - kd->np_pos[1] = (npy_float64*) PyArray_DATA(ypos); - kd->np_pos[2] = (npy_float64*) PyArray_DATA(zpos); - kd->np_densities = (npy_float64*) PyArray_DATA(particle_density); - kd->totalmass = totalmass; - for (i = 0; i < num_particles; i++) kd->p[i].np_index = i; - - my_comm.s = newslice(); - my_comm.gl = (Grouplist*)malloc(sizeof(Grouplist)); - if(my_comm.gl == NULL) { - fprintf(stderr, "failed allocating Grouplist\n"); - goto _fail; - } - initgrouplist(my_comm.gl); - - fprintf(stderr, "Calling hop... %d %0.3e\n",num_particles,thresh); - hop_main(kd, &my_comm, thresh); - - fprintf(stderr, "Calling regroup...\n"); - regroup_main(thresh, &my_comm); - - // Now we need to get the groupID, realID and the density. - // This will give us the index into the original array. - // Additionally, note that we don't really need to tie the index - // back to the ID in this code, as we can do that back in the python code. - // All we need to do is provide density and group information. - - // Tags (as per writetagsf77) are in gl.s->ntag+1 and there are gl.s->numlist of them. - particle_group_id = (PyArrayObject *) - PyArray_SimpleNewFromDescr(1, PyArray_DIMS(xpos), - PyArray_DescrFromType(NPY_INT32)); - - for (i = 0; i < num_particles; i++) { - // tag is in gl.s->ntag[i+1] - *(npy_int32*)(PyArray_GETPTR1(particle_group_id, i)) = - (npy_int32) my_comm.s->ntag[i+1]; - } - - kdFinish(kd); - free(my_comm.gl); - free_slice(my_comm.s); - - PyArray_UpdateFlags(particle_density, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_density)); - PyArray_UpdateFlags(particle_group_id, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id)); - return_value = Py_BuildValue("NN", particle_density, particle_group_id); - - Py_DECREF(xpos); - Py_DECREF(ypos); - Py_DECREF(zpos); - Py_DECREF(mass); - - /* We don't need this, as it's done in kdFinish - if(kd->p!=NULL)free(kd->p); - */ - - return return_value; - -_fail: - Py_XDECREF(xpos); - Py_XDECREF(ypos); - Py_XDECREF(zpos); - Py_XDECREF(mass); - - if(kd->p!=NULL)free(kd->p); - - return NULL; - -} - -static PyMethodDef _HOPMethods[] = { - {"RunHOP", Py_EnzoHop, METH_VARARGS}, - {NULL, NULL} /* Sentinel */ -}; - -/* platform independent*/ -#ifdef MS_WIN32 -__declspec(dllexport) -#endif - -// -// Now a fun wrapper class for the kD-tree -// - -typedef struct { - PyObject_HEAD - KD kd; - PyArrayObject *xpos, *ypos, *zpos; - PyArrayObject *mass, *densities; - int num_particles; -} kDTreeType; - -static int -kDTreeType_init(kDTreeType *self, PyObject *args, PyObject *kwds) -{ - int nBuckets = 16, i; - float normalize_to = 1.0; - static char *kwlist[] = {"xpos", "ypos", "zpos", "mass", - "nbuckets", "norm", NULL}; - PyObject *oxpos, *oypos, *ozpos, - *omass; - npy_float64 totalmass = 0.0; - - self->xpos=self->ypos=self->zpos=self->mass=NULL; - - - if (! PyArg_ParseTupleAndKeywords(args, kwds, "OOOO|if", kwlist, - &oxpos, &oypos, &ozpos, &omass, - &nBuckets, &normalize_to)) - return -1; /* Should this give an error? 
*/ - - kdInit(&self->kd, nBuckets); - - self->num_particles = convert_particle_arrays( - oxpos, oypos, ozpos, omass, - &self->xpos, &self->ypos, &self->zpos, &self->mass); - - self->kd->nActive = self->num_particles; - self->kd->p = malloc(sizeof(PARTICLE)*self->num_particles); - if (self->kd->p == NULL) { - fprintf(stderr, "failed allocating particles.\n"); - goto _fail; - } - - /* Now we set up our Density array */ - self->densities = (PyArrayObject *) - PyArray_SimpleNewFromDescr(1, PyArray_DIMS(self->xpos), - PyArray_DescrFromType(NPY_FLOAT64)); - - for(i= 0; i < self->num_particles; i++) { - self->kd->p[i].np_index = i; - *(npy_float64*)(PyArray_GETPTR1(self->densities, i)) = 0.0; - totalmass+=*(npy_float64*)PyArray_GETPTR1(self->mass,i); - } - totalmass /= normalize_to; - - - self->kd->np_masses = (npy_float64 *)PyArray_DATA(self->mass); - self->kd->np_pos[0] = (npy_float64 *)PyArray_DATA(self->xpos); - self->kd->np_pos[1] = (npy_float64 *)PyArray_DATA(self->ypos); - self->kd->np_pos[2] = (npy_float64 *)PyArray_DATA(self->zpos); - self->kd->np_densities = (npy_float64 *)PyArray_DATA(self->densities); - self->kd->totalmass = totalmass; - - PrepareKD(self->kd); - kdBuildTree(self->kd); - - return 0; - - _fail: - Py_XDECREF(self->xpos); - Py_XDECREF(self->ypos); - Py_XDECREF(self->zpos); - Py_XDECREF(self->mass); - - if(self->kd->p!=NULL)free(self->kd->p); - - return -1; -} - -static void -kDTreeType_dealloc(kDTreeType *self) -{ - kdFinish(self->kd); - Py_XDECREF(self->xpos); - Py_XDECREF(self->ypos); - Py_XDECREF(self->zpos); - Py_XDECREF(self->mass); - - Py_TYPE(self)->tp_free((PyObject*)self); -} - -static PyObject * -kDTreeType_up_pass(kDTreeType *self, PyObject *args) { - int iCell; - - if (!PyArg_ParseTuple(args, "i", &iCell)) - return PyErr_Format(_HOPerror, - "kDTree.up_pass: invalid parameters."); - - if(iCell >= self->num_particles) - return PyErr_Format(_HOPerror, - "kDTree.up_pass: iCell cannot be >= num_particles!"); - - kdUpPass(self->kd, iCell); - return Py_None; -} - -static PyObject * -kDTreeType_median_jst(kDTreeType *self, PyObject *args) { - int d, l, u, median; - PyObject *omedian; - - if (!PyArg_ParseTuple(args, "iii", &d, &l, &u)) - return PyErr_Format(_HOPerror, - "kDTree.median_jst: invalid parameters."); - - if(d >= 3) - return PyErr_Format(_HOPerror, - "kDTree.median_jst: d cannot be >= 3!"); - - if(l >= self->num_particles) - return PyErr_Format(_HOPerror, - "kDTree.median_jst: l cannot be >= num_particles!"); - - if(u >= self->num_particles) - return PyErr_Format(_HOPerror, - "kDTree.median_jst: u cannot be >= num_particles!"); - - median = kdMedianJst(self->kd, d, l, u); - - omedian = PyLong_FromLong((long)median); - return omedian; -} - -static PyMemberDef kDTreeType_members[] = { - { "xpos", T_OBJECT, offsetof(kDTreeType, xpos), 0, - "The xposition array."}, - { "ypos", T_OBJECT, offsetof(kDTreeType, ypos), 0, - "The yposition array."}, - { "zpos", T_OBJECT, offsetof(kDTreeType, zpos), 0, - "The zposition array."}, - { "mass", T_OBJECT, offsetof(kDTreeType, mass), 0, - "The mass array."}, - { "densities", T_OBJECT, offsetof(kDTreeType, densities), 0, - "The density array."}, - { "num_particles", T_INT, offsetof(kDTreeType, num_particles), 0, - "The number of particles"}, - { NULL } -}; - -static PyMethodDef -kDTreeType_methods[] = { - { "up_pass", (PyCFunction) kDTreeType_up_pass, METH_VARARGS, - "Pass up something or another, I'm not really sure."}, - { "median_jst", (PyCFunction) kDTreeType_median_jst, METH_VARARGS, - "Use the JST Median algorithm on 
two points along a dimension."}, - // typically there would be more here... - - { NULL } -}; - -static PyTypeObject -kDTreeTypeDict = { - PyVarObject_HEAD_INIT(NULL, 0) - /* ob_size */ - "kDTree", /* tp_name */ - sizeof(kDTreeType), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)kDTreeType_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags*/ - "kDTree object", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - kDTreeType_methods, /* tp_methods */ - kDTreeType_members, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)kDTreeType_init, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ -}; - -PyMODINIT_FUNC -#if PY_MAJOR_VERSION >= 3 -#define _RETVAL m -PyInit_EnzoHop(void) -#else -#define _RETVAL -initEnzoHop(void) -#endif -{ - PyObject *m, *d; -#if PY_MAJOR_VERSION >= 3 - static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "EnzoHop", /* m_name */ - "EnzoHop Module", /* m_doc */ - -1, /* m_size */ - _HOPMethods, /* m_methods */ - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL, /* m_free */ - }; - m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("EnzoHop", _HOPMethods); -#endif - d = PyModule_GetDict(m); - _HOPerror = PyErr_NewException("EnzoHop.HOPerror", NULL, NULL); - PyDict_SetItemString(d, "error", _HOPerror); - - kDTreeTypeDict.tp_new = PyType_GenericNew; - if (PyType_Ready(&kDTreeTypeDict) < 0) { - return _RETVAL; - } - - Py_INCREF(&kDTreeTypeDict); - PyModule_AddObject(m, "kDTree", (PyObject*)&kDTreeTypeDict); - - import_array(); - return _RETVAL; -} - -/* - * Local Variables: - * mode: C - * c-file-style: "python" - * End: - */ diff --git a/yt/analysis_modules/halo_finding/hop/README b/yt/analysis_modules/halo_finding/hop/README deleted file mode 100644 index 31d35486a23..00000000000 --- a/yt/analysis_modules/halo_finding/hop/README +++ /dev/null @@ -1,22 +0,0 @@ -Matthew Turk -May 2008 - -This code (described below) has been modified to be wrapped as a shared library -callable from Python, as a part of the yt toolkit. - -Stephen Skory -May/June 2007 - -This is a new implementation of hop for enzo datasets, to replace the -fragile 'enzohop.' enzohop uses the enzo grid functionality to extract -the particle data from the HDF5 datasets. newhop uses plain HDF5 C++ calls -to extract the data, which is then fed into the hop mechanism. As far as I -know, this version is fine with 64 bit integers/floats, which enzohop isn't. - -There are a few versions of newhop which build on datastar just fine. I -haven't tested it on other machines. The default build 'newhop' is for -packed datasets and will include both stars and dm in the grouping. -THe other versions are for non-packed datasets, or if you want to only -consider dm particles for grouping. Hop doesn't like datasets with too -many particles, (I've never done tests, but I know that 20 million -particles give hop problems) so sometimes dm-only is the only way to go. 
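The C extension and README removed here implement the "hop" stage described in the ``HOPHaloFinder`` docstring above: estimate a density for each particle, link every particle to its densest neighbor, and follow those links uphill until they terminate at a local density maximum, which seeds a group. A schematic, pure-NumPy illustration of the uphill-hopping step, assuming precomputed density and densest-neighbor arrays (this is a sketch of the algorithm from Eisenstein & Hut 1998, not a drop-in replacement for the deleted extension, and it omits the saddle-point regrouping that ``regroup_main`` performs):

    import numpy as np

    def hop_to_maxima(densities, densest_neighbor):
        # densities: (N,) per-particle density estimates.
        # densest_neighbor: (N,) index of the densest particle among each
        # particle's nearest neighbors (possibly the particle itself).
        n = densities.size
        target = np.arange(n)
        # Particles at least as dense as their densest neighbor are maxima
        # and point to themselves; everyone else hops uphill.
        uphill = densities[densest_neighbor] > densities
        target[uphill] = densest_neighbor[uphill]
        # Pointer-jump until every chain terminates at a maximum.
        while True:
            nxt = target[target]
            if np.array_equal(nxt, target):
                break
            target = nxt
        # Relabel the surviving maxima as consecutive group ids.
        return np.unique(target, return_inverse=True)[1]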
diff --git a/yt/analysis_modules/halo_finding/hop/__init__.py b/yt/analysis_modules/halo_finding/hop/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/halo_finding/hop/hop.h b/yt/analysis_modules/halo_finding/hop/hop.h deleted file mode 100644 index e13ed72c89b..00000000000 --- a/yt/analysis_modules/halo_finding/hop/hop.h +++ /dev/null @@ -1,36 +0,0 @@ -#include "slice.h" -//#define free(A) if(A==NULL)fprintf(stderr,"FREEING DOUBLE\n");fprintf(stderr,"Freeing "#A" ("__FILE__":%d)\n",__LINE__);free(A); - -/* ----------------------------------------------------------------------- */ -/* The following structures track all the information about the groups */ - -typedef struct groupstruct { - int npart; /* Number of particles in the group */ - int npartcum; /* Cumulative number of particles */ - int nread; /* Number read so far, also a utility field */ - double compos[3], comvel[3];/* Lists of group CoM position and velocities */ - double comtemp[3]; /* Temporary CoM position */ - int idmerge; /* The new group ID # after merging. Not necessarily - unique! */ - int rootgroup; /* The fully traced group id */ -} Group; /* Type Group is defined */ - -typedef struct groupliststruct { - int npart; /* Number of particles in the simulation */ - int ngroups; /* Number of groups in list */ - int nnewgroups; /* Number of groups after relabeling */ - int npartingroups; /* Number of particles in groups */ - Group *list; /* List of groups, zero-offset */ -} Grouplist; /* Type Grouplist is defined */ - - -typedef struct hopComm { - int ngroups; - int nb; - float *gdensity; - float *g1vec; - float *g2vec; - float *fdensity; - Grouplist *gl; - Slice *s; -} HC; diff --git a/yt/analysis_modules/halo_finding/hop/hop_hop.c b/yt/analysis_modules/halo_finding/hop/hop_hop.c deleted file mode 100644 index 4c990ffaf42..00000000000 --- a/yt/analysis_modules/halo_finding/hop/hop_hop.c +++ /dev/null @@ -1,943 +0,0 @@ -/* HOP.C -- Daniel Eisenstein, 1997 -Based on a paper by Daniel Eisenstein & Piet Hut, -"HOP: A New Group-Finding Algorithm for N-body Simulations." -See the included documentation or view it at -http://www.sns.ias.edu/~eisenste/hop/hop_doc.html */ - -/* main() was customized from that of SMOOTH v2.0.1, by Joachim Stadel - and the NASA HPCC ESS at the University of Washington Dept. of Astronomy. */ -/* PrepareKD() is a code fragment from the same program. */ -/* ssort() is a C-translation of the Slatec FORTRAN routine of - R.E. Jones and J.A. Wisniewski (SNLA) */ -/* The other routines were written by DJE. 
*/ - -/* Version 1.0 (12/15/97) -- Original Release */ - -#include -#include -#ifdef _WIN32 -#define _USE_MATH_DEFINES -#endif -#include -#include -#include -#include -#include "kd.h" -#include "hop.h" -#include "smooth.h" -#include "hop_numpy.h" - -//#include "macros_and_parameters.h" -#define ISYM "d" -#define GSYM "g" -#define FSYM "f" - -/* To give info to the user: INFORM("info"); */ -#define INFORM(string) printf(string); fflush(stdout) - -int SnapNumber; - -int ReadSimulationFile(KD, FILE *); - -void smDensityTH(SMX smx,int pi,int nSmooth,int *pList,float *fList); - -void smHop(SMX smx,int pi,int nSmooth,int *pList,float *fList); -void FindGroups(SMX smx); -void SortGroups(SMX smx); - -void MergeGroupsHash(SMX smx); -void smMergeHash(SMX smx,int pi,int nSmooth,int *pList,float *fList); -void ReSizeSMX(SMX smx, int nSmooth); - -void PrepareKD(KD kd); -void binOutHop(SMX smx, HC *my_comm, float densthresh); -void outGroupMerge(SMX smx, HC *my_comm); - -/* void main(int argc,char **argv) */ -void hop_main(KD kd, HC *my_comm, float densthresh) -{ - /* KD kd; */ - SMX smx; - int nBucket,nSmooth,i,j; - FILE *fp, *fpb; - char ach[80],achFile[80], *inputfile, *densfile; - float fPeriod[3]; - int bDensity,bGroup,bSym,bMerge,nDens,nHop,nMerge,bTopHat; - float fDensThresh; - - nBucket = 16; - nSmooth = 64; - nDens = 64; - nHop = -1; -/* fDensThresh = 3.0; */ - fDensThresh = -1.0; - bDensity = 3; - bGroup = 3; - bMerge = 3; - bSym = 1; - bTopHat = 0; - strcpy(achFile,"output_hop"); - inputfile = NULL; - i = 1; -/* for (j=0;j<3;++j) fPeriod[j] = HUGE; */ - for (j=0;j<3;++j) fPeriod[j] = 1.0; - nMerge = 4; - - - if (nHop<0) nHop=nDens; - if (bDensity==0) nSmooth = nHop+1; - else nSmooth = nDens+1; - /* When smSmooth() is asked for nSmooth particles, it seems to - generally return nSmooth-1 particles, including primary itself. - Hence, when we want nDens or nSmooth particles (including the - primary) we should ask for one more. By convention, I've chosen - to have nMerge reflect the number *not* including the primary, - so in this case we need to ask for two more! 
*/ - - assert(!bMerge || nMergenHop = nHop; - smx->nDens = nDens; - smx->nMerge = nMerge; - smx->nGroups = 0; - smx->fDensThresh = fDensThresh; - - INFORM("Building Tree...\n"); - kdBuildTree(kd); - - if (bDensity) { - INFORM("Finding Densities...\n"); - if (bTopHat) smSmooth(smx,smDensityTH); - else if (bSym) smSmooth(smx,smDensitySym); - else smSmooth(smx,smDensity); - } /* Else, we've read them */ - if (bGroup) { - INFORM("Finding Densest Neighbors...\n"); - if (bDensity && nHop=nSmooth) { - nSmooth = nHop+1; - ReSizeSMX(smx,nSmooth); - } - smSmooth(smx,smHop); - } - } - - INFORM("Grouping...\n"); - if (bGroup) FindGroups(smx); - if (bGroup) SortGroups(smx); - - if (bMerge) { - INFORM("Merging Groups...\n"); - MergeGroupsHash(smx); - } - - kdOrder(kd); - INFORM("Writing Output...\n"); - - if (bMerge&2) { - smx->nSmooth=nSmooth; /* Restore this for output */ - outGroupMerge(smx, my_comm); - } - if (bMerge) free(smx->hash); - - if (bGroup&2) { - binOutHop(smx, my_comm, densthresh); - } - if (bGroup) {free(smx->densestingroup); free(smx->nmembers);} - smFinish(smx); - //kdFinish(kd); - INFORM("All Done!"); - return; -} - - -/* ============================================================= */ -/* ===================== New Density Routine =================== */ -/* ============================================================= */ - -void smDensityTH(SMX smx,int pi,int nSmooth,int *pList,float *fList) -/* Find density only using top-hat kernal. */ -{ -#ifdef DIFFERENT_MASSES - int j; - float totalmass; - for (j=0,totalmass=0.0; jkd, pList[j]); - NP_DENS(smx->kd, pi) = totalmass*0.75*M_1_PI/ - smx->pfBall2[pi]/sqrt(smx->pfBall2[pi]); -#else - /* This case is simple: the total mass is nSmooth times the mass - per particle */ - NP_DENS(smx->kd, pi) = (nSmooth)*smx->kd->fMass*0.75*M_1_PI/ - smx->pfBall2[pi]/sqrt(smx->pfBall2[pi]); -#endif - return; -} - -/* ============================================================= */ -/* ================== Hop to Neighbors/Form Groups ============= */ -/* ============================================================= */ - -void smHop(SMX smx,int pi,int nSmooth,int *pList,float *fList) -/* Look at the nHop nearest particles and find the one with the -highest density. Store its ID number in iHop as -1-ID (to make it negative) */ -/* nSmooth tends to be the expected value (smx->nSmooth-1) but can vary plus -or minus 1 (at least). */ -/* If Merging is turned off, nMerge should be huge so as to avoid extra -sorting below */ -{ - int i,max, search, didsort; - float maxden; - void ssort(float X[], int Y[], int N, int KFLAG); - - /* If the density is less than the threshold requirement, then assign 0 */ - if (NP_DENS(smx->kd, pi)fDensThresh) { - smx->kd->p[pi].iHop = 0; - return; - } - - /* If we have exactly the right number, then it doesn't matter how - we search them. Otherwise, we need to sort first. */ - /* We can destroy pList and fList if we want. fList holds the square radii*/ - - search = smx->nHop; - if (smx->nHop>nSmooth) search = nSmooth; /* Very rare exception */ - - if (smx->nHopnMerge+2kd, pList[i])>maxden) { - max = i; - maxden = NP_DENS(smx->kd, pList[i]); - } - } - smx->kd->p[pi].iHop = -1-pList[max]; - - /* check to see if the particle we link to doesn't link back - to ourselves, pi. If it does, connect this particle (pi) to itself. 
- This can only happen if pList[max] < pi*/ - if (pList[max] < pi) { - if (smx->kd->p[pList[max]].iHop == -1-pi) { - smx->kd->p[pi].iHop = -1-pi; - } - } - - /* If a sort was done, then we can save time in the Merge step by - recording the new Ball radius. */ - /* Place the new radius in between the two boundary because occasionally - the floating point math comes out strange when comparing two floats */ - if (didsort && smx->nMerge+2pfBall2[pi] = 0.5*(fList[smx->nMerge+1]+fList[smx->nMerge]); - return; -} - -/* ----------------------------------------------------------------- */ - -void FindGroups(SMX smx) -/* Number the maxima. Trace each particle uphill to a maximum. */ -/* The local maxima were stored as in iHop as -1-ID (negative numbers); -now we will store group number as positive numbers (1..n) in the same spot */ -/* Zero is left as an error condition */ -/* The particles MUST be left in the BuildTree order, for that is how the -iHop tracing is done */ -/* Allocate space for densestingroup, from 0 to nGroups -(inclusive) and store the particle number of the maxima, which is the -densest particle in the group. Ditto for nmembers[], the number of -particles in the group */ -{ - int j, ng, current, localmax; - PARTICLE *p; - - smx->nGroups = 0; - /* First look for maxima, where particle ID = iHop. Number the groups */ - for (j=0, p=smx->kd->p;jkd->nActive;j++,p++) - if (p->iHop == -1-j) { /* Was p->iOrder */ - /* Yes, it's a maximum */ - smx->nGroups++; - /* p->iHop = smx->nGroups; */ - } - - /* Now catalog the maxima, before numbering the groups */ - smx->densestingroup = (int *)malloc((size_t)((smx->nGroups+1)*sizeof(int))); - assert(smx->densestingroup!=NULL); - smx->nmembers = (int *)malloc((size_t)(sizeof(int)*(smx->nGroups+1))); - assert(smx->nmembers!=NULL); - - ng = 0; - for (j=0,p=smx->kd->p;jkd->nActive;j++,p++) - if (p->iHop== -1-j) { - /* It's a maximum */ - ng++; - smx->densestingroup[ng] = p->iOrder; - p->iHop = ng; - } - - /* Now take the remaining particles and trace up to a maximum */ - for (j=0,p=smx->kd->p;jkd->nActive;j++,p++) { - if (p->iHop>=0) continue; /* It's a maximum or an error */ - localmax = -1-p->iHop; - while (smx->kd->p[localmax].iHop<0) - localmax = -1-smx->kd->p[localmax].iHop; - ng = smx->kd->p[localmax].iHop; - /* Now assign this group number to the whole lineage! */ - /* Note that errors (iHop=0) will propagate */ - current = -1-p->iHop; - p->iHop = ng; - while (smx->kd->p[current].iHop<0) { - localmax = -1-smx->kd->p[current].iHop; - smx->kd->p[current].iHop = ng; - current = localmax; - } - } - return; -} - -/* ----------------------------------------------------------------- */ - -void SortGroups(SMX smx) -/* Renumber the groups in order of number of members. 
*/ -/* Move the group numbering from unit offset to zero offset */ -/* Move error condition (group=0) to (group=-1) */ -/* Store the number of members and most dense particle in each group */ -{ - int j, *indx, *irank, *ip; - PARTICLE *p; - void make_rank_table(int n, int *ivect, int *rank); - - indx = (int *)malloc((size_t)(sizeof(int)*(smx->nGroups+1))); - assert(indx!=NULL); - irank = (int *)malloc((size_t)(sizeof(int)*(smx->nGroups+1))); - assert(irank!=NULL); - - /* Count the number of members in each group */ - for (j=0;j<=smx->nGroups;j++) smx->nmembers[j]=0; - for (j=0,p=smx->kd->p;jkd->nActive;j++,p++) - smx->nmembers[p->iHop]++; - - make_rank_table(smx->nGroups, smx->nmembers,irank); - - for (j=1;j<=smx->nGroups;j++) irank[j] = smx->nGroups-irank[j]; - irank[0] = -1; /* Move old 0's to new -1's */ - /* irank[j] is the new group number of group j: zero-offset, ordered - large to small */ - - /* Relabel all the particles */ - for (j=0,p=smx->kd->p;jkd->nActive;j++,p++) - p->iHop = irank[p->iHop]; - - /* Sort the nmembers and densestingroup lists to reflect the new ordering */ - /* Use indx as a temp space */ - for (j=1;j<=smx->nGroups;j++) indx[irank[j]]=smx->densestingroup[j]; - ip = smx->densestingroup; - smx->densestingroup = indx; - indx = ip; - /* Number of error (old_group=0) is in nmembers[0]; move it to [nGroups] */ - for (j=1;j<=smx->nGroups;j++) indx[irank[j]]=smx->nmembers[j]; - indx[smx->nGroups]=smx->nmembers[0]; - free(smx->nmembers); - smx->nmembers = indx; - - free(irank); - /* Note that the memory allocated to indx is now used by smx->densestingroup - and so it should not be free'd. */ - return; -} - -/* ================================================================== */ -/* ========================== Group Merging ========================= */ -/* ================================================================== */ - -void MergeGroupsHash(SMX smx) -/* We're going to look through the particles looking for boundary particles, -defined as particles with close neighbors of a different group, and store -the most dense boundary point (average of the two points) */ -/* The matrix of boundaries is stored in a hash table */ -/* SortGroups() should be called previous to this, so that all the -particles are in the assumed group numbering, i.e. 0 to ngroup-1, with --1 being unattached. The tags are not altered */ -/* In smHop, if nMerge+2 was smaller than nSmooth, we set the new radius -for searching. If not, we left the old radius alone. Either way, we're -ready to go. 
*/ -{ - int j, k, g, next, newgroup; - - ReSizeSMX(smx, smx->nMerge+2); /* Alter the smoothing scale on smx */ - smx->nHashLength = smx->nGroups*10+1; - smx->hash = (Boundary *)malloc(smx->nHashLength*sizeof(Boundary)); - assert(smx->hash!=NULL); - for (j=0;jnHashLength;j++) { - smx->hash[j].nGroup1 = -1; - smx->hash[j].nGroup2 = -1; - smx->hash[j].fDensity = -1.0; - } /* Mark the slot as unused */ - - smReSmooth(smx,smMergeHash); /* Record all the boundary particles */ - return; -} - -/* ----------------------------------------------------------------- */ - -void smMergeHash(SMX smx,int pi,int nSmooth,int *pList,float *fList) -/* Look at the list for groups which are not that of the particle */ -/* If found, and if density is high enough, then mark it as a boundary */ -/* by recording it in the hash table */ -{ - int j,group; - float averdensity; - int g1,g2,count; - unsigned long hashpoint; - Boundary *hp; - int search; - void ssort(float X[], int Y[], int N, int KFLAG); - - group = smx->kd->p[pi].iHop; - if (group==(-1)) return; /* This particle isn't in a group */ - - /* It seems that BallGather doesn't always get the right number of - particles....*/ - search = nSmooth; - if (nSmooth>smx->nMerge+1) { - ssort(fList-1,pList-1,nSmooth,2); - search = smx->nMerge+1; - } - for (j=0;jkd->p[pList[j]].iHop; - if (g2==-1 || g2==group) continue; /* Same group or unassigned */ - /* It's in a different group; we need to connect the two */ - if (groupkd, pi) + - NP_DENS(smx->kd, pList[j])); - hashpoint = (g1+1)*g2; /* Avoid multiplying by 0 */ - hashpoint = hashpoint % smx->nHashLength; - hp = smx->hash+hashpoint; - count = 0; - for (;;) { - if (hp->nGroup1==(-1)) { /* Empty slot */ - hp->nGroup1 = g1; - hp->nGroup2 = g2; - hp->fDensity = averdensity; - break; - } - if (hp->nGroup1==g1 && hp->nGroup2==g2) { - /* We've seen this pair of groups before */ - if (hp->fDensity > averdensity) break; - else { - hp->fDensity = averdensity; - break; - } - } - /* Else, this slot was full, go to the next one */ - hp++; - if (hp>=smx->hash+smx->nHashLength) hp = smx->hash; - if (++count>1000000) { - fprintf(stderr,"Hash Table is too full.\n"); - exit(1); - } - } - /* Look at the next particle */ - } - return; -} - - -/* ----------------------------------------------------------------- */ - -void ReSizeSMX(SMX smx, int nSmooth) -/* Set a new smoothing length, resizing the arrays which depend on this, -but leaving the particle information intact. 
*/ -/* However, because we won't always have resized pfBall2 (the search -radius) correctly, we won't reduce the size of the fList and pList -arrays */ -{ - PQ_STATIC; - if (nSmooth>smx->nSmooth) { /* We're increasing the size */ - smx->nListSize = nSmooth+RESMOOTH_SAFE; - free(smx->fList); - smx->fList = (float *)malloc(smx->nListSize*sizeof(float)); - assert(smx->fList != NULL); - free(smx->pList); - smx->pList = (int *)malloc(smx->nListSize*sizeof(int)); - assert(smx->pList != NULL); - } - smx->nSmooth=nSmooth; - free(smx->pq); - smx->pq = (PQ *)malloc(nSmooth*sizeof(PQ)); - assert(smx->pq != NULL); - PQ_INIT(smx->pq,nSmooth); - return; -} - -/* ===================================================================== */ -/* ===================== Input/Output, Binary and ASCII ================ */ -/* ===================================================================== */ - -void PrepareKD(KD kd) -/* This labels all the particles and finds the min/max of the positions */ -/* It used to appear in kd.c within kdReadTipsy(), but it seems so general -that I'll spare the user the trouble of including it in any custom input -routines */ -{ - BND bnd; - int i, j; - - /* Label the particles, so that we can restore the order at the end */ - for (i=0;inActive;i++) { - kd->p[i].iOrder=i; - } - /* - ** Calculate Bounds. - */ - for (j=0;j<3;++j) { - bnd.fMin[j] = NP_POS(kd, 0, j); - bnd.fMax[j] = NP_POS(kd, 0, j); - } - for (i=1;inActive;++i) { - for (j=0;j<3;++j) { - if (bnd.fMin[j] > NP_POS(kd, i, j)) - bnd.fMin[j] = NP_POS(kd, i, j); - else if (bnd.fMax[j] < NP_POS(kd, i, j)) - bnd.fMax[j] = NP_POS(kd, i, j); - } - } - kd->bnd = bnd; - return; -} - -void binOutHop(SMX smx, HC *my_comm, float densthresh) -/* Write Group tag for each particle. Particles should be ordered. */ -/* Binary file: nActive, nGroups, list of Groups */ -{ - int j,dummy; - /* FILE *blahfp = fopen("Part-PreMergeGroup.txt","w"); *//* S Skory */ - Grouplist *g = my_comm->gl; - Slice *s = my_comm->s; - - g->npart = s->numlist = s->numpart = smx->kd->nActive; - g->ngroups = smx->nGroups; - s->ntag = ivector(1,s->numlist); - //s->ID = ivector(1,s->numlist); - for (j=0;jkd->nActive;j++) { - //s->ID[1+j] = smx->kd->p[j].iID; /* S Skory's addition */ - if (NP_DENS(smx->kd,j) < densthresh) s->ntag[j+1] = -1; - else s->ntag[j+1] = smx->kd->p[j].iHop; - - } - - /* Here I'm going to add on the end of the file the real particle IDs for all the particles - added above, in the same order as above. 
S Skory */ - return; -} - -/* ----------------------------------------------------------------- */ - -void outGroupMerge(SMX smx, HC *my_comm) -/* Write an ASCII file with information on the groups and group merging */ -/* Start the boundary list with the only ### line */ -/* Groups should be ordered before calling this (else densities will be wrong)*/ -{ - int j, den; - Boundary *hp; - int nb = 0; - - my_comm->gdensity = vector(0,smx->nGroups-1); - for (j=0;jnGroups;j++) { - den = smx->densestingroup[j]; - my_comm->gdensity[j]=NP_DENS(smx->kd, den); - } - for (j=0, hp=smx->hash;jnHashLength; j++,hp++) - if (hp->nGroup1>=0)nb++; - my_comm->ngroups = smx->nGroups; - my_comm->nb = nb; - my_comm->g1vec = vector(0,nb); - my_comm->g2vec = vector(0,nb); - my_comm->fdensity = vector(0,smx->nHashLength); - nb = 0; - for (j=0, hp=smx->hash;jnHashLength; j++,hp++) - if (hp->nGroup1>=0){ - my_comm->g1vec[nb] = hp->nGroup1; - my_comm->g2vec[nb] = hp->nGroup2; - my_comm->fdensity[nb++] = hp->fDensity; - } - return; -} - - -/* ================================================================== */ -/* ======================= Sorting ================================== */ -/* ================================================================== */ - -typedef struct index_struct { - float value; - int index; -} *ptrindex; - -int cmp_index(const void *a, const void *b) -{ - if ( ((ptrindex)a)->value<((ptrindex)b)->value) return -1; - else if ( ((ptrindex)a)->value>((ptrindex)b)->value) return 1; - else return 0; -} - -void make_rank_table(int n, int *ivect, int *rank) -/* Given a vector of integers ivect[1..n], construct a rank table rank[1..n] -so that rank[j] contains the ordering of element j, with rank[j]=n indicating -that the jth element was the highest, and rank[j]=1 indicating that it -was the lowest. 
Storage for rank[] should be declared externally */ -/* I don't think this routine is particularly fast, but it's a -miniscule fraction of the runtime */ -{ - int j; - ptrindex sortvect; - - sortvect = (ptrindex)malloc(n*sizeof(struct index_struct)); - for (j=0;j T) { - X[IJ] = X[I]; - X[I] = T; - T = X[IJ]; - } - L = J; - - /* If last element of array is less than than T, interchange with T */ - - if (X[J] < T) { - X[IJ] = X[J]; - X[J] = T; - T = X[IJ]; - - /* If first element of array is greater than T, interchange with T */ - - if (X[I] > T) { - X[IJ] = X[I]; - X[I] = T; - T = X[IJ]; - } - } - - /* Find an element in the second half of the array which is smaller */ - /* than T */ - -line40: L = L-1; - if (X[L] > T) goto line40; - - /* Find an element in the first half of the array which is greater */ - /* than T */ - -line50: K = K+1; - if (X[K] < T) goto line50; - - /* Interchange these elements */ - - if (K <= L) { - TT = X[L]; - X[L] = X[K]; - X[K] = TT; - goto line40; - } - - /* Save upper and lower subscripts of the array yet to be sorted */ - - if (L-I > J-K) { - IL[M] = I; - IU[M] = L; - I = K; - M = M+1; - } else { - IL[M] = K; - IU[M] = J; - J = L; - M = M+1; - } - goto line70; - - /* Begin again on another portion of the unsorted array */ - -line60: M = M-1; - if (M == 0) goto line190; - I = IL[M]; - J = IU[M]; - -line70: if (J-I >= 1) goto line30; - if (I == 1) goto line20; - I = I-1; - -line80: I = I+1; - if (I == J) goto line60; - T = X[I+1]; - if (X[I] <= T) goto line80; - K = I; - -line90: X[K+1] = X[K]; - K = K-1; - if (T < X[K]) goto line90; - X[K+1] = T; - goto line80; - - /* Sort X and carry Y along */ - -line100: M = 1; - I = 1; - J = NN; - R = 0.375E0; - -line110: if (I == J) goto line150; - if (R <= 0.5898437E0) - R = R+3.90625E-2; - else R = R-0.21875E0; - -line120: K = I; - - /* Select a central element of the array and save it in location T */ - - IJ = I + (int)((J-I)*R); - T = X[IJ]; - TY = Y[IJ]; - - /* If first element of array is greater than T, interchange with T */ - - if (X[I] > T) { - X[IJ] = X[I]; - X[I] = T; - T = X[IJ]; - Y[IJ] = Y[I]; - Y[I] = TY; - TY = Y[IJ]; - } - L = J; -; - /* If last element of array is less than T, interchange with T */ - - if (X[J] < T) { - X[IJ] = X[J]; - X[J] = T; - T = X[IJ]; - Y[IJ] = Y[J]; - Y[J] = TY; - TY = Y[IJ]; - - /* If first element of array is greater than T, interchange with T */ - - if (X[I] > T) { - X[IJ] = X[I]; - X[I] = T; - T = X[IJ]; - Y[IJ] = Y[I]; - Y[I] = TY; - TY = Y[IJ]; - } - } - - /* Find an element in the second half of the array which is smaller */ - /* than T */ - -line130: L = L-1; - if (X[L] > T) goto line130; - - /* Find an element in the first half of the array which is greater */ - /* than T */ - -line140: K = K+1; - if (X[K] < T) goto line140; - - /* Interchange these elements */ - - if (K <= L) { - TT = X[L]; - X[L] = X[K]; - X[K] = TT; - TTY = Y[L]; - Y[L] = Y[K]; - Y[K] = TTY; - goto line130; - } - - /* Save upper and lower subscripts of the array yet to be sorted */ - - if (L-I > J-K) { - IL[M] = I; - IU[M] = L; - I = K; - M = M+1; - } else { - IL[M] = K; - IU[M] = J; - J = L; - M = M+1; - } - goto line160; - - /* Begin again on another portion of the unsorted array */ - -line150: M = M-1; - if (M == 0) goto line190; - I = IL[M]; - J = IU[M]; - -line160: if (J-I >= 1) goto line120; - if (I == 1) goto line110; - I = I-1; - -line170: I = I+1; - if (I == J) goto line150; - T = X[I+1]; - TY = Y[I+1]; - if (X[I] <= T) goto line170; - K = I; - -line180: X[K+1] = X[K]; - Y[K+1] = Y[K]; - K = 
K-1; - if (T < X[K]) goto line180; - X[K+1] = T; - Y[K+1] = TY; - goto line170; - - /* Clean up */ - -line190: if (KFLAG <= -1) - for (I=1; I<=NN; I++) - X[I] = -X[I]; - - return; -} diff --git a/yt/analysis_modules/halo_finding/hop/hop_kd.c b/yt/analysis_modules/halo_finding/hop/hop_kd.c deleted file mode 100644 index 3e0f22cb3ca..00000000000 --- a/yt/analysis_modules/halo_finding/hop/hop_kd.c +++ /dev/null @@ -1,251 +0,0 @@ -/* KD.C */ -/* This was written by Joachim Stadel and the NASA HPCC ESS at -the University of Washington Department of Astronomy as part of -the SMOOTH program, v2.0.1. -URL: http://www-hpcc.astro.washington.edu/tools/SMOOTH */ - -/* DJE--I have removed all the subroutines not used by HOP, notably -the input and output routines. */ - -/* HOP Version 1.0 (12/15/97) -- Original Release */ - -#include -#include -#include -#ifdef _WIN32 -#include -#else -#include -#include -#endif -#include -#include "kd.h" -#include "hop_numpy.h" -//#include "macros_and_parameters.h" -/* #include "tipsydefs.h" */ /* Don't need this, since I removed kdReadTipsy()*/ - - -#define MAX_ROOT_ITTR 32 - - -void kdTime(KD kd,int *puSecond,int *puMicro) -{ - -#ifdef _WIN32 - int secs, usecs; - HANDLE hProcess = GetCurrentProcess(); - FILETIME ftCreation, ftExit, ftKernel, ftUser; - SYSTEMTIME stUser; - GetProcessTimes(hProcess, &ftCreation, &ftExit, - &ftKernel, &ftUser); - FileTimeToSystemTime(&ftUser, &stUser); - secs = (int)((double)stUser.wHour*3600.0 + - (double)stUser.wMinute*60.0 + - (double)stUser.wSecond); - usecs = (int)((double)stUser.wMilliseconds/1000.0); - *puMicro = usecs; - *puSecond = secs; - if (*puMicro < 0) { - *puMicro += 1000000; - *puSecond -= 1; - } - kd->uSecond = secs; - kd->uMicro = usecs; -#else - struct rusage ru; - - getrusage(0,&ru); - *puMicro = ru.ru_utime.tv_usec - kd->uMicro; - *puSecond = ru.ru_utime.tv_sec - kd->uSecond; - if (*puMicro < 0) { - *puMicro += 1000000; - *puSecond -= 1; - } - kd->uSecond = ru.ru_utime.tv_sec; - kd->uMicro = ru.ru_utime.tv_usec; -#endif -} - -int kdInit(KD *pkd,int nBucket) -{ - KD kd; - - kd = (KD)malloc(sizeof(struct kdContext)); - assert(kd != NULL); - kd->nBucket = nBucket; - kd->kdNodes = NULL; - *pkd = kd; - return(1); - } - -/* - ** JST's Median Algorithm - */ -int kdMedianJst(KD kd,int d,int l,int u) -{ - npy_float64 fm; - int i,k,m; - PARTICLE *p,t; - - p = kd->p; - k = (l+u)/2; - m = k; - while (l < u) { - m = (l+u)/2; - fm = NP_POS(kd, m, d); - t = p[m]; - p[m] = p[u]; - p[u] = t; - i = u-1; - m = l; - while (NP_POS(kd, m, d) < fm) ++m; - while (m < i) { - while (NP_POS(kd, i, d) >= fm) if (--i == m) break; - /* - ** Swap - */ - t = p[m]; - p[m] = p[i]; - p[i] = t; - --i; - while (NP_POS(kd, m, d) < fm) ++m; - } - t = p[m]; - p[m] = p[u]; - p[u] = t; - if (k <= m) u = m-1; - if (k >= m) l = m+1; - } - return(m); - } - - -void kdCombine(KDN *p1,KDN *p2,KDN *pOut) -{ - int j; - - /* - ** Combine the bounds. 
- */ - for (j=0;j<3;++j) { - if (p2->bnd.fMin[j] < p1->bnd.fMin[j]) - pOut->bnd.fMin[j] = p2->bnd.fMin[j]; - else - pOut->bnd.fMin[j] = p1->bnd.fMin[j]; - if (p2->bnd.fMax[j] > p1->bnd.fMax[j]) - pOut->bnd.fMax[j] = p2->bnd.fMax[j]; - else - pOut->bnd.fMax[j] = p1->bnd.fMax[j]; - } - } - - -void kdUpPass(KD kd,int iCell) -{ - KDN *c; - int l,u,pj,j; - - c = kd->kdNodes; - if (c[iCell].iDim != -1) { - l = LOWER(iCell); - u = UPPER(iCell); - kdUpPass(kd,l); - kdUpPass(kd,u); - kdCombine(&c[l],&c[u],&c[iCell]); - } - else { - l = c[iCell].pLower; - u = c[iCell].pUpper; - for (j=0;j<3;++j) { - c[iCell].bnd.fMin[j] = NP_POS(kd, u, j); - c[iCell].bnd.fMax[j] = NP_POS(kd, u, j); - } - for (pj=l;pj c[iCell].bnd.fMax[j]) - c[iCell].bnd.fMax[j] = NP_POS(kd, pj, j); - } - } - } - } - -int kdBuildTree(KD kd) -{ - int l,n,i,d,m,j,ct; - KDN *c; - - n = kd->nActive; - kd->nLevels = 1; - l = 1; - while (n > kd->nBucket) { - n = n>>1; - l = l<<1; - ++kd->nLevels; - } - kd->nSplit = l; - kd->nNodes = l<<1; - kd->kdNodes = (KDN *)malloc(kd->nNodes*sizeof(KDN)); - assert(kd->kdNodes != NULL); - /* - ** Set up ROOT node - */ - c = kd->kdNodes; - c[ROOT].pLower = 0; - c[ROOT].pUpper = kd->nActive-1; - c[ROOT].bnd = kd->bnd; - i = ROOT; - ct = ROOT; - SETNEXT(ct); - while (1) { - if (i < kd->nSplit) { - d = 0; - for (j=1;j<3;++j) { - if (c[i].bnd.fMax[j]-c[i].bnd.fMin[j] > - c[i].bnd.fMax[d]-c[i].bnd.fMin[d]) d = j; - } - c[i].iDim = d; - m = kdMedianJst(kd,d,c[i].pLower,c[i].pUpper); - c[i].fSplit = NP_POS(kd, m, d); - c[LOWER(i)].bnd = c[i].bnd; - c[LOWER(i)].bnd.fMax[d] = c[i].fSplit; - c[LOWER(i)].pLower = c[i].pLower; - c[LOWER(i)].pUpper = m-1; - c[UPPER(i)].bnd = c[i].bnd; - c[UPPER(i)].bnd.fMin[d] = c[i].fSplit; - c[UPPER(i)].pLower = m; - c[UPPER(i)].pUpper = c[i].pUpper; - i = LOWER(i); - } - else { - c[i].iDim = -1; - SETNEXT(i); - if (i == ct) break; - } - } - kdUpPass(kd,ROOT); - return(1); - } - - -int cmpParticles(const void *v1,const void *v2) -{ - PARTICLE *p1=(PARTICLE *)v1,*p2=(PARTICLE *)v2; - - return(p1->iOrder - p2->iOrder); - } - - -void kdOrder(KD kd) -{ - qsort(kd->p,kd->nActive,sizeof(PARTICLE),cmpParticles); - } - -void kdFinish(KD kd) -{ - if(kd->p!=NULL)free(kd->p); - if(kd->kdNodes!=NULL)free(kd->kdNodes); - free(kd); - } - diff --git a/yt/analysis_modules/halo_finding/hop/hop_numpy.h b/yt/analysis_modules/halo_finding/hop/hop_numpy.h deleted file mode 100644 index ed5e7980995..00000000000 --- a/yt/analysis_modules/halo_finding/hop/hop_numpy.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _NUMPY_HOP_H -#include "Python.h" -#include "numpy/ndarrayobject.h" - -#define NP_DENS(kd, in) \ - kd->np_densities[kd->p[in].np_index] -#define NP_POS(kd, in, dim) \ - kd->np_pos[dim][kd->p[in].np_index] -#define NP_MASS(kd, in) \ - (kd->np_masses[kd->p[in].np_index]/kd->totalmass) - -#endif diff --git a/yt/analysis_modules/halo_finding/hop/hop_regroup.c b/yt/analysis_modules/halo_finding/hop/hop_regroup.c deleted file mode 100644 index 9cd1160c590..00000000000 --- a/yt/analysis_modules/halo_finding/hop/hop_regroup.c +++ /dev/null @@ -1,728 +0,0 @@ -/* REGROUP.C, Daniel Eisenstein, 1997 */ -/* Based on a paper by Daniel Eisenstein & Piet Hut, -"HOP: A New Group-Finding Algorithm for N-body Simulations." 
-See the included documentation or view it at -http://www.sns.ias.edu/~eisenste/hop/hop_doc.html */ - -/* Version 1.0 (12/15/97) -- Original Release */ - -#include "slice.h" -#include -#include -#include -#include -#include -#include -//#include "macros_and_parameters.h" -#include "hop.h" - -#define ISYM "d" -#define GSYM "g" -#define FSYM "f" - -/* #define MINDENS (-FLT_MAX/3.0) */ -#define MINDENS (-1.e+30/3.0) -/* This is the most negative density that can be accommodated. Note -that MINDENS*2.0 is referenced in the code and so must be properly -represented by the machine. There's no reason for this to be close to -the actual minimum of the density. */ - -#define INFORM(pstr) printf(pstr); fflush(stdout) -/* Used for messages, e.g. INFORM("Doing this"); */ - -#define UNBOUND -2 /* The tag marker for unbound particles */ - -/* ----------------------------------------------------------------------- */ -/* Prototypes */ -void initgrouplist(Grouplist *g); -void readtags(Slice *s, Grouplist *g, char *fname); -void densitycut(Slice *s, char *fname, float densthresh); -void writegmerge(Slice *s, Grouplist *gl, char *fname, float pt, float mt); -void readgmerge(Slice *s, Grouplist *gl, char *fname); -void merge_groups_boundaries(Slice *s, Grouplist *gl, char *fname, - float peakdensthresh, float saddledensthresh, float densthresh, HC *my_comm); -void translatetags(Slice *s, Grouplist *gl); -void writetags(Slice *s, Grouplist *gl, char *fname); -void writetagsf77(Slice *s, Grouplist *gl, char *fname); -void count_membership(Slice *s, Grouplist *g); -void sort_groups(Slice *s, Grouplist *gl, int mingroupsize, char *fname); - -/* ----------------------------------------------------------------------- */ -/* We use the following structure to handle the user interface: */ - -typedef struct controlstruct { - char *tagname; /* Input file for group tags */ - char *densname; /* Input file for density file */ - char *gmergename; /* Input file for group boundary specifications, OR - input file for group merging data */ - char *outsizename; /* Output file for size output*/ - char *outtagname; /* Output file for group tags */ - char *outgmergename; /* Output file for group merging */ - - int qdenscut; /* =1 if we're making a density cut, =0 otherwise */ - float densthresh; /* The outer density threshold (delta_outer)*/ - - int qgbound; /* =1 if we are to read the boundaries file and - determine the merging.*/ - float peak_thresh; /* Density threshold for peak (delta_peak) */ - float saddle_thresh; /* Density threshold for merging (delta_saddle) */ - int qgmerge_given; /* =1 if we are to use a group translation from file */ - - int mingroupsize; /* The minimum group size we follow */ - int qoutput; /* =1 if we are to write the tags */ - int qf77; /* =1 if binary output if in f77 format */ - int qpipe; /* =1 if we are to write the output tags to stdout */ - int qsort; /* =1 if we are to sort */ - - /* The following aren't used in the present version, but I included - them in case the user wants to customize the program: */ - char *dataname; /* Input file for particle data */ - int qunbind; /* =1 if we are to unbind at all */ -} Controls; /* Type Controls is defined */ - -/* ====================================================================== */ -/* ===================== User Interface ================================= */ -/* ====================================================================== */ - -void parsecommandline(float dens_outer, Controls *c) -{ - int narg, qmerge; - char *outname, *rootname; - 
narg = 1; - rootname = c->dataname = c->densname = c->gmergename = c->tagname = - outname = c->outsizename = c->outtagname = c->outgmergename = NULL; - c->qdenscut = -1; - qmerge = 1; - c->qgmerge_given = 0; - - c->qunbind = 0; - c->qoutput = 1; - c->qsort = 1; - c->qpipe = 0; - c->qf77 = 0; - - c->mingroupsize = -1; - if (2.0*MINDENS>=MINDENS || MINDENS>=0) - myerror("MINDENS seems to be illegal."); - /* Need MINDENS<0 and 2*MINDENS to be machine-representable */ - c->densthresh = 2.0*MINDENS; - c->saddle_thresh = 2.0*MINDENS; - c->peak_thresh = 2.0*MINDENS; - - /* GLB: hard-code some parameters. */ - - c->peak_thresh = 3.0*dens_outer; - c->saddle_thresh = 2.5*dens_outer; - c->densthresh = dens_outer; - c->qdenscut = 1; - rootname = "output_hop"; - - /* Get the input files ready */ - if (c->qdenscut==-1) { - /* Neither -douter nor -nodens was chosen. */ - mywarn("Outer density threshold left unspecified. Skipping this cut."); - c->qdenscut = 0; - } else if (c->qdenscut==1) { - /* We have a chosen density. Need to figure out the density file. */ - if (c->densname==NULL) { - if (rootname==NULL) - myerror("No density file name or root has been specified."); - c->densname = (char *)malloc(80); - strcpy(c->densname,rootname); strcat(c->densname, ".den"); - } - } else c->densname = NULL; /* We have no reason to read it */ - - if (c->tagname==NULL) { - if (rootname==NULL) - myerror("No .hop file name or root has been specified."); - c->tagname = (char *)malloc(80); - strcpy(c->tagname,rootname); strcat(c->tagname, ".hop"); - } - - if (qmerge==1) { - if (c->qgmerge_given==0) { - /* We need to have a .gbound file */ - c->qgbound = 1; - if (c->saddle_threshpeak_threshgmergename==NULL) { - if (rootname==NULL) - myerror("No .gbound file name or root has been specified."); - c->gmergename = (char *)malloc(80); - strcpy(c->gmergename,rootname); - strcat(c->gmergename, ".gbound"); - } - } else c->qgbound = 0; /* We know c->mergename is ready to go */ - } else c->gmergename = NULL; /* No reason to read it */ - - /* Get the output files ready */ - /* If a default name wasn't given, we'll assume zregroup */ - if (outname==NULL) { - outname = (char *)malloc(20); - strcpy(outname,"zregroup"); - } - /* Primary tag output: */ - if (c->qoutput) { /* Need to figure out where we're sending the output */ - if (c->qpipe&&c->outtagname) - myerror("Conflicting instructions--gave specific output name and told to pipe."); - if (c->qpipe>0) mywarn("Writing tags to stdout."); - if (c->qpipe) c->outtagname = NULL; /* Our signal to send to stdout */ - else if (c->outtagname==NULL) { - c->outtagname = (char *)malloc(80); - strcpy(c->outtagname, outname); - strcat(c->outtagname, ".tag"); - } /* Otherwise the name was set by the user */ - } else { - /* We're not outputing tags */ - if (c->qpipe) myerror("Conflicting instructions--told to pipe and not to output."); - } - - if (c->qsort) { - if (c->qpipe>=0) { /* The user didn't specify quiet */ - c->outsizename = (char *)malloc(80); - strcpy(c->outsizename, outname); - strcat(c->outsizename, ".size"); - } - } - - if (c->qpipe>=0) { /* The user didn't specify quiet */ - c->outgmergename = (char *)malloc(80); - strcpy(c->outgmergename, outname); - strcat(c->outgmergename, ".gmerge"); - } - - if (c->mingroupsize >= 0 && !c->qsort) - myerror("Imposition of a certain group size occurs within the sort routine."); - if (c->qsort && c->mingroupsize < 0) { - mywarn("No minimum group size specified. 
Assuming 10 particles."); - c->mingroupsize = 10; - } - - if (c->densthreshdensthresh=MINDENS; - /* This is our default--a very negative number */ - - return; -} - -/* ====================================================================== */ -/* ============================== MAIN() ================================ */ -/* ====================================================================== */ - -/* void main(int argc, char *argv[]) */ -void regroup_main(float dens_outer, HC *my_comm) -{ - Grouplist *gl = my_comm->gl; - Slice *s = my_comm->s; - FILE *f; - Controls c; - - /* parsecommandline(argc, argv, &c); */ - parsecommandline(dens_outer, &c); - - //initgrouplist(gl); - //s=newslice(); - - /* We need to read the tag file and perhaps perform a density cut */ - // We don't read anymore (mjt) - //readtags(s,gl,c.tagname); - - // We cut in advance now (mjt) - //if (c.qdenscut) densitycut(s,c.densname,c.densthresh); - - /* Next do the merging of the input groups */ - if (c.qgbound) { - /* We're going to read a .gbound file and merge groups */ - merge_groups_boundaries(s,gl,c.gmergename, - c.peak_thresh, c.saddle_thresh, c.densthresh, my_comm); - /* Renumber the groups from large to small; remove any tiny ones */ - //if (c.qsort) sort_groups(s, gl, c.mingroupsize, c.outsizename); - if (c.qsort) sort_groups(s, gl, c.mingroupsize, NULL); - //writegmerge(s, gl, c.outgmergename, c.peak_thresh, c.saddle_thresh); - translatetags(s,gl); - } - else if (c.qgmerge_given) { - /* We're going to read a .gmerge file and merge groups as it says */ - readgmerge(s, gl, c.gmergename); - translatetags(s, gl); - } /* Else we'll use the tags as given by the original .hop file */ - - /* If one wants to manipulate the groups any more, this is a good - place to do it. For example, you might want to remove unbound particles: - if (c.qunbind) { - get_particle_data(s, gl, c.dataname); - unbind_particles(s, gl, c.mingroupsize); - } - */ - - /* Write the output */ - /*if (c.qoutput) { - if (c.qf77) writetagsf77(s, gl, c.outtagname); - else writetags(s, gl, c.outtagname); - }*/ - - //free_slice(s); - return; -} - -/* ================================================================= */ -/* =================== Initialization Routines ===================== */ -/* ================================================================= */ - -void initgrouplist(Grouplist *g) -/* Just make sure this stuff is zero */ -{ - g->list = NULL; - g->npartingroups = g->npart = g->ngroups = 0; g->nnewgroups = 0; - return; -} - -void readtags(Slice *s, Grouplist *g, char *fname) -/* Read the tag file named fname into s->ntag[] */ -/* Groups need not be sorted, but must be numbered from 0 to ngroups-1 */ -{ - FILE *f; - - if ((f=fopen(fname,"r"))==NULL) myerror("Input tag file not found."); - if (fread(&(g->npart),sizeof(int),1,f)!=1) myerror("Tag file read error."); - if (fread(&(g->ngroups),sizeof(int),1,f)!=1) myerror("Tag file read error."); - fprintf(stderr,"Number of particles: %"ISYM". Number of groups: %"ISYM".\n", - g->npart, g->ngroups); - - s->numpart = g->npart; - s->numlist = g->npart; - s->ntag = ivector(1,s->numlist); - //s->ID = ivector(1,s->numlist); - fread(s->ntag+1, sizeof(int), s->numlist, f); /* Read in all the tags */ - //fread(s->ID+1, sizeof(int), s->numlist,f); /* Read in the real particle IDs. 
S Skory */ - fclose(f); - - return; -} - -/* ========================== Density Cut ======================== */ - -#define MAXBLOCK 65536 /* Read the file 256k at a time */ - -void densitycut(Slice *s, char *fname, float densthresh) -/* Read the density file and change the tag on any particle with density -less than densthresh to -1, thus removing them from groups */ -/* This will leave some groups with no particles, which is fine */ -/* We read the file in segments, so as to reduce memory consumption */ -{ - FILE *f; - int j, numread, npart, block; /* block was a float by mistake */ - float density[MAXBLOCK]; - - if ((f=fopen(fname,"r"))==NULL) - myerror("Density file not found."); - npart = 0; fread(&npart,sizeof(int),1,f); - if (npart!=s->numpart) - mywarn("Density file doesn't match slice description."); - - numread = 0; - block = MAXBLOCK; /* Start off big */ - while (numreadntag[numread+j]=(-1); /* s->ntag is unit-offset */ - numread+=block; - } - fclose(f); - return; -} - -/* ====================== Read/Write .gmerge files ======================= */ -/* The gmerge file is just a map from the old (pre-regroup) group numbers -to the new (post-regroup) group numbers. Of course, there are more "old" -groups than "new" groups, since the point of regroup() is to merge groups. */ - -void writegmerge(Slice *s, Grouplist *gl, char *fname, float pt, float mt) -/* Write the translation between old groups and new groups, ASCII */ -{ - FILE *f; - int j; - Group *gr; - - if (fname==NULL) return; /* We've been told not to write anything */ - - if ((f=fopen(fname,"w"))==NULL) myerror("Can't open gmerge file for write."); - fprintf(f,"%"ISYM"\n%"ISYM"\n%"ISYM"\n", gl->npart, gl->ngroups, gl->nnewgroups); - fprintf(f,"%"FSYM"\n%"FSYM"\n", pt, mt); - for (j=0,gr=gl->list;jngroups;j++,gr++) - fprintf(f,"%"ISYM" %"ISYM"\n", j, gr->idmerge); - fclose(f); - return; -} - -void readgmerge(Slice *s, Grouplist *gl, char *fname) -/* Read the translation between old groups and new groups, ASCII */ -/* Also, set up gl->list for translation */ -{ - FILE *f; - int j, dummy; - Group *gr; - float pt, mt; - - if ((f=fopen(fname,"r"))==NULL) myerror("Can't open gmerge read file."); - if (fscanf(f,"%"ISYM"\n%"ISYM"\n%"ISYM"\n", &(gl->npart), &(gl->ngroups), - &(gl->nnewgroups))!=3) myerror("Error in header of gmerge file."); - if (gl->npart!=s->numpart) myerror("Number of particles in gmerge file doesn't match that of tags file."); - fscanf(f,"%"FSYM" %"FSYM"\n", &pt, &mt); - - if (gl->list!=NULL) free(gl->list); - gl->list = (Group *)malloc((size_t)(gl->ngroups *sizeof(Group))); - if (gl->list==NULL) myerror("Error in allocating gl->list."); - - for (j=0,gr=gl->list; jngroups; j++,gr++) { - if (fscanf(f,"%"ISYM" %"ISYM"\n", &dummy, &(gr->idmerge))!=2 || dummy!=j) - myerror("Error in reading gmerge file."); - gr->npart = -1; /* We're not setting this */ - } - fclose(f); - return; -} - -/* ====================== GROUP MERGING BY BOUNDARIES ================ */ - -void merge_groups_boundaries(Slice *s, Grouplist *gl, char *mergename, - float peakdensthresh, float saddledensthresh, float densthresh, - HC *my_comm) -/* Read in the gmerge file and decide which groups are to be merged. -Groups are numbered 0 to ngroups-1. Groups with boundaries greater -than saddledensthresh are merged. Groups with maximum densities -less than peakdensthresh are merged to the group with -maxdensity above peakdensthresh with which it shares the highest -density border. 
*/ -/* Only groups with maximum densities above peakdensthresh can be group -centers. */ -/* Allocate space for the grouplist and store the merging results in -the idmerge field. */ -/* I think this will work even if saddledensthreshgdensity; - int *g1temp,*g2temp; - float *denstemp; - int temppos = 0; - - ngroups = my_comm->ngroups; - - if (densthreshngroups = ngroups; - if (gl->list!=NULL) free(gl->list); - gl->list = (Group *)malloc((size_t)(gl->ngroups *sizeof(Group))); - fprintf(stderr,"ngroups = %d\n",ngroups); - if (gl->list==NULL) myerror("Error in allocating gl->list."); - for (j=0,gr=gl->list;jngroups;j++,gr++) { - /* If group is too underdense, it cannot be a group center */ - if (gdensity[j]idmerge=(-1);} - else {gr->idmerge = j;} - gr->npart = -1; /* Not doing anything with this */ - densestbound[j] = 2.0*MINDENS; /* Initialize */ - densestboundgroup[j] = -1; /* Initialize */ - } - - /* Now step through the list of boundaries */ - /* If a boundary is between two groups with max densities above - peakdensthresh and if the boundary is above saddledensthresh, then - merge the groups (keeping the lower number of the two). */ - /* If one of the groups is above peakdensthresh and the other is - below, and if the boundary density is higher than any seen previously - for the lower density group, then record this information */ - /* If neither group is above peakdensthresh, skip the boundary */ - - /* make few arrays to eliminate the need to write a file to disk. The entries in - the arrays should be no larger than my_comm->nb. - Skory. - */ - - g1temp = (int *)malloc(sizeof(int) * my_comm->nb); - g2temp = (int *)malloc(sizeof(int) * my_comm->nb); - denstemp = (float *)malloc(sizeof(float) * my_comm->nb); - - for(j=0;j<(my_comm->nb);j++) { - g1 = my_comm->g1vec[j]; - g2 = my_comm->g2vec[j]; - dens = my_comm->fdensity[j]; - if (gdensity[g1]densthresh && gdensity[g2]>densthresh && - dens>densthresh) { - g1temp[temppos] = g1; - g2temp[temppos] = g2; - denstemp[temppos] = dens; - temppos += 1; - } - continue; /* group isn't dense enough */ - } - if (gdensity[g1]>=peakdensthresh && gdensity[g2]>=peakdensthresh) - if (denslist[g1].idmerge) - g1=gl->list[g1].idmerge; - while (g2!=gl->list[g2].idmerge) - g2=gl->list[g2].idmerge; - if (g1list[g2].idmerge=g1; - else gl->list[g1].idmerge=g2; - continue; /* Go to the next boundary */ - } - /* Else one is above peakdensthresh, the other below. */ - /* Make the high one g1 */ - if (gdensity[g1]densestbound[g2]) { - /* It's the densest boundary yet */ - densestbound[g2] = dens; - densestboundgroup[g2] = g1; - } - } /* Get the next boundary line */ - - - /* Now the fringe groups are connected to the proper group - (>peakdensthresh) with the largest boundary. But we want to look - through the boundaries between fringe groups to propagate this - along. 
Connections are only as good as their smallest boundary */ - /* Keep the density of the connection in densestbound, and the - proper group it leads to in densestboundgroup */ - do { - changes = 0; - for (j=0;jdensestbound[g1]) { - dummy[0] = g2; g2=g1; g1=dummy[0]; - } - if (dens>densestbound[g2]&&densestbound[g1]>densestbound[g2]) { - changes++; - if (densngroups;j++) { - if (densestbound[j]>=densthresh) - gl->list[j].idmerge = densestboundgroup[j]; - } - /* Now we want to number the newly merged groups */ - /* The center groups are given negative numbers <-1 */ - for (j=0,gl->nnewgroups=0; jngroups; j++) - if (gl->list[j].idmerge==j) { - gl->list[j].idmerge = -2-(gl->nnewgroups++); - } - - /* Now trace each group through until a negative number is reached */ - for (j=0; jngroups; j++) { - if (gl->list[j].idmerge<0) continue; - g1 = j; - while ((g1=gl->list[g1].idmerge)>=0); - g2 = j; - do gl->list[g2].idmerge = g1; - while ((g2=gl->list[g2].idmerge)>=0); - } - - /* Finally, renumber the groups 0..N-1 */ - for (j=0,gr=gl->list;jngroups;j++,gr++) - gr->idmerge = -2-gr->idmerge; /* Keep -1 -> -1 */ - - - /* And delete the tempfile */ - remove(tempfilename); - free_vector(gdensity,0,ngroups-1); - free_vector(densestbound,0,ngroups-1); - free_ivector(densestboundgroup,0,ngroups-1); - return; -} - -/* ======================================================================= */ -/* =============== Update the tags and write them out ==================== */ -/* ======================================================================= */ - -void translatetags(Slice *s, Grouplist *gl) -/* Alter s->ntag to have the new groups. Reset gl so as to reflect the -new number of groups. */ -{ - int j; - - - for (j=1;j<=s->numlist;j++) - if (s->ntag[j]>=0) { - s->ntag[j] = gl->list[s->ntag[j]].idmerge; - } - /* Otherwise, translate the unbound particles */ - else if (s->ntag[j]<-1) - s->ntag[j] = UNBOUND - gl->list[UNBOUND-s->ntag[j]].idmerge; - free(gl->list); - gl->list = NULL; - gl->ngroups = gl->nnewgroups; - return; -} - -void writetags(Slice *s, Grouplist *gl, char *fname) -/* Write s->ntag to file */ -/* If fname==NULL, write to stdout */ -{ - FILE *f; - - - if (fname!=NULL) { - if ((f=fopen(fname,"w"))==NULL) myerror("Error opening new tag file."); - } else f=stdout; - fwrite(&(s->numpart),sizeof(int),1,f); - printf("writetags: s->numpart = %d gl->ngroups = %d\n", - s->numpart, gl->ngroups); - fwrite(&(gl->ngroups),sizeof(int),1,f); - fwrite(s->ntag+1,sizeof(int),s->numlist,f); - //fwrite(s->ID+1,sizeof(int),s->numlist,f); /* S Skory */ - fclose(f); - - return; -} - -void writetagsf77(Slice *s, Grouplist *gl, char *fname) -/* Write s->ntag to file */ -/* If fname==NULL, write to stdout */ -/* Use a format readable for FORTRAN unformatted read commands */ -{ - FILE *f; - int dummy; - if (fname!=NULL) { - if ((f=fopen(fname,"w"))==NULL) myerror("Error opening new tag file."); - } else f=stdout; - dummy = 8; fwrite(&dummy,sizeof(int),1,f); - fwrite(&(s->numpart),sizeof(int),1,f); - fwrite(&(gl->ngroups),sizeof(int),1,f); - fwrite(&dummy,sizeof(int),1,f); - dummy = s->numlist*sizeof(int); fwrite(&dummy,sizeof(int),1,f); - fwrite(s->ntag+1,sizeof(int),s->numlist,f); - fwrite(&dummy,sizeof(int),1,f); - fclose(f); - return; -} - -/* ====================================================================== */ -/* ========================== Sorting the Groups ======================== */ -/* ====================================================================== */ - -void sort_groups(Slice *s, Grouplist *gl, int 
mingroupsize, char *fname) -/* Sort the groups, as labeled by the idmerge field not their original -number, from largest to smallest. Alter the idmerge field to this new -numbering, setting any below mingroupsize to -1. */ -/* If fname!=NULL, write a little output file listing the group sizes */ -{ - FILE *f; - int j,k, *order, partingroup, igr, *newnum, nmergedgroups; - float *gsize; - Group *c; - void make_index_table(int n, float *fvect, int *index); - - nmergedgroups = gl->nnewgroups; - gsize = vector(0,nmergedgroups-1); - order = ivector(1,nmergedgroups); - newnum = ivector(0,nmergedgroups-1); - - /* First we need to find the number of particles in each group */ - for (j=0,c=gl->list;jngroups;j++,c++) c->npart=0; - - for (j=1;j<=s->numlist;j++) { /* Look through all the particles */ - igr = s->ntag[j]; - if (igr>=0) - if (igrngroups) gl->list[igr].npart++; - else myerror("Group tag is out of bounds."); - } - /* Now combine these to find the number in the new groups */ - for (j=0;jlist;jngroups;j++,c++) - if (c->idmerge>=0 && c->idmergeidmerge]+=c->npart; - else if (c->idmerge>=nmergedgroups) - myerror("Group idmerge is out of bounds."); - - make_index_table(nmergedgroups, gsize-1, order); - /* But remember that order[] thinks that gsize is unit-offset */ - for (j=nmergedgroups,k=0;j>0; j--,k++) - if (gsize[order[j]-1]>mingroupsize-0.5) newnum[order[j]-1]=k; - else break; /* All of the rest are too small */ - - gl->nnewgroups = k; - for (;j>0;j--) newnum[order[j]-1]=(-1); - /* Newnum[] holds the new sorted number for merged group j */ - - /* Now assign sorted group numbers to idmerge */ - partingroup = 0; - for (j=0,c=gl->list;jngroups;j++,c++) - if (c->idmerge>=0) - if ((c->idmerge = newnum[c->idmerge])>=0) - partingroup+=c->npart; - - /* Output the .size file, if inputed name isn't NULL */ - if (fname!=NULL) { - f = fopen(fname,"w"); - fprintf(f,"%"ISYM"\n%"ISYM"\n%"ISYM"\n", s->numpart, partingroup, gl->nnewgroups); - for (j=0;jnnewgroups;j++) - fprintf(f,"%"ISYM" %"ISYM"\n", j, (int)gsize[order[nmergedgroups-j]-1]); - fclose(f); - } - free_ivector(order,1,nmergedgroups); - free_vector(gsize,0,nmergedgroups-1); - free_ivector(newnum,0,nmergedgroups-1); - return; -} - -/* ======================== Sorting ============================ */ - -typedef struct index_struct { - float value; - int index; -} *ptrindex; - -int cmp_index_regroup(const void *a, const void *b) -{ - if ( ((ptrindex)a)->value<((ptrindex)b)->value) return -1; - else if ( ((ptrindex)a)->value>((ptrindex)b)->value) return 1; - else return 0; -} - -void make_index_table(int n, float *fvect, int *index) -/* Given a vector of floats fvect[1..n], construct a index table index[1..n] -so that index[j] contains the ID number of the jth lowest element. 
-Storage for index[] should be declared externally */ -/* This isn't fast, but it takes a tiny fraction of the runtime */ -{ - int j; - ptrindex sortvect; - - sortvect = (ptrindex)malloc(n*sizeof(struct index_struct)); - for (j=0;jpid = NULL; s->offset = 0; - s->px = s->py = s->pz = s->vx = s->vy = s->vz = NULL; - s->ntag = NULL; - //s->ID = NULL; /* S Skory */ - s->numpart = s->numlist = 0; - return s; -} - -void free_tags(Slice *s) -/* Free the tag vector */ -{ - if (s->ntag!=NULL) { - free_ivector(s->ntag, 1, s->numlist); - s->ntag=NULL; - //free_ivector(s->ID, 1, s->numlist); /* S Skory */ - //s->ID=NULL; - } - return; -} - -void free_data(Slice *s) -/* Free all the data vectors */ -{ - if (s->pid!=NULL) {free(s->pid); s->pid=NULL;} - if (s->px!=NULL) {free_vector(s->px,1,s->numlist); s->px=NULL;} - if (s->py!=NULL) {free_vector(s->py,1,s->numlist); s->py=NULL;} - if (s->pz!=NULL) {free_vector(s->pz,1,s->numlist); s->pz=NULL;} - if (s->vx!=NULL) {free_vector(s->vx,1,s->numlist); s->vx=NULL;} - if (s->vy!=NULL) {free_vector(s->vy,1,s->numlist); s->vy=NULL;} - if (s->vz!=NULL) {free_vector(s->vz,1,s->numlist); s->vz=NULL;} - return; -} - -void free_slice(Slice *s) -/* Free the space associated with the vectors in the given Slice */ -/* Then free the Slice variable itself */ -{ - free_tags(s); - free_data(s); - free(s); - return; -} - -/* =================================================================== */ - -int f77write(FILE *f, void *p, int len) -/* len is number of bytes to be written from p[0..len-1] */ -/* Return 0 if successful, 1 if not */ -{ - if (fwrite(&len,sizeof(int),1,f)!=1) return 1; - if (fwrite(p,1,len,f)!=len) return 1; - if (fwrite(&len,sizeof(int),1,f)!=1) return 1; - return 0; -} - -int f77read(FILE *f, void *p, int maxbytes) -/* Read a FORTRAN style block from the given file */ -/* maxbytes is the amount of space the pointer p points to */ -/* Space must be allocated to read the whole block into p */ -/* Return amount read, scream if there's a problem */ -/* Reading is done ZERO-OFFSET */ -{ - int size, size2; - if (fread(&size,sizeof(int),1,f)!=1) - myerror("f77read(): Error reading begin delimiter."); - if (size>maxbytes) - myerror("f77read(): Block delimiter exceeds size of storage."); - if (sizenumpart = sizeheader[0]; - s->numblocks = sizeheader[1]; - s->numperblock = sizeheader[0]/sizeheader[1]; - if (s->numpart != s->numblocks*(s->numperblock)) - myerror("Number of blocks not an even divisor of number of particles."); - - s->z = header[0]; - s->boxsize = header[1]*1000.0; /* We use kpc, not Mpc */ - s->physsize = s->boxsize/(1.0+s->z); /* We use kpc, not Mpc */ - s->velscale = 100.0*header[1]*sqrt(3.0/8.0/PI)/(1.0+s->z); - /* To go from data to pec vel */ - s->omega = header[4]; - if (header[6]!=0.0) myerror("HDM component listed in header."); - s->lambda = header[7]; - s->h0 = header[8]; - s->sigma8 = header[9]; /* At z=0 */ - - /* Now find some computed quantities. 
*/ - s->a = 1.0/(1.0+s->z); - s->curv = 1.0-s->omega-s->lambda; - s->gamma = s->omega*(s->h0); - s->specn = 1.0; - s->hubb = 0.1*sqrt(s->omega/CUBE(s->a)+s->curv/SQR(s->a)+s->lambda)*(s->a); - - /* The following assume Omega = 1 */ - s->masspart = RHOCRIT/s->numpart*CUBE(s->boxsize); - s->growth = s->a; - s->t = HTIME*(s->h0)*pow(s->a, 1.5); - return 0; -} - -void normalizedata(Slice *s, int conp, int conv) -/* Put raw data into comoving h^-1 kpc and km/s units */ -{ - int j; - float velnorm; - if (conp) { - for (j=1;j<=s->numlist;j++) s->px[j] *= s->boxsize; - for (j=1;j<=s->numlist;j++) s->py[j] *= s->boxsize; - for (j=1;j<=s->numlist;j++) s->pz[j] *= s->boxsize; - } - - if (conv) { - for (j=1;j<=s->numlist;j++) s->vx[j] *= s->velscale; - for (j=1;j<=s->numlist;j++) s->vy[j] *= s->velscale; - for (j=1;j<=s->numlist;j++) s->vz[j] *= s->velscale; - } - return; -} - -/* ================================================================ */ - -int read_alldata(FILE *f, FILE *ftag, Slice *s, int conp, int conv) -/* Read all the data, including the tags if ftag!=NULL. */ -/* Store positions and velocities unless conp or conv = 0 */ -/* Assume that the data file header has been skipped, but read the -tag file header. */ -{ - int block; - float *dummylist; - - dummylist = NULL; - if (!conp || !conv) dummylist = vector(1,s->numperblock); - if (s->pid!=NULL) - mywarn("Non-NULL s->pid[] passed to read_alldata(). Ignoring..."); - - s->numlist=s->numpart; - if (conp) { - s->px=vector(1,s->numlist); - s->py=vector(1,s->numlist); - s->pz=vector(1,s->numlist); - } - if (conv) { - s->vx=vector(1,s->numlist); - s->vy=vector(1,s->numlist); - s->vz=vector(1,s->numlist); - } - if (ftag!=NULL) { - s->ntag = ivector(1,s->numlist); - s->ID = ivector(1,s->numlist); /* S Skory */ - } - - - printf("Reading data..."); - for (block=0;blocknumblocks;block++) { - /* Read the three position blocks */ - if (conp) { /* Store the data */ - f77read(f, s->px+s->numperblock*block+1, s->numperblock*sizeof(float)); - f77read(f, s->py+s->numperblock*block+1, s->numperblock*sizeof(float)); - f77read(f, s->pz+s->numperblock*block+1, s->numperblock*sizeof(float)); - } else { /* Don't store the data */ - f77read(f, dummylist+1, s->numperblock*sizeof(float)); - f77read(f, dummylist+1, s->numperblock*sizeof(float)); - f77read(f, dummylist+1, s->numperblock*sizeof(float)); - } - /* Now read the three velocity blocks */ - if (conv) { /* Store the data */ - f77read(f, s->vx+s->numperblock*block+1, s->numperblock*sizeof(float)); - f77read(f, s->vy+s->numperblock*block+1, s->numperblock*sizeof(float)); - f77read(f, s->vz+s->numperblock*block+1, s->numperblock*sizeof(float)); - } else { /* Don't store the data */ - f77read(f, dummylist+1, s->numperblock*sizeof(float)); - f77read(f, dummylist+1, s->numperblock*sizeof(float)); - f77read(f, dummylist+1, s->numperblock*sizeof(float)); - } - if (block%8==1) {printf("."); fflush(stdout);} - } - if (dummylist!=NULL) free_vector(dummylist, 1, s->numperblock); - normalizedata(s,conp,conv); - - if (ftag!=NULL) { - printf("tags..."); fflush(stdout); - readalltags(ftag, s); - } - - printf("done!"); fflush(stdout); - return 0; -} - -int read_partdata(FILE *f, FILE *ftag, Slice *s) -/* Read one block (128k particles) of data into Slice s. Allocate needed -storage, erasing and freeing previous storage. 
*/ -/* This cannot be done with s->pid!=NULL, so s->pid is ignored and s->numlist -is reset to BLOCKSIZE */ -/* Unlike other routines, this stores both positions and velocities in -all cases (since the storage requirements are already small */ -/* If ftag==NULL, don't read the tag file. Otherwise do read it. */ -{ - if (s->pid!=NULL) - mywarn("Non-trivial pid[] not supported with incremental reads"); - /* If we need to reallocate memory, do it. Otherwise, just write over */ - if (s->px==NULL || s->vx==NULL || s->numlist!=s->numperblock) { - if (s->px!=NULL) free_vector(s->px,1,s->numlist); - if (s->py!=NULL) free_vector(s->py,1,s->numlist); - if (s->pz!=NULL) free_vector(s->pz,1,s->numlist); - if (s->vx!=NULL) free_vector(s->vx,1,s->numlist); - if (s->vy!=NULL) free_vector(s->vy,1,s->numlist); - if (s->vz!=NULL) free_vector(s->vz,1,s->numlist); - if (ftag!=NULL && s->ntag!=NULL) { - free_ivector(s->ntag, 1, s->numlist); - free_ivector(s->ID, 1, s->numlist); /* S Skory */ - } - s->numlist = s->numperblock; - s->px = vector(1,s->numlist); - s->py = vector(1,s->numlist); - s->pz = vector(1,s->numlist); - s->vx = vector(1,s->numlist); - s->vy = vector(1,s->numlist); - s->vz = vector(1,s->numlist); - if (ftag!=NULL) { - s->ntag = ivector(1, s->numlist); - s->ID = ivector(1, s->numlist); /* S Skory */ - } - s->offset=0; - /* fprintf(stderr, "Reallocating data arrays.\n"); */ - } - else s->offset+=s->numlist; - - f77read(f,s->px+1,sizeof(float)*s->numlist); - f77read(f,s->py+1,sizeof(float)*s->numlist); - f77read(f,s->pz+1,sizeof(float)*s->numlist); - f77read(f,s->vx+1,sizeof(float)*s->numlist); - f77read(f,s->vy+1,sizeof(float)*s->numlist); - f77read(f,s->vz+1,sizeof(float)*s->numlist); - - if (ftag!=NULL) readtag(ftag, s->numlist, s->ntag); - normalizedata(s,1,1); - return 0; -} - -/* =============================================================== */ - -int readtag(FILE *f, int numread, int *ntag) -/* Read numread values from FILE f and put the values in ntag */ -/* Return 0 if successful, 1 if not */ -/* The storage ntag[1..numread] must exist */ -/* Note: the first 8 bytes of the tag file contain the number of particles -and the number of groups. These must be skipped before calling this routine. */ -{ - if (fread(ntag+1, sizeof(int), numread, f)!=numread) - myerror("Error in reading tag file."); - return 0; -} - -int skiptagheader(FILE *f, Slice *s) -/* Read the first 8 bytes from the tag file. Check that the first int equals -the number of particles. Return the second, which is the number of groups */ -{ - int dummy[2]; - if (fread(&dummy, sizeof(int), 2, f)!=2) myerror("Error in reading tag file."); - if (s->numpart!=0 && dummy[0]!=s->numpart) - myerror("First number in tag file doesn't match expected number of particles."); - s->numgroups = dummy[1]; - return dummy[1]; -} - -int readalltags(FILE *f, Slice *s) -/* Read the whole tag file. 
Allocate memory as needed */ -/* Return the number of groups */ -{ - int dummy[2]; - if (s->ntag==NULL || s->numlist!=s->numpart) { - if (s->ntag!=NULL) { - free_ivector(s->ntag, 1, s->numlist); - free_ivector(s->ID, 1, s->numlist); /* S Skory */ - } - s->numlist = s->numpart; - s->ntag = ivector(1, s->numlist); - s->ID = ivector(1, s->numlist); - } - if (fread(&dummy, sizeof(int), 2, f)!=2) myerror("Error 1 in reading tag file."); - if (dummy[0]!=s->numpart) - myerror("First int of tag file doesn't match numpart."); - s->numgroups = dummy[1]; - - if (fread(s->ntag+1, sizeof(int), s->numlist, f)!=s->numlist) - myerror("Couldn't read entire tag file."); - return dummy[1]; -} - -#endif - -/* ===================== Warnings and Errors =========================== */ - -/* Print a message and die */ -void myerror(char *message) -{ - fprintf(stderr, "%s\n", message); - exit(1); return; -} - -/* Just print a message */ -void mywarn(char *message) -{ - fprintf(stderr, "%s\n", message); - fflush(NULL); /* Flush everything, so we know where we are */ - return; -} diff --git a/yt/analysis_modules/halo_finding/hop/hop_smooth.c b/yt/analysis_modules/halo_finding/hop/hop_smooth.c deleted file mode 100644 index 185ad2fc705..00000000000 --- a/yt/analysis_modules/halo_finding/hop/hop_smooth.c +++ /dev/null @@ -1,479 +0,0 @@ -/* SMOOTH.C */ -/* This was written by Joachim Stadel and the NASA HPCC ESS at -the University of Washington Department of Astronomy as part of -the SMOOTH program, v2.0.1. -URL: http://www-hpcc.astro.washington.edu/tools/SMOOTH */ - -/* DJE--I have removed unneeded subroutines, notably those having -to do with velocity field reconstructions (because they refer to -particle data that I chose not to store) and output routines -(because I wanted binary output). Also, the density subroutine -was slightly customized to reduce memory consumption in -the case of equal mass particles. */ - -/* HOP Version 1.0 (12/15/97) -- Original Release */ - -#include -#include -#ifdef _WIN32 -#define _USE_MATH_DEFINES -#endif -#include -#include -#include "smooth.h" -#include "kd.h" -#include "hop_numpy.h" - -#define ISYM "d" -#define GSYM "g" - -//#include "macros_and_parameters.h" - -#define IMARK 1 /* All particles are marked to be included */ - -int smInit(SMX *psmx,KD kd,int nSmooth,float *fPeriod) -{ - SMX smx; - PQ_STATIC; - int pi,j; - fprintf(stderr,"nSmooth = %d kd->nActive = %d\n", nSmooth, kd->nActive); - assert(nSmooth <= kd->nActive); - smx = (SMX)malloc(sizeof(struct smContext)); - assert(smx != NULL); - smx->kd = NULL; - - smx->kd = kd; - smx->nSmooth = nSmooth; - smx->pq = (PQ *)malloc(nSmooth*sizeof(PQ)); - assert(smx->pq != NULL); - PQ_INIT(smx->pq,nSmooth); - smx->pfBall2 = (float *)malloc((kd->nActive+1)*sizeof(int)); - assert(smx->pfBall2 != NULL); - smx->iMark = (char *)malloc(kd->nActive*sizeof(char)); - assert(smx->iMark); - smx->nListSize = smx->nSmooth+RESMOOTH_SAFE; - smx->fList = (float *)malloc(smx->nListSize*sizeof(float)); - assert(smx->fList != NULL); - smx->pList = (int *)malloc(smx->nListSize*sizeof(int)); - assert(smx->pList != NULL); - /* - ** Set for Periodic Boundary Conditions. 
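The tag-file readers above (readtag, skiptagheader, readalltags) describe the layout of a HOP .tag file: two leading ints giving the particle count and the group count, followed by one int group tag per particle. A short NumPy sketch of a standalone reader, assuming native-endian 32-bit ints with no record delimiters (hypothetical helper, shown only to make the layout explicit):

    import numpy as np

    def read_hop_tags(filename):
        """Return (numgroups, tags): the group count and an int32 array of per-particle tags."""
        with open(filename, "rb") as f:
            numpart, numgroups = np.fromfile(f, dtype=np.int32, count=2)
            tags = np.fromfile(f, dtype=np.int32, count=numpart)
        if tags.size != numpart:
            raise IOError("tag file shorter than its header claims")
        return int(numgroups), tags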
- */ - for (j=0;j<3;++j) smx->fPeriod[j] = fPeriod[j]; - /* - ** Initialize arrays for calculated quantities.--DJE - */ - for (pi=0;pikd->nActive;++pi) { - NP_DENS(smx->kd, pi) = 0.0; - smx->kd->p[pi].iHop = 0; - } - *psmx = smx; - return(1); - } - - -void smFinish(SMX smx) -{ - free(smx->pfBall2); - free(smx->iMark); - free(smx->pq); - free(smx); - } - - -void smBallSearch(SMX smx,float fBall2,float *ri) -{ - KDN *c; - PARTICLE *p; - int cell,cp,ct,pj; - float fDist2,dx,dy,dz,lx,ly,lz,sx,sy,sz,x,y,z; - PQ *pq; - PQ_STATIC; - - c = smx->kd->kdNodes; - p = smx->kd->p; - pq = smx->pqHead; - x = ri[0]; - y = ri[1]; - z = ri[2]; - lx = smx->fPeriod[0]; - ly = smx->fPeriod[1]; - lz = smx->fPeriod[2]; - cell = ROOT; - /* - ** First find the "local" Bucket. - ** This could merely be the closest bucket to ri[3]. - */ - while (cell < smx->kd->nSplit) { - if (ri[c[cell].iDim] < c[cell].fSplit) cell = LOWER(cell); - else cell = UPPER(cell); - } - /* - ** Now start the search from the bucket given by cell! - */ - for (pj=c[cell].pLower;pj<=c[cell].pUpper;++pj) { - dx = x - NP_POS(smx->kd, pj, 0); - dy = y - NP_POS(smx->kd, pj, 1); - dz = z - NP_POS(smx->kd, pj, 2); - fDist2 = dx*dx + dy*dy + dz*dz; - if (fDist2 < fBall2) { - if (smx->iMark[pj]) continue; - smx->iMark[pq->p] = 0; - smx->iMark[pj] = 1; - pq->fKey = fDist2; - pq->p = pj; - pq->ax = 0.0; - pq->ay = 0.0; - pq->az = 0.0; - PQ_REPLACE(pq); - fBall2 = pq->fKey; - } - } - while (cell != ROOT) { - cp = SIBLING(cell); - ct = cp; - SETNEXT(ct); - while (1) { - INTERSECT(c,cp,fBall2,lx,ly,lz,x,y,z,sx,sy,sz); - /* - ** We have an intersection to test. - */ - if (cp < smx->kd->nSplit) { - cp = LOWER(cp); - continue; - } - else { - for (pj=c[cp].pLower;pj<=c[cp].pUpper;++pj) { - dx = sx - NP_POS(smx->kd, pj, 0); - dy = sy - NP_POS(smx->kd, pj, 1); - dz = sz - NP_POS(smx->kd, pj, 2); - fDist2 = dx*dx + dy*dy + dz*dz; - if (fDist2 < fBall2) { - if (smx->iMark[pj]) continue; - smx->iMark[pq->p] = 0; - smx->iMark[pj] = 1; - pq->fKey = fDist2; - pq->p = pj; - pq->ax = sx - x; - pq->ay = sy - y; - pq->az = sz - z; - PQ_REPLACE(pq); - fBall2 = pq->fKey; - } - } - } - GetNextCell: - SETNEXT(cp); - if (cp == ct) break; - } - cell = PARENT(cell); - } - smx->pqHead = pq; - } - - -int smBallGather(SMX smx,float fBall2,float *ri) -{ - KDN *c; - PARTICLE *p; - int pj,nCnt,cp,nSplit; - float dx,dy,dz,x,y,z,lx,ly,lz,sx,sy,sz,fDist2; - - c = smx->kd->kdNodes; - p = smx->kd->p; - nSplit = smx->kd->nSplit; - lx = smx->fPeriod[0]; - ly = smx->fPeriod[1]; - lz = smx->fPeriod[2]; - x = ri[0]; - y = ri[1]; - z = ri[2]; - nCnt = 0; - cp = ROOT; - while (1) { - INTERSECT(c,cp,fBall2,lx,ly,lz,x,y,z,sx,sy,sz); - /* - ** We have an intersection to test. 
- */ - if (cp < nSplit) { - cp = LOWER(cp); - continue; - } - else { - for (pj=c[cp].pLower;pj<=c[cp].pUpper;++pj) { - dx = sx - NP_POS(smx->kd, pj, 0); - dy = sy - NP_POS(smx->kd, pj, 1); - dz = sz - NP_POS(smx->kd, pj, 2); - fDist2 = dx*dx + dy*dy + dz*dz; - if (fDist2 < fBall2) { - smx->fList[nCnt] = fDist2; - smx->pList[nCnt++] = pj; - /* Insert debugging flag here */ - if (nCnt > smx->nListSize) { - fprintf(stderr,"nCnt too big.\n"); - } - } - } - } - GetNextCell: - SETNEXT(cp); - if (cp == ROOT) break; - } - assert(nCnt <= smx->nListSize); - return(nCnt); - } - - -void smSmooth(SMX smx,void (*fncSmooth)(SMX,int,int,int *,float *)) -{ - KDN *c; - PARTICLE *p; - PQ *pq,*pqLast; - PQ_STATIC; - int cell; - int pi,pin,pj,pNext,nCnt,nSmooth; - float dx,dy,dz,x,y,z,h2,ax,ay,az; - float temp_ri[3]; - - - for (pi=0;pikd->nActive;++pi) { - if (IMARK) smx->pfBall2[pi] = -1.0; - else smx->pfBall2[pi] = 1.0; /* pretend it is already done! */ - } - smx->pfBall2[smx->kd->nActive] = -1.0; /* stop condition */ - for (pi=0;pikd->nActive;++pi) { - smx->iMark[pi] = 0; - } - pqLast = &smx->pq[smx->nSmooth-1]; - c = smx->kd->kdNodes; - p = smx->kd->p; - nSmooth = smx->nSmooth; - /* - ** Initialize Priority Queue. - */ - pin = 0; - pNext = 1; - ax = 0.0; - ay = 0.0; - az = 0.0; - for (pq=smx->pq,pj=0;pq<=pqLast;++pq,++pj) { - smx->iMark[pj] = 1; - pq->p = pj; - pq->ax = ax; - pq->ay = ay; - pq->az = az; - } - while (1) { - if (smx->pfBall2[pin] >= 0) { - /* - ** Find next particle which is not done, and load the - ** priority queue with nSmooth number of particles. - */ - while (smx->pfBall2[pNext] >= 0) ++pNext; - /* - ** Check if we are really finished. - */ - if (pNext == smx->kd->nActive) break; - pi = pNext; - ++pNext; - x = NP_POS(smx->kd, pi, 0); - y = NP_POS(smx->kd, pi, 1); - z = NP_POS(smx->kd, pi, 2); - /* printf("%"ISYM": %"GSYM" %"GSYM" %"GSYM"\n", pi, x, y, z); */ - /* - ** First find the "local" Bucket. - ** This could merely be the closest bucket to ri[3]. - */ - cell = ROOT; - while (cell < smx->kd->nSplit) { - if (NP_POS(smx->kd, pi, c[cell].iDim) < c[cell].fSplit) - cell = LOWER(cell); - else - cell = UPPER(cell); - } - /* - ** Remove everything from the queue. - */ - smx->pqHead = NULL; - for (pq=smx->pq;pq<=pqLast;++pq) smx->iMark[pq->p] = 0; - /* - ** Add everything from pj up to and including pj+nSmooth-1. - */ - pj = c[cell].pLower; - if (pj > smx->kd->nActive - nSmooth) - pj = smx->kd->nActive - nSmooth; - for (pq=smx->pq;pq<=pqLast;++pq) { - smx->iMark[pj] = 1; - dx = x - NP_POS(smx->kd, pj, 0); - dy = y - NP_POS(smx->kd, pj, 1); - dz = z - NP_POS(smx->kd, pj, 2); - pq->fKey = dx*dx + dy*dy + dz*dz; - pq->p = pj++; - pq->ax = 0.0; - pq->ay = 0.0; - pq->az = 0.0; - } - PQ_BUILD(smx->pq,nSmooth,smx->pqHead); - } - else { - /* - ** Calculate the priority queue using the previous particles! 
- */ - pi = pin; - x = NP_POS(smx->kd, pi, 0); - y = NP_POS(smx->kd, pi, 1); - z = NP_POS(smx->kd, pi, 2); - smx->pqHead = NULL; - for (pq=smx->pq;pq<=pqLast;++pq) { - pq->ax -= ax; - pq->ay -= ay; - pq->az -= az; - dx = x + pq->ax - NP_POS(smx->kd, pq->p, 0); - dy = y + pq->ay - NP_POS(smx->kd, pq->p, 1); - dz = z + pq->az - NP_POS(smx->kd, pq->p, 2); - pq->fKey = dx*dx + dy*dy + dz*dz; - } - PQ_BUILD(smx->pq,nSmooth,smx->pqHead); - ax = 0.0; - ay = 0.0; - az = 0.0; - } - temp_ri[0] = NP_POS(smx->kd, pi, 0); - temp_ri[1] = NP_POS(smx->kd, pi, 1); - temp_ri[2] = NP_POS(smx->kd, pi, 2); - smBallSearch(smx,smx->pqHead->fKey,temp_ri); - smx->pfBall2[pi] = smx->pqHead->fKey; - /* - ** Pick next particle, 'pin'. - ** Create fList and pList for function 'fncSmooth'. - */ - pin = pi; - nCnt = 0; - h2 = smx->pqHead->fKey; - for (pq=smx->pq;pq<=pqLast;++pq) { - if (pq == smx->pqHead) continue; - smx->pList[nCnt] = pq->p; - smx->fList[nCnt++] = pq->fKey; - if (smx->pfBall2[pq->p] >= 0) continue; - if (pq->fKey < h2) { - pin = pq->p; - h2 = pq->fKey; - ax = pq->ax; - ay = pq->ay; - az = pq->az; - } - } - (*fncSmooth)(smx,pi,nCnt,smx->pList,smx->fList); - } - } - - -void smReSmooth(SMX smx,void (*fncSmooth)(SMX,int,int,int *,float *)) -{ - PARTICLE *p; - int pi,nSmooth; - float temp_ri[3]; - - p = smx->kd->p; - for (pi=0;pikd->nActive;++pi) { - if (IMARK == 0) continue; - /* - ** Do a Ball Gather at the radius of the most distant particle - ** which is smDensity sets in smx->pBall[pi]. - */ - temp_ri[0] = NP_POS(smx->kd, pi, 0); - temp_ri[1] = NP_POS(smx->kd, pi, 1); - temp_ri[2] = NP_POS(smx->kd, pi, 2); - nSmooth = smBallGather(smx,smx->pfBall2[pi],temp_ri); - (*fncSmooth)(smx,pi,nSmooth,smx->pList,smx->fList); - } - } - - -void smDensity(SMX smx,int pi,int nSmooth,int *pList,float *fList) -{ - float ih2,r2,rs,fDensity; - int i,pj; - - ih2 = 4.0/smx->pfBall2[pi]; - fDensity = 0.0; - for (i=0;ikd, pj); -#else - fDensity += rs*smx->kd->fMass; -#endif - } - NP_DENS(smx->kd, pi) = M_1_PI*sqrt(ih2)*ih2*fDensity; - } - - -void smDensitySym(SMX smx,int pi,int nSmooth,int *pList,float *fList) -{ - float fNorm,ih2,r2,rs; - int i,pj; - - ih2 = 4.0/smx->pfBall2[pi]; - fNorm = 0.5*M_1_PI*sqrt(ih2)*ih2; - for (i=0;ikd, pi) += rs*NP_MASS(smx->kd, pj); - NP_DENS(smx->kd, pj) += rs*NP_MASS(smx->kd, pi); -#else - smx->kd->p[pi].fDensity += rs*smx->kd->fMass; - smx->kd->p[pj].fDensity += rs*smx->kd->fMass; -#endif - } - } - -/* I'm not using the following function, but I left it here in case someone -wants the densities outputted in Tipsy format. But you're probably better -off just fetching the smooth() program from the HPCC web site... 
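As a rough guide to what smSmooth()/smDensity() compute, each particle's density is estimated from its nSmooth nearest neighbours, with the smoothing length taken as half the distance to the most distant of them (ih2 = 4/fBall2 above). The sketch below uses a brute-force neighbour search instead of the kd-tree/priority queue, and assumes the standard cubic-spline kernel; the exact kernel expression inside smDensity is not fully legible in this diff, so treat the weighting as an assumption:

    import numpy as np

    def spline_kernel(r, h):
        """Cubic-spline kernel W(r; h) with support 2h (assumed form)."""
        q = r / h
        w = np.zeros_like(q)
        inner = q < 1.0
        outer = (q >= 1.0) & (q < 2.0)
        w[inner] = 1.0 - 1.5 * q[inner]**2 + 0.75 * q[inner]**3
        w[outer] = 0.25 * (2.0 - q[outer])**3
        return w / (np.pi * h**3)

    def knn_density(pos, mass, n_smooth=64):
        """Estimate a density at every particle from its n_smooth nearest neighbours."""
        dens = np.empty(len(pos))
        for i, x in enumerate(pos):
            d = np.sqrt(((pos - x)**2).sum(axis=1))
            idx = np.argsort(d)[:n_smooth]      # nSmooth nearest neighbours (self included)
            h = 0.5 * d[idx[-1]]                # mirrors ih2 = 4/fBall2 above
            dens[i] = (mass[idx] * spline_kernel(d[idx], h)).sum()
        return dens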
*/ - -void smOutDensity(SMX smx,FILE *fp) -{ - int i,iCnt; - - fprintf(fp,"%"ISYM"\n",smx->kd->nParticles); - iCnt = 0; - for (i=0;ikd->nGas;++i) { - if (smx->kd->bGas) { - if (IMARK) - fprintf(fp,"%.8"GSYM"\n",NP_DENS(smx->kd, iCnt)); - else fprintf(fp,"0\n"); - ++iCnt; - } - else fprintf(fp,"0\n"); - } - for (i=0;ikd->nDark;++i) { - if (smx->kd->bDark) { - if (IMARK) - fprintf(fp,"%.8"GSYM"\n",NP_DENS(smx->kd, iCnt)); - else fprintf(fp,"0\n"); - ++iCnt; - } - else fprintf(fp,"0\n"); - } - for (i=0;ikd->nStar;++i) { - if (smx->kd->bStar) { - if (IMARK) - fprintf(fp,"%.8"GSYM"\n",NP_DENS(smx->kd, iCnt)); - else fprintf(fp,"0\n"); - ++iCnt; - } - else fprintf(fp,"0\n"); - } -} - - diff --git a/yt/analysis_modules/halo_finding/hop/kd.h b/yt/analysis_modules/halo_finding/hop/kd.h deleted file mode 100644 index cf0b733c6d3..00000000000 --- a/yt/analysis_modules/halo_finding/hop/kd.h +++ /dev/null @@ -1,203 +0,0 @@ -/* KD.H */ -/* This was written by Joachim Stadel and the NASA HPCC ESS at -the University of Washington Department of Astronomy as part of -the SMOOTH program, v2.0.1. -URL: http://www-hpcc.astro.washington.edu/tools/SMOOTH */ - -/* DJE--I have made a few alterations to the PARTICLE structure -in order to reduce memory consumption. */ - -/* HOP Version 1.0 (12/15/97) -- Original Release */ - -/* GLB--set different masses on */ - -#define DIFFERENT_MASSES - -//#include "macros_and_parameters.h" - -#ifndef KD_HINCLUDED -#define KD_HINCLUDED - -#include "Python.h" -#include "numpy/ndarrayobject.h" - -#define ROOT 1 -#define LOWER(i) (i<<1) -#define UPPER(i) ((i<<1)+1) -#define PARENT(i) (i>>1) -#define SIBLING(i) ((i&1)?i-1:i+1) -#define SETNEXT(i)\ -{\ - while (i&1) i=i>>1;\ - ++i;\ - } - -#define DARK 1 -#define GAS 2 -#define STAR 4 - -typedef struct Particle { - int np_index; - int iHop; - int iOrder; -#if 0 - float r[3]; - float fDensity; - // int iID; /* the real ID of the particle S. Skory */ - int iHop; /* DJE: The number of the highest-density neighbor; - Later, the group number. 
*/ -#ifdef DIFFERENT_MASSES - float fMass; -#endif - /* DJE: The following are unused and cost too much memory to keep */ - /* float v[3]; */ - /* float fMass; */ - /* int iMark; */ - /* float vMean[3]; */ - /* float fVelDisp2; */ -#endif - } PARTICLE; - -typedef struct bndBound { - float fMin[3]; - float fMax[3]; - } BND; - -typedef struct kdNode { - float fSplit; - BND bnd; - int iDim; - int pLower; - int pUpper; - } KDN; - -typedef struct kdContext { - int nBucket; - int nParticles; - int nDark; - int nGas; - int nStar; - int bDark; - int bGas; - int bStar; - int nActive; - float fTime; - BND bnd; - int nLevels; - int nNodes; - int nSplit; - float fMass; /* DJE: If all particles have the same mass */ - PARTICLE *p; - KDN *kdNodes; - int uSecond; - int uMicro; - npy_float64 *np_densities; - npy_float64 *np_pos[3]; - npy_float64 *np_masses; - float totalmass; - } * KD; - - -#define INTERSECT(c,cp,fBall2,lx,ly,lz,x,y,z,sx,sy,sz)\ -{\ - float dx,dy,dz,dx1,dy1,dz1,fDist2;\ - dx = c[cp].bnd.fMin[0]-x;\ - dx1 = x-c[cp].bnd.fMax[0];\ - dy = c[cp].bnd.fMin[1]-y;\ - dy1 = y-c[cp].bnd.fMax[1];\ - dz = c[cp].bnd.fMin[2]-z;\ - dz1 = z-c[cp].bnd.fMax[2];\ - if (dx > 0.0) {\ - dx1 += lx;\ - if (dx1 < dx) {\ - fDist2 = dx1*dx1;\ - sx = x+lx;\ - }\ - else {\ - fDist2 = dx*dx;\ - sx = x;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else if (dx1 > 0.0) {\ - dx += lx;\ - if (dx < dx1) {\ - fDist2 = dx*dx;\ - sx = x-lx;\ - }\ - else {\ - fDist2 = dx1*dx1;\ - sx = x;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else {\ - fDist2 = 0.0;\ - sx = x;\ - }\ - if (dy > 0.0) {\ - dy1 += ly;\ - if (dy1 < dy) {\ - fDist2 += dy1*dy1;\ - sy = y+ly;\ - }\ - else {\ - fDist2 += dy*dy;\ - sy = y;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else if (dy1 > 0.0) {\ - dy += ly;\ - if (dy < dy1) {\ - fDist2 += dy*dy;\ - sy = y-ly;\ - }\ - else {\ - fDist2 += dy1*dy1;\ - sy = y;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else {\ - sy = y;\ - }\ - if (dz > 0.0) {\ - dz1 += lz;\ - if (dz1 < dz) {\ - fDist2 += dz1*dz1;\ - sz = z+lz;\ - }\ - else {\ - fDist2 += dz*dz;\ - sz = z;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else if (dz1 > 0.0) {\ - dz += lz;\ - if (dz < dz1) {\ - fDist2 += dz*dz;\ - sz = z-lz;\ - }\ - else {\ - fDist2 += dz1*dz1;\ - sz = z;\ - }\ - if (fDist2 > fBall2) goto GetNextCell;\ - }\ - else {\ - sz = z;\ - }\ - } - - -void kdTime(KD,int *,int *); -int kdInit(KD *,int); -int kdReadTipsy(KD,FILE *,int,int,int); -void kdInMark(KD,char *); -int kdBuildTree(KD); -void kdOrder(KD); -void kdFinish(KD); - -#endif diff --git a/yt/analysis_modules/halo_finding/hop/slice.h b/yt/analysis_modules/halo_finding/hop/slice.h deleted file mode 100644 index a198124f311..00000000000 --- a/yt/analysis_modules/halo_finding/hop/slice.h +++ /dev/null @@ -1,97 +0,0 @@ -/* SLICE.H, Daniel Eisenstein, 1997 */ -/* Based on a paper by Daniel Eisenstein & Piet Hut, -"HOP: A New Group-Finding Algorithm for N-body Simulations." 
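The INTERSECT macro above is dense, but what it computes per axis is the squared minimum-image distance from a query point to a kd-node bounding box in a periodic domain, along with the (possibly wrapped) image of the point to use for subsequent distance tests. A plain-Python rendering of that calculation, written as an illustrative sketch rather than code from yt (the macro additionally bails out early once the running distance exceeds fBall2):

    import numpy as np

    def periodic_box_dist2(point, box_min, box_max, period):
        """Return (dist2, shifted_point) for the nearest periodic image of point
        relative to the axis-aligned box [box_min, box_max]."""
        shifted = np.array(point, dtype=float)
        dist2 = 0.0
        for k in range(3):
            lo_gap = box_min[k] - point[k]      # point below the lower face
            hi_gap = point[k] - box_max[k]      # point above the upper face
            if lo_gap > 0.0:
                # Approach the lower face directly, or wrap upward by one period.
                if hi_gap + period[k] < lo_gap:
                    dist2 += (hi_gap + period[k])**2
                    shifted[k] = point[k] + period[k]
                else:
                    dist2 += lo_gap**2
            elif hi_gap > 0.0:
                # Approach the upper face directly, or wrap downward by one period.
                if lo_gap + period[k] < hi_gap:
                    dist2 += (lo_gap + period[k])**2
                    shifted[k] = point[k] - period[k]
                else:
                    dist2 += hi_gap**2
            # else: the coordinate lies inside the box slab; no contribution.
        return dist2, shifted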
-See the included documentation or view it at -http://www.sns.ias.edu/~eisenste/hop/hop_doc.html */ - -/* Version 1.0 (12/15/97) -- Original Release */ - -#ifndef NBODYUTIL_H -#define NBODYUTIL_H - -#define RHOCRIT 277.5 /* in h^2 Msun/kpc^3 */ -#define HTIME 9.7776 /* in h^-1 Gyr */ -#define GNEWT 4.51e-6 /* in kpc^3/Msun/Gyr^2, h cancels */ -#define KMS 0.975 /* to convert from kpc/Gyr to km/s */ - -#define PI 3.141592654 -#define ROOT2 1.414213562 -#define ROOTPI2 1.253314137 /* sqrt(Pi/2) */ - -#include -#include -#include -//#include "macros_and_parameters.h" - -/* Structure to hold data from one time slice and info about the slice */ -/* This needn't hold all of the points; it might just hold some subset */ -/* Hence, data vectors are just given as arrays here */ -/* Usage: pid[] Array goes from 0 to pid[0]. pid is NULL if the particle -numbering is just the trivial mapping. Kinematic arrays go from 1 to numlist -and can be NULL if the data hasn't been read. Numlist==pid[0] often */ - -typedef struct slicestruct { -#ifdef NOT_USED - /* First, some generic stuff about the simulation */ - float omega, lambda, curv; - float h0; /* H_0 = 100 km/s/Mpc * h0 */ - float specn, gamma; /* The spectral index and BBKS PS Gamma */ - float sigma8; /* At z=0 */ - - /* Next, some information about this slice in particular */ - float z, a, t, growth; - /* a=1 at z=0, g=a at early times */ - float hubb; /* This is a*H(z), but without h0. So it's 0.1 km/s/kpc - redshifted appropriately. This is used to relate - comoving positions and peculiar velocities */ - - /* Now some information about the data */ - int numblocks; /* Number of blocks in the data file */ - int numperblock; /* Number of particles per block */ - float masspart; /* Mass per particle in h^-1 Msun */ - float boxsize; /* Comoving size of box in h^-1 kpc */ - float physsize; /* Physical Size in h^-1 kpc */ - float velscale; /* To turn raw velocity data into peculiar vel in km/s*/ -#endif - - int numpart; /* Total number of particles in the simulation */ - /* Now the data itself */ - int *pid; /* The id number of the particle */ - /* pid[0] holds the number of particles in the list */ - int offset; /* If pid==NULL, then the arrays are consecutively - numbered, starting from offset+1 */ - int numlist; /* Length of arrays below, set when allocated */ - float *px, *py, *pz, *vx, *vy, *vz; /* The kinematic information */ - - /* And here's the group tag information */ - int *ntag; /* Only stored for the numlist above */ - //int *ID; /* The real, true ID of the particle. 
S Skory */ - int numgroups; /* The number of groups read out of the tag file */ -} Slice; /* Type Slice is defined */ - -/* Prototypes */ -Slice *newslice(); -void free_tags(Slice *s); -void free_data(Slice *s); -void free_slice(Slice *s); -int f77write(FILE *f, void *p, int len); -int f77read(FILE *f, void *p, int len); -float *vector(long nl, long nh); -int *ivector(long nl, long nh); -void free_vector(float *v, long nl, long nh); -void free_ivector(int *v, long nl, long nh); - -void myerror(char *message); -void mywarn(char *message); - -int read_header(FILE *f, Slice *s); -void normalizedata(Slice *s, int conp, int conv); -int read_alldata(FILE *f, FILE *ftag, Slice *s, int conp, int conv); -int read_partdata(FILE *f, FILE *ftag, Slice *s); - -int readtag(FILE *f, int numread, int *ntag); -int skiptagheader(FILE *f, Slice *s); -int readalltags(FILE *f, Slice *s); - - -#endif /* NBODYUTIL_H */ diff --git a/yt/analysis_modules/halo_finding/hop/smooth.h b/yt/analysis_modules/halo_finding/hop/smooth.h deleted file mode 100644 index 71d93c6199e..00000000000 --- a/yt/analysis_modules/halo_finding/hop/smooth.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SMOOTH.H */ -/* This was written by Joachim Stadel and the NASA HPCC ESS at -the University of Washington Department of Astronomy as part of -the SMOOTH program, v2.0.1. -URL: http://www-hpcc.astro.washington.edu/tools/SMOOTH */ - -/* DJE--I have made a few additions to the SMX structure -in order to store information necessary for HOP. I have also -added the Boundary structure. */ - -/* HOP Version 1.0 (12/15/97) -- Original Release */ - -#ifndef SMOOTH_HINCLUDED -#define SMOOTH_HINCLUDED - -#include "kd.h" -//#include "macros_and_parameters.h" - -#define RESMOOTH_SAFE 30 - -/* DJE: Define this structure to hold the boundary data. 
*/ -typedef struct boundarystruct { - int nGroup1, nGroup2; /* The two groups involved, ordered such - that nGroup1>1];\ - (pq)[PQ_j].pqFromExt = &(pq)[(PQ_j+(n))>>1];\ - }\ - } - - -#define PQ_BUILD(pq,n,q)\ -{\ - for (PQ_j=(n)-1;PQ_j>0;--PQ_j) {\ - PQ_i = (PQ_j<<1);\ - if (PQ_i < (n)) PQ_t = (pq)[PQ_i].pqWinner;\ - else PQ_t = &(pq)[PQ_i-(n)];\ - ++PQ_i;\ - if (PQ_i < (n)) PQ_lt = (pq)[PQ_i].pqWinner;\ - else PQ_lt = &(pq)[PQ_i-(n)];\ - if (PQ_t->fKey < PQ_lt->fKey) {\ - (pq)[PQ_j].pqLoser = PQ_t;\ - (pq)[PQ_j].pqWinner = PQ_lt;\ - }\ - else {\ - (pq)[PQ_j].pqLoser = PQ_lt;\ - (pq)[PQ_j].pqWinner = PQ_t;\ - }\ - }\ - (q) = (pq)[1].pqWinner;\ - } - - -#define PQ_REPLACE(q)\ -{\ - PQ_t = (q)->pqFromExt;\ - while (PQ_t) {\ - if (PQ_t->pqLoser->fKey > (q)->fKey) {\ - PQ_lt = PQ_t->pqLoser;\ - PQ_t->pqLoser = (q);\ - (q) = PQ_lt;\ - }\ - PQ_t = PQ_t->pqFromInt;\ - }\ - } - - - -int smInit(SMX *,KD,int,float *); -void smFinish(SMX); -void smBallSearch(SMX,float,float *); -int smBallGather(SMX,float,float *); -void smSmooth(SMX,void (*)(SMX,int,int,int *,float *)); -void smReSmooth(SMX,void (*)(SMX,int,int,int *,float *)); -void smDensity(SMX,int,int,int *,float *); -void smDensitySym(SMX,int,int,int *,float *); -void smMeanVel(SMX,int,int,int *,float *); -void smMeanVelSym(SMX,int,int,int *,float *); -void smVelDisp(SMX,int,int,int *,float *); -void smVelDispSym(SMX,int,int,int *,float *); -void smNull(SMX,int,int,int *,float *); -void smOutDensity(SMX,FILE *); -void smOutMeanVel(SMX,FILE *); -void smOutVelDisp(SMX,FILE *); -void smOutPhase(SMX,FILE *); -void smOutMach(SMX,FILE *); -void smOutSpeed(SMX,FILE *); - -#endif - - - diff --git a/yt/analysis_modules/halo_finding/rockstar/__init__.py b/yt/analysis_modules/halo_finding/rockstar/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/halo_finding/rockstar/api.py b/yt/analysis_modules/halo_finding/rockstar/api.py deleted file mode 100644 index 94ea5e02616..00000000000 --- a/yt/analysis_modules/halo_finding/rockstar/api.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -API for Rockstar halo finding - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from .rockstar import RockstarHaloFinder diff --git a/yt/analysis_modules/halo_finding/rockstar/rockstar.py b/yt/analysis_modules/halo_finding/rockstar/rockstar.py deleted file mode 100644 index 295048fbb50..00000000000 --- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py +++ /dev/null @@ -1,370 +0,0 @@ -""" -Operations to get Rockstar loaded up - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.config import ytcfg -from yt.data_objects.time_series import \ - DatasetSeries -from yt.extern import \ - six -from yt.funcs import \ - is_root, mylog -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - ParallelAnalysisInterface, ProcessorPool -from yt.utilities.exceptions import YTRockstarMultiMassNotSupported - -from . import rockstar_interface - -import socket -import time -import os -import numpy as np -from os import path - -class InlineRunner(ParallelAnalysisInterface): - def __init__(self): - # If this is being run inline, num_readers == comm.size, always. - psize = ytcfg.getint("yt", "__global_parallel_size") - self.num_readers = psize - # No choice for you, everyone's a writer too! - self.num_writers = psize - - def run(self, handler, pool): - # If inline, we use forks. - server_pid = 0 - # Start a server on only one machine/fork. - if pool.comm.rank == 0: - server_pid = os.fork() - if server_pid == 0: - handler.start_server() - os._exit(0) - # Start writers on all. - writer_pid = 0 - time.sleep(0.05 + pool.comm.rank/10.0) - writer_pid = os.fork() - if writer_pid == 0: - handler.start_writer() - os._exit(0) - # Everyone's a reader! - time.sleep(0.05 + pool.comm.rank/10.0) - handler.start_reader() - # Make sure the forks are done, which they should be. - if writer_pid != 0: - os.waitpid(writer_pid, 0) - if server_pid != 0: - os.waitpid(server_pid, 0) - - def setup_pool(self): - pool = ProcessorPool() - # Everyone is a reader, and when we're inline, that's all that matters. - readers = np.arange(ytcfg.getint("yt", "__global_parallel_size")) - pool.add_workgroup(ranks=readers, name="readers") - return pool, pool.workgroups[0] - -class StandardRunner(ParallelAnalysisInterface): - def __init__(self, num_readers, num_writers): - self.num_readers = num_readers - psize = ytcfg.getint("yt", "__global_parallel_size") - if num_writers is None: - self.num_writers = psize - num_readers - 1 - else: - self.num_writers = min(num_writers, psize) - if self.num_readers + self.num_writers + 1 != psize: - mylog.error('%i reader + %i writers + 1 server != %i mpi', - self.num_readers, self.num_writers, psize) - raise RuntimeError - - def run(self, handler, wg): - # Not inline so we just launch them directly from our MPI threads. - if wg.name == "server": - handler.start_server() - if wg.name == "readers": - time.sleep(0.05) - handler.start_reader() - if wg.name == "writers": - time.sleep(0.1) - handler.start_writer() - - def setup_pool(self): - pool = ProcessorPool() - pool, workgroup = ProcessorPool.from_sizes( - [ (1, "server"), - (self.num_readers, "readers"), - (self.num_writers, "writers") ] - ) - return pool, workgroup - -class RockstarHaloFinder(ParallelAnalysisInterface): - r"""Spawns the Rockstar Halo finder, distributes dark matter - particles and finds halos. - - The halo finder requires dark matter particles of a fixed size. - Rockstar has three main processes: reader, writer, and the - server which coordinates reader/writer processes. - - Parameters - ---------- - ts : DatasetSeries, ~yt.data_objects.static_output.Dataset - This is the data source containing the DM particles. Because - halo IDs may change from one snapshot to the next, the only - way to keep a consistent halo ID across time is to feed - Rockstar a set of snapshots, ie, via DatasetSeries. 
- num_readers : int - The number of reader can be increased from the default - of 1 in the event that a single snapshot is split among - many files. This can help in cases where performance is - IO-limited. Default is 1. If run inline, it is - equal to the number of MPI threads. - num_writers : int - The number of writers determines the number of processing threads - as well as the number of threads writing output data. - The default is set to comm.size-num_readers-1. If run inline, - the default is equal to the number of MPI threads. - outbase : str - This is where the out*list files that Rockstar makes should be - placed. Default is 'rockstar_halos'. - particle_type : str - This is the "particle type" that can be found in the data. This can be - a filtered particle or an inherent type. - force_res : float - This parameter specifies the force resolution that Rockstar uses - in units of Mpc/h. - If no value is provided, this parameter is automatically set to - the width of the smallest grid element in the simulation from the - last data snapshot (i.e. the one where time has evolved the - longest) in the time series: - ``ds_last.index.get_smallest_dx().in_units("Mpc/h")``. - total_particles : int - If supplied, this is a pre-calculated total number of particles present - in the simulation. For example, this is useful when analyzing a series - of snapshots where the number of dark matter particles should not - change and this will save some disk access time. If left unspecified, - it will be calculated automatically. Default: ``None``. - particle_mass : float - If supplied, use this as the particle mass supplied to rockstar. - Otherwise, the smallest particle mass will be identified and calculated - internally. This is useful for multi-dm-mass simulations. Note that - this will only give sensible results for halos that are not "polluted" - by lower resolution particles. Default: ``None``. - - Returns - ------- - None - - Examples - -------- - - To use the script below you must run it using MPI: - mpirun -np 4 python run_rockstar.py - - >>> import yt - >>> yt.enable_parallelism() - >>> from yt.analysis_modules.halo_finding.rockstar.api import \ - ... RockstarHaloFinder - - >>> # create a particle filter to remove star particles - >>> @yt.particle_filter("dark_matter", requires=["creation_time"]) - ... def _dm_filter(pfilter, data): - ... return data["creation_time"] <= 0.0 - - >>> def setup_ds(ds): - ... ds.add_particle_filter("dark_matter") - - >>> es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") - >>> es.get_time_series(setup_function=setup_ds, redshift_data=False) - - >>> rh = RockstarHaloFinder(es, num_readers=1, num_writers=2, - ... particle_type="dark_matter") - >>> rh.run() - - """ - def __init__(self, ts, num_readers = 1, num_writers = None, - outbase="rockstar_halos", particle_type="all", - force_res=None, total_particles=None, dm_only=False, - particle_mass=None, min_halo_size=25): - if is_root(): - mylog.info("The citation for the Rockstar halo finder can be found at") - mylog.info("https://ui.adsabs.harvard.edu/abs/2013ApJ...762..109B") - ParallelAnalysisInterface.__init__(self) - # Decide how we're working. 
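The docstring and StandardRunner code above describe how the non-inline run divides MPI ranks: one server, num_readers readers, and the remaining ranks as writers. A tiny sketch of that bookkeeping, as a hypothetical helper that mirrors the defaults and the consistency check in StandardRunner.__init__:

    def partition_ranks(comm_size, num_readers=1, num_writers=None):
        """Return (num_readers, num_writers), or raise if they cannot fill comm_size."""
        if num_writers is None:
            num_writers = comm_size - num_readers - 1   # default used above
        num_writers = min(num_writers, comm_size)
        if num_readers + num_writers + 1 != comm_size:
            raise RuntimeError(
                "%d readers + %d writers + 1 server != %d MPI ranks"
                % (num_readers, num_writers, comm_size))
        return num_readers, num_writers

For example, partition_ranks(4) gives one reader and two writers alongside the server, matching the "mpirun -np 4" usage shown in the RockstarHaloFinder docstring.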
- if ytcfg.getboolean("yt", "inline"): - self.runner = InlineRunner() - else: - self.runner = StandardRunner(num_readers, num_writers) - self.num_readers = self.runner.num_readers - self.num_writers = self.runner.num_writers - mylog.info("Rockstar is using %d readers and %d writers", - self.num_readers, self.num_writers) - # Note that Rockstar does not support subvolumes. - # We assume that all of the snapshots in the time series - # use the same domain info as the first snapshots. - if not isinstance(ts, DatasetSeries): - ts = DatasetSeries([ts]) - self.ts = ts - self.particle_type = particle_type - self.outbase = six.b(outbase) - self.min_halo_size = min_halo_size - if force_res is None: - tds = ts[-1] # Cache a reference - self.force_res = tds.index.get_smallest_dx().in_units("Mpc/h") - # We have to delete now to wipe the index - del tds - else: - self.force_res = force_res - self.total_particles = total_particles - self.dm_only = dm_only - self.particle_mass = particle_mass - # Setup pool and workgroups. - self.pool, self.workgroup = self.runner.setup_pool() - p = self._setup_parameters(ts) - params = self.comm.mpi_bcast(p, root = self.pool['readers'].ranks[0]) - self.__dict__.update(params) - self.handler = rockstar_interface.RockstarInterface(self.ts) - - def _setup_parameters(self, ts): - if self.workgroup.name != "readers": return None - tds = ts[0] - ptype = self.particle_type - if ptype not in tds.particle_types and ptype != 'all': - has_particle_filter = tds.add_particle_filter(ptype) - if not has_particle_filter: - raise RuntimeError("Particle type (filter) %s not found." % (ptype)) - - dd = tds.all_data() - # Get DM particle mass. - - particle_mass = self.particle_mass - if particle_mass is None: - pmass_min, pmass_max = dd.quantities.extrema( - (ptype, "particle_mass"), non_zero = True) - if np.abs(pmass_max - pmass_min) / pmass_max > 0.01: - raise YTRockstarMultiMassNotSupported(pmass_min, pmass_max, - ptype) - particle_mass = pmass_min - - p = {} - if self.total_particles is None: - # Get total_particles in parallel. - tp = dd.quantities.total_quantity((ptype, "particle_ones")) - p['total_particles'] = int(tp) - mylog.warning("Total Particle Count: %0.3e", int(tp)) - p['left_edge'] = tds.domain_left_edge.in_units("Mpccm/h") - p['right_edge'] = tds.domain_right_edge.in_units("Mpccm/h") - p['center'] = (tds.domain_right_edge.in_units("Mpccm/h") + tds.domain_left_edge.in_units("Mpccm/h"))/2.0 - p['particle_mass'] = self.particle_mass = particle_mass - p['particle_mass'].convert_to_units("Msun / h") - del tds - return p - - def __del__(self): - try: - self.pool.free_all() - except AttributeError: - # This really only acts to cut down on the misleading - # error messages when/if this class is called incorrectly - # or some other error happens and self.pool hasn't been created - # already. 
- pass - - def _get_hosts(self): - if self.comm.rank == 0 or self.comm.size == 1: - - #Temporary mac hostname fix - try: - server_address = socket.gethostname() - socket.gethostbyname(server_address) - except socket.gaierror: - server_address = "localhost" - - sock = socket.socket() - sock.bind(('', 0)) - port = sock.getsockname()[-1] - del sock - else: - server_address, port = None, None - self.server_address, self.port = self.comm.mpi_bcast( - (server_address, port)) - self.server_address = six.b(str(self.server_address)) - self.port = six.b(str(self.port)) - - def run(self, block_ratio = 1, callbacks = None, restart = False): - """ - - """ - if block_ratio != 1: - raise NotImplementedError - self._get_hosts() - # Find restart output number - num_outputs = len(self.ts) - if restart: - restart_file = os.path.join(self.outbase, "restart.cfg") - if not os.path.exists(restart_file): - raise RuntimeError("Restart file %s not found" % (restart_file)) - with open(restart_file) as restart_fh: - for l in restart_fh: - if l.startswith("RESTART_SNAP"): - restart_num = int(l.split("=")[1]) - if l.startswith("NUM_WRITERS"): - num_writers = int(l.split("=")[1]) - if num_writers != self.num_writers: - raise RuntimeError( - "Number of writers in restart has changed from the original " - "run (OLD = %d, NEW = %d). To avoid problems in the " - "restart, choose the same number of writers." % \ - (num_writers, self.num_writers)) - # Remove the datasets that were already analyzed - self.ts._pre_outputs = self.ts._pre_outputs[restart_num:] - else: - restart_num = 0 - self.handler.setup_rockstar( - self.server_address, - self.port, - num_outputs, self.total_particles, - self.particle_type, - particle_mass = self.particle_mass, - parallel = self.comm.size > 1, - num_readers = self.num_readers, - num_writers = self.num_writers, - writing_port = -1, - block_ratio = block_ratio, - outbase = self.outbase, - force_res = self.force_res, - callbacks = callbacks, - restart_num = restart_num, - min_halo_size = self.min_halo_size) - # Make the directory to store the halo lists in. - if not self.outbase: - self.outbase = os.getcwd() - if self.comm.rank == 0 and not restart: - if not os.path.exists(self.outbase): - os.makedirs(self.outbase) - # Make a record of which dataset corresponds to which set of - # output files because it will be easy to lose this connection. - fp = open(self.outbase.decode() + '/datasets.txt', 'w') - fp.write("# dsname\tindex\n") - for i, ds in enumerate(self.ts): - dsloc = path.join(path.relpath(ds.fullpath), ds.basename) - line = "%s\t%d\n" % (dsloc, i) - fp.write(line) - fp.close() - # This barrier makes sure the directory exists before it might be used. - self.comm.barrier() - if self.comm.size == 1: - self.handler.call_rockstar() - else: - # And run it! 
- self.runner.run(self.handler, self.workgroup) - self.comm.barrier() - self.pool.free_all() diff --git a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx deleted file mode 100644 index 0f8ed7574db..00000000000 --- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx +++ /dev/null @@ -1,439 +0,0 @@ -import numpy as np -import os, sys -cimport numpy as np -cimport cython -from cython cimport floating -#from cpython.mem cimport PyMem_Malloc -from libc.stdlib cimport malloc, free -import sys - -# Importing relevant rockstar data types particle, fof halo, halo - -cdef import from "particle.h": - struct particle: - np.int64_t id - float pos[6] - -cdef import from "rockstar.h": - particle *global_particles "p" - void rockstar_cleanup() - -cdef import from "fof.h": - struct fof: - np.int64_t num_p - particle *particles - -cdef import from "halo.h": - struct halo: - np.int64_t id - float pos[6] - float corevel[3] - float bulkvel[3] - float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms - float J[3] - float energy, spin - float alt_m[4] - float Xoff, Voff, b_to_a, c_to_a - float A[3] - float b_to_a2, c_to_a2 - float A2[3] - float bullock_spin, kin_to_pot, m_pe_b, m_pe_d - np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core - float min_pos_err, min_vel_err, min_bulkvel_err, _pad - -ctypedef struct haloflat: - np.int64_t id - float pos_x, pos_y, pos_z, vel_x, vel_y, vel_z - float corevel_x, corevel_y, corevel_z - float bulkvel_x, bulkvel_y, bulkvel_z - float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms - float Jx, Jy, Jz - float energy, spin - float alt_m1, alt_m2, alt_m3, alt_m4 - float Xoff, Voff, b_to_a, c_to_a - float Ax, Ay, Az - float b_to_a2, c_to_a2, A2x, A2y, A2z - float bullock_spin, kin_to_pot, m_pe_b, m_pe_d - np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core - float min_pos_err, min_vel_err, min_bulkvel_err, _pad - -# For finding sub halos import finder function and global variable -# rockstar uses to store the results - -cdef import from "groupies.h": - void find_subs(fof *f) nogil - halo *halos - np.int64_t num_halos - void calc_mass_definition() nogil - void free_particle_copies() nogil - void alloc_particle_copies(np.int64_t total_copies) nogil - void free_halos() nogil - float max_halo_radius(halo *h) nogil - -# global in groupies.c -cdef extern double particle_thresh_dens[5] - -# For outputing halos, rockstar style - -cdef import from "meta_io.h": - void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) nogil - -# For setting up the configuration of rockstar - -cdef import from "config.h": - void setup_config() nogil - void output_config(char *fn) nogil - -cdef import from "distance.h": - void init_cosmology() nogil - -cdef import from "config_vars.h": - # Rockstar cleverly puts all of the config variables inside a templated - # definition of their variables. 
- char *FILE_FORMAT - np.float64_t PARTICLE_MASS - - char *MASS_DEFINITION - char *MASS_DEFINITION2 - char *MASS_DEFINITION3 - char *MASS_DEFINITION4 - char *MASS_DEFINITION5 - np.int64_t STRICT_SO_MASSES - np.int64_t MIN_HALO_OUTPUT_SIZE - np.float64_t FORCE_RES - np.float64_t FORCE_RES_PHYS_MAX - - np.float64_t SCALE_NOW - np.float64_t h0 - np.float64_t Ol - np.float64_t Om - np.float64_t W0 - np.float64_t WA - - np.int64_t GADGET_ID_BYTES - np.float64_t GADGET_MASS_CONVERSION - np.float64_t GADGET_LENGTH_CONVERSION - np.int64_t GADGET_SKIP_NON_HALO_PARTICLES - np.int64_t RESCALE_PARTICLE_MASS - - np.int64_t PARALLEL_IO - char *PARALLEL_IO_SERVER_ADDRESS - char *PARALLEL_IO_SERVER_PORT - np.int64_t PARALLEL_IO_WRITER_PORT - char *PARALLEL_IO_SERVER_INTERFACE - char *RUN_ON_SUCCESS - - char *INBASE - char *FILENAME - np.int64_t STARTING_SNAP - np.int64_t RESTART_SNAP - np.int64_t NUM_SNAPS - np.int64_t NUM_BLOCKS - np.int64_t NUM_READERS - np.int64_t PRELOAD_PARTICLES - char *SNAPSHOT_NAMES - char *LIGHTCONE_ALT_SNAPS - char *BLOCK_NAMES - - char *OUTBASE - np.float64_t OVERLAP_LENGTH - np.int64_t NUM_WRITERS - np.int64_t FORK_READERS_FROM_WRITERS - np.int64_t FORK_PROCESSORS_PER_MACHINE - - char *OUTPUT_FORMAT - np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED - np.int64_t FULL_PARTICLE_CHUNKS - char *BGC2_SNAPNAMES - - np.int64_t SHAPE_ITERATIONS - np.int64_t WEIGHTED_SHAPES - np.int64_t BOUND_PROPS - np.int64_t BOUND_OUT_TO_HALO_EDGE - np.int64_t DO_MERGER_TREE_ONLY - np.int64_t IGNORE_PARTICLE_IDS - np.float64_t EXACT_LL_CALC - np.float64_t TRIM_OVERLAP - np.float64_t ROUND_AFTER_TRIM - np.int64_t LIGHTCONE - np.int64_t PERIODIC - - np.float64_t LIGHTCONE_ORIGIN[3] - np.float64_t LIGHTCONE_ALT_ORIGIN[3] - - np.float64_t LIMIT_CENTER[3] - np.float64_t LIMIT_RADIUS - - np.int64_t SWAP_ENDIANNESS - np.int64_t GADGET_VARIANT - np.int64_t ART_VARIANT - - np.float64_t FOF_FRACTION - np.float64_t FOF_LINKING_LENGTH - np.float64_t INITIAL_METRIC_SCALING - np.float64_t INCLUDE_HOST_POTENTIAL_RATIO - np.int64_t TEMPORAL_HALO_FINDING - np.int64_t MIN_HALO_PARTICLES - np.float64_t UNBOUND_THRESHOLD - np.int64_t ALT_NFW_METRIC - np.int64_t EXTRA_PROFILING - - np.int64_t TOTAL_PARTICLES - np.float64_t BOX_SIZE - np.int64_t OUTPUT_LEVELS - np.float64_t DUMP_PARTICLES[3] - - np.float64_t AVG_PARTICLE_SPACING - np.int64_t SINGLE_SNAP - - - -cdef class RockstarGroupiesInterface: - - cdef public object ds - cdef public object fof - - # For future use/consistency - def __cinit__(self,ds): - self.ds = ds - - def setup_rockstar(self, - particle_mass, - int periodic = 1, force_res = None, - int min_halo_size = 25, outbase = "None", - write_config = False, exact_ll_calc = False, - lightcone = False, lightcone_origin = [0,0,0], - callbacks = None, unbound_threshold=None): - global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om - global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS - global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS - global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE - global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES - global OUTPUT_FORMAT, EXTRA_PROFILING - global STRICT_SO_MASSES, EXACT_LL_CALC - global LIGHTCONE, LIGHTCONE_ORIGIN - - if force_res is not None: - FORCE_RES=np.float64(force_res) - - OVERLAP_LENGTH = 0.0 - - # Set to 0.0 if you plan on calculating spherical overdensity masses. 
- # Otherwise filtering of halos in rockstar meta_io.c _should_print - # will filter the wrong halos when halo mass is re-calculated before - # output_halos - global UNBOUND_THRESHOLD - if unbound_threshold is not None: - UNBOUND_THRESHOLD = unbound_threshold - - FILENAME = "inline." - FILE_FORMAT = "GENERIC" - OUTPUT_FORMAT = "BOTH" - MIN_HALO_OUTPUT_SIZE=min_halo_size - - ds = self.ds - - h0 = ds.hubble_constant - Ol = ds.omega_lambda - Om = ds.omega_matter - - SCALE_NOW = 1.0/(ds.current_redshift+1.0) - - if not outbase =='None'.decode('UTF-8'): - #output directory. since we can't change the output filenames - #workaround is to make a new directory - OUTBASE = outbase - - PARTICLE_MASS = particle_mass.in_units('Msun/h') - PERIODIC = periodic - BOX_SIZE = ds.domain_width.in_units('Mpccm/h')[0] - - if exact_ll_calc: EXACT_LL_CALC = 1 - STRICT_SO_MASSES = 1 # presumably unused in our code path - EXTRA_PROFILING = 0 - - if lightcone: - LIGHTCONE = 1 - LIGHTCONE_ORIGIN[0] = lightcone_origin[0] - LIGHTCONE_ORIGIN[1] = lightcone_origin[1] - LIGHTCONE_ORIGIN[2] = lightcone_origin[2] - - # Set up the configuration options - setup_config() - - # Needs to be called so rockstar can use the particle mass parameter - # to calculate virial quantities properly - init_cosmology() - calc_mass_definition() - - if write_config: output_config(NULL) - - def particle_thresh_dens(self): - cdef np.ndarray d = np.array([particle_thresh_dens[0], - particle_thresh_dens[1], - particle_thresh_dens[2], - particle_thresh_dens[3], - particle_thresh_dens[4]], - dtype=np.float64) - return d - - def assign_masses(self, h, np.ndarray[np.float32_t, ndim=1] r, float force_res, \ - double pmass, np.ndarray[np.float64_t, ndim=1] dens_thresh, - early_termination=False): - """ - Assign spherical overdensity masses to halos. r must be sorted - - Parameters - ---------- - h: struct haloflat - Assign masses to this halo - r: np.ndarray - Sorted array of particle radii - force_res: float - Force resolution, below which density is smoothed. - dens_thresh: np.ndarray - Thresholds for spherical overdensity mass calculation - early_termination: bool - Specifies whether or not to terminate mass calculation when - first particle density is below the lowest density threshold. - If False, may lead to overestimate of SO masses for subhalos, - but gives a better comparison to plain rockstar masses with - STRICT_SO=1. Default: False - Returns - ------- - None - """ - cdef double total_mass = 0.0 - cdef double m = 0.0 - cdef double alt_m1 = 0.0 - cdef double alt_m2 = 0.0 - cdef double alt_m3 = 0.0 - cdef double alt_m4 = 0.0 - cdef double rr - cdef double cur_dens - cdef int min_ind = np.argmin(dens_thresh) - cdef int eterm = early_termination - for rr in r: - if rr < force_res: rr = force_res - total_mass += pmass - cur_dens = total_mass/(rr*rr*rr) - if cur_dens > dens_thresh[0]: m = total_mass - if cur_dens > dens_thresh[1]: alt_m1 = total_mass - if cur_dens > dens_thresh[2]: alt_m2 = total_mass - if cur_dens > dens_thresh[3]: alt_m3 = total_mass - if cur_dens > dens_thresh[4]: alt_m4 = total_mass - if eterm and cur_dens <= dens_thresh[min_ind]: - break - h['m'] = m - h['alt_m1'] = alt_m1 - h['alt_m2'] = alt_m2 - h['alt_m3'] = alt_m3 - h['alt_m4'] = alt_m4 - # if cur_dens > dens_thresh[1]: - # This is usually a subhalo problem, and we don't know who is a subhalo - # print >> sys.stderr, "r too small in assign_masses, m200b will be wrong!" 
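A NumPy sketch of the spherical-overdensity assignment that assign_masses() above implements: walk the (pre-sorted) particle radii outward, accumulate mass, and record the enclosed mass at the last radius where the running density total_mass / r**3 still exceeds each threshold. This is illustrative only and uses the same density measure as the loop above (any geometric factors are assumed to be folded into dens_thresh, as in the Cython code):

    import numpy as np

    def assign_so_masses(radii, pmass, dens_thresh, force_res):
        """radii must be sorted ascending; returns one enclosed mass per threshold."""
        r = np.maximum(radii, force_res)              # smooth below the force resolution
        enclosed = pmass * np.arange(1, r.size + 1)   # running total mass
        dens = enclosed / r**3                        # same density measure as the loop above
        masses = np.zeros(len(dens_thresh))
        for k, thresh in enumerate(dens_thresh):
            above = np.nonzero(dens > thresh)[0]
            if above.size:
                masses[k] = enclosed[above[-1]]
        return masses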
- # print >> sys.stderr, "edge_dens/dens_thresh[1] %.3f" % (cur_dens/dens_thresh[1]) - - def max_halo_radius(self, int i): - return max_halo_radius(&halos[i]) - - def output_halos(self, np.int64_t idoffset, np.ndarray[np.float32_t, ndim=2] bbox): - cdef float bounds[6] - if idoffset is None: idoffset = 0 - if bbox is None: - output_halos(idoffset, 0, 0, NULL) - else: - for i in range(3): - bounds[i] = bbox[i,0] - bounds[i+3] = bbox[i,1] - output_halos(idoffset, 0, 0, bounds) - - def output_config(self): - output_config(NULL) - - def return_halos(self): - cdef haloflat[:] haloview = ( halos) - return np.asarray(haloview) - - def finish(self): - rockstar_cleanup() - free_halos() - - @cython.boundscheck(False) - @cython.wraparound(False) - def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind, - np.ndarray[np.int64_t, ndim=1] fof_tags, - np.ndarray[floating, ndim=2] pos, - np.ndarray[floating, ndim=2] vel): - - verbose = False - # Define fof object - - # Find number of particles - cdef np.int64_t i, j, k, ind - global global_particles - - # Allocate space for correct number of particles - cdef fof fof_obj - - cdef np.int64_t max_count = 0 - cdef np.int64_t next_tag, local_tag, last_fof_tag = -1 - fof_obj.num_p = 0 - j = 0 - # We're going to do one iteration to get the most frequent value. - for i in range(pind.shape[0]): - ind = pind[i] - local_tag = fof_tags[ind] - # Don't count the null group - if local_tag == -1: continue - if local_tag != last_fof_tag: - if j > max_count: - max_count = j - last_fof_tag = local_tag - j = 1 - else: - j += 1 - if j > max_count: - max_count = j - #print >> sys.stderr, "Most frequent occurrence: %s" % max_count - fof_obj.particles = malloc(max_count * sizeof(particle)) - j = 0 - cdef int counter = 0, ndone = 0 - cdef np.ndarray[np.int64_t, ndim=1] pcounts - pcounts = np.zeros(np.unique(fof_tags).size, dtype="int64") - cdef np.int64_t frac = (pcounts.shape[0] / 20.0) - free_halos() - for i in range(pind.shape[0]): - ind = pind[i] - local_tag = fof_tags[ind] - # Skip this one -- it means no group. - if local_tag == -1: - continue - if i == pind.shape[0] - 1: - next_tag = local_tag + 1 - else: - next_tag = fof_tags[pind[i+1]] - for k in range(3): - fof_obj.particles[j].pos[k] = pos[ind,k] - fof_obj.particles[j].pos[k+3] = vel[ind,k] - fof_obj.particles[j].id = j - fof_obj.num_p += 1 - j += 1 - # Now we check if we're the last one - if local_tag != next_tag: - pcounts[ndone] = fof_obj.num_p - counter += 1 - ndone += 1 - if verbose and counter == frac: - print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % ( - (100.0 * ndone)/pcounts.size, - fof_obj.particles[0].pos[2], - halos[num_halos - 1].pos[2]) - counter = 0 - global_particles = &fof_obj.particles[0] - find_subs(&fof_obj) - # Now we reset - fof_obj.num_p = j = 0 - free(fof_obj.particles) - global_particles = NULL - return pcounts diff --git a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx deleted file mode 100644 index ab3dc609e39..00000000000 --- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx +++ /dev/null @@ -1,330 +0,0 @@ -""" -Particle operations for Lagrangian Volume - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -import numpy as np -import os, sys -cimport numpy as np -cimport cython -from libc.stdlib cimport malloc -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - parallel_objects - -from yt.config import ytcfg - -cdef import from "particle.h": - struct particle: - np.int64_t id - float pos[6] - -ctypedef struct particleflat: - np.int64_t id - float pos_x - float pos_y - float pos_z - float vel_x - float vel_y - float vel_z - -cdef import from "halo.h": - struct halo: - np.int64_t id - float pos[6] - float corevel[3] - float bulkvel[3] - float J[3] - float m, r, child_r, mgrav, vmax, rvmax, rs, vrms, energy, spin - np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core - float min_pos_err, min_vel_err, min_bulkvel_err - -cdef import from "io_generic.h": - ctypedef void (*LPG) (char *filename, particle **p, np.int64_t *num_p) - ctypedef void (*AHG) (halo *h, particle *hp) - void set_load_particles_generic(LPG func, AHG afunc) - -cdef import from "rockstar.h": - void rockstar(float *bounds, np.int64_t manual_subs) - -cdef import from "config.h": - void setup_config() - -cdef import from "server.h" nogil: - int server() - np.int64_t READER_TYPE - np.int64_t WRITER_TYPE - -cdef import from "client.h" nogil: - void client(np.int64_t in_type) - -cdef import from "meta_io.h": - void read_particles(char *filename) - void output_halos(np.int64_t id_offset, np.int64_t snap, - np.int64_t chunk, float *bounds) - -cdef import from "config_vars.h": - # Rockstar cleverly puts all of the config variables inside a templated - # definition of their variables. - char *FILE_FORMAT - np.float64_t PARTICLE_MASS - - char *MASS_DEFINITION - np.int64_t MIN_HALO_OUTPUT_SIZE - np.float64_t FORCE_RES - - np.float64_t SCALE_NOW - np.float64_t h0 - np.float64_t Ol - np.float64_t Om - - np.int64_t GADGET_ID_BYTES - np.float64_t GADGET_MASS_CONVERSION - np.float64_t GADGET_LENGTH_CONVERSION - np.int64_t GADGET_SKIP_NON_HALO_PARTICLES - np.int64_t RESCALE_PARTICLE_MASS - - np.int64_t PARALLEL_IO - char *PARALLEL_IO_SERVER_ADDRESS - char *PARALLEL_IO_SERVER_PORT - np.int64_t PARALLEL_IO_WRITER_PORT - char *PARALLEL_IO_SERVER_INTERFACE - char *RUN_ON_SUCCESS - - char *INBASE - char *FILENAME - np.int64_t STARTING_SNAP - np.int64_t RESTART_SNAP - np.int64_t NUM_SNAPS - np.int64_t NUM_BLOCKS - np.int64_t NUM_READERS - np.int64_t PRELOAD_PARTICLES - char *SNAPSHOT_NAMES - char *LIGHTCONE_ALT_SNAPS - char *BLOCK_NAMES - - char *OUTBASE - np.float64_t OVERLAP_LENGTH - np.int64_t NUM_WRITERS - np.int64_t FORK_READERS_FROM_WRITERS - np.int64_t FORK_PROCESSORS_PER_MACHINE - - char *OUTPUT_FORMAT - np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED - np.int64_t FULL_PARTICLE_CHUNKS - char *BGC2_SNAPNAMES - - np.int64_t BOUND_PROPS - np.int64_t BOUND_OUT_TO_HALO_EDGE - np.int64_t DO_MERGER_TREE_ONLY - np.int64_t IGNORE_PARTICLE_IDS - np.float64_t TRIM_OVERLAP - np.float64_t ROUND_AFTER_TRIM - np.int64_t LIGHTCONE - np.int64_t PERIODIC - - np.float64_t LIGHTCONE_ORIGIN[3] - np.float64_t LIGHTCONE_ALT_ORIGIN[3] - - np.float64_t LIMIT_CENTER[3] - np.float64_t LIMIT_RADIUS - - np.int64_t SWAP_ENDIANNESS - np.int64_t GADGET_VARIANT - - np.float64_t FOF_FRACTION - np.float64_t FOF_LINKING_LENGTH - np.float64_t INCLUDE_HOST_POTENTIAL_RATIO - np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO - np.int64_t TEMPORAL_HALO_FINDING - np.int64_t MIN_HALO_PARTICLES - np.float64_t UNBOUND_THRESHOLD - np.int64_t ALT_NFW_METRIC - - np.int64_t 
TOTAL_PARTICLES - np.float64_t BOX_SIZE - np.int64_t OUTPUT_HMAD - np.int64_t OUTPUT_PARTICLES - np.int64_t OUTPUT_LEVELS - np.float64_t DUMP_PARTICLES[3] - - np.float64_t AVG_PARTICLE_SPACING - np.int64_t SINGLE_SNAP - -# Forward declare -cdef class RockstarInterface - -cdef void rh_analyze_halo(halo *h, particle *hp): - # I don't know why, but sometimes we get halos with 0 particles. - if h.num_p == 0: return - cdef particleflat[:] pslice - pslice = (hp) - parray = np.asarray(pslice) - for cb in rh.callbacks: - cb(rh.ds, parray) - # This is where we call our functions - -cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p): - global SCALE_NOW - cdef np.float64_t left_edge[6] - cdef np.ndarray[np.int64_t, ndim=1] arri - cdef np.ndarray[np.float64_t, ndim=1] arr - cdef unsigned long long pi,fi,i - cdef np.int64_t local_parts = 0 - ds = rh.ds = next(rh.tsl) - - SCALE_NOW = 1.0/(ds.current_redshift+1.0) - # First we need to find out how many this reader is going to read in - # if the number of readers > 1. - dd = ds.all_data() - - # Add particle type filter if not defined - if rh.particle_type not in ds.particle_types and rh.particle_type != 'all': - ds.add_particle_filter(rh.particle_type) - - if NUM_BLOCKS > 1: - local_parts = 0 - for chunk in parallel_objects( - dd.chunks([], "io")): - local_parts += chunk[rh.particle_type, "particle_ones"].sum() - else: - local_parts = TOTAL_PARTICLES - - p[0] = malloc(sizeof(particle) * local_parts) - - left_edge[0] = ds.domain_left_edge.in_units('Mpccm/h')[0] - left_edge[1] = ds.domain_left_edge.in_units('Mpccm/h')[1] - left_edge[2] = ds.domain_left_edge.in_units('Mpccm/h')[2] - left_edge[3] = left_edge[4] = left_edge[5] = 0.0 - pi = 0 - for chunk in parallel_objects(dd.chunks([], "io")): - arri = np.asarray(chunk[rh.particle_type, "particle_index"], - dtype="int64") - npart = arri.size - for i in range(npart): - p[0][i+pi].id = arri[i] - fi = 0 - for field in ["particle_position_x", "particle_position_y", - "particle_position_z", - "particle_velocity_x", "particle_velocity_y", - "particle_velocity_z"]: - if "position" in field: - unit = "Mpccm/h" - else: - unit = "km/s" - arr = chunk[rh.particle_type, field].in_units(unit).astype("float64") - for i in range(npart): - p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi]) - fi += 1 - pi += npart - num_p[0] = local_parts - del ds - -cdef class RockstarInterface: - - cdef public object data_source - cdef public object ts - cdef public object tsl - cdef public object ds - cdef int rank - cdef int size - cdef public int block_ratio - cdef public object particle_type - cdef public int total_particles - cdef public object callbacks - - def __cinit__(self, ts): - self.ts = ts - self.tsl = ts.__iter__() #timeseries generator used by read - - def setup_rockstar(self, char *server_address, char *server_port, - int num_snaps, np.int64_t total_particles, - particle_type, - np.float64_t particle_mass, - int parallel = False, int num_readers = 1, - int num_writers = 1, - int writing_port = -1, int block_ratio = 1, - int periodic = 1, force_res=None, - int min_halo_size = 25, outbase = "None", - callbacks = None, int restart_num = 0): - global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT - global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om - global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS - global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS - global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE, OUTPUT_FORMAT - global 
OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES, RESTART_SNAP - if force_res is not None: - FORCE_RES=np.float64(force_res) - #print "set force res to ",FORCE_RES - OVERLAP_LENGTH = 0.0 - if parallel: - PARALLEL_IO = 1 - PARALLEL_IO_SERVER_ADDRESS = server_address - PARALLEL_IO_SERVER_PORT = server_port - if writing_port > 0: - PARALLEL_IO_WRITER_PORT = writing_port - else: - PARALLEL_IO = 0 - PARALLEL_IO_SERVER_ADDRESS = server_address - PARALLEL_IO_SERVER_PORT = server_port - FILENAME = "inline." - FILE_FORMAT = "GENERIC" - OUTPUT_FORMAT = "ASCII" - NUM_SNAPS = num_snaps - RESTART_SNAP = restart_num - NUM_READERS = num_readers - NUM_WRITERS = num_writers - NUM_BLOCKS = num_readers - MIN_HALO_OUTPUT_SIZE=min_halo_size - TOTAL_PARTICLES = total_particles - self.block_ratio = block_ratio - self.particle_type = particle_type - - tds = self.ts[0] - h0 = tds.hubble_constant - Ol = tds.omega_lambda - Om = tds.omega_matter - SCALE_NOW = 1.0/(tds.current_redshift+1.0) - if callbacks is None: callbacks = [] - self.callbacks = callbacks - if not outbase == 'None'.encode('UTF-8'): - #output directory. since we can't change the output filenames - #workaround is to make a new directory - OUTBASE = outbase - - PARTICLE_MASS = particle_mass - PERIODIC = periodic - BOX_SIZE = (tds.domain_right_edge[0] - - tds.domain_left_edge[0]).in_units("Mpccm/h") - setup_config() - rh = self - cdef LPG func = rh_read_particles - cdef AHG afunc = rh_analyze_halo - set_load_particles_generic(func, afunc) - - def call_rockstar(self): - read_particles("generic") - rockstar(NULL, 0) - output_halos(0, 0, 0, NULL) - - def start_server(self): - with nogil: - server() - - def start_reader(self): - cdef np.int64_t in_type = np.int64(READER_TYPE) - client(in_type) - - def start_writer(self): - cdef np.int64_t in_type = np.int64(WRITER_TYPE) - client(in_type) - diff --git a/yt/analysis_modules/halo_finding/tests/__init__.py b/yt/analysis_modules/halo_finding/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/halo_finding/tests/run_rockstar.py b/yt/analysis_modules/halo_finding/tests/run_rockstar.py deleted file mode 100644 index c2d1513f2fd..00000000000 --- a/yt/analysis_modules/halo_finding/tests/run_rockstar.py +++ /dev/null @@ -1,26 +0,0 @@ -from mpi4py import MPI -import yt -from yt.analysis_modules.halo_finding.rockstar.api import \ - RockstarHaloFinder -from yt.data_objects.particle_filters import \ - particle_filter -yt.enable_parallelism() - -comm = MPI.Comm.Get_parent() - -@particle_filter("dark_matter", requires=["creation_time"]) -def _dm_filter(pfilter, data): - return data["creation_time"] <= 0.0 - -def setup_ds(ds): - ds.add_particle_filter("dark_matter") - -es = yt.simulation("Enzo_64/64.param", "Enzo") -es.get_time_series(setup_function=setup_ds, - redshifts=[1., 0.]) - -rh = RockstarHaloFinder(es, num_readers=1, num_writers=1, - particle_type="dark_matter") -rh.run() - -comm.Disconnect() diff --git a/yt/analysis_modules/halo_finding/tests/test_halo_finders.py b/yt/analysis_modules/halo_finding/tests/test_halo_finders.py deleted file mode 100644 index 9d9bcd36a42..00000000000 --- a/yt/analysis_modules/halo_finding/tests/test_halo_finders.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Tests for HOP and FOF halo finders. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.convenience import \ - load -from yt.data_objects.particle_filters import \ - add_particle_filter -from yt.analysis_modules.halo_analysis.api import \ - HaloCatalog -from yt.testing import \ - requires_file, \ - assert_array_equal -from yt.utilities.answer_testing.framework import \ - data_dir_load - -import tempfile -import os -import shutil - -def dm(pfilter, data): - return data["creation_time"] <= 0. -add_particle_filter("dm", dm, filtered_type='all', - requires=["creation_time"]) - -enzotiny = "enzo_tiny_cosmology/DD0046/DD0046" -@requires_file(enzotiny) -def test_datacontainer_data(): - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - ds = data_dir_load(enzotiny) - ds.add_particle_filter("dm") - - for method in ["fof", "hop"]: - hc = HaloCatalog(data_ds=ds, finder_method=method, - output_dir="hc1", - finder_kwargs={"dm_only": True}) - hc.create() - hc = HaloCatalog(data_ds=ds, finder_method=method, - output_dir="hc2", - finder_kwargs={"dm_only": False, "ptype": "dm"}) - hc.create() - - ds1 = load("hc1/hc1.0.h5") - ds2 = load("hc2/hc2.0.h5") - assert_array_equal(ds1.r["particle_mass"], ds2.r["particle_mass"]) - - os.chdir(curdir) - shutil.rmtree(tmpdir) diff --git a/yt/analysis_modules/halo_finding/tests/test_rockstar.py b/yt/analysis_modules/halo_finding/tests/test_rockstar.py deleted file mode 100644 index 0f19c6c7228..00000000000 --- a/yt/analysis_modules/halo_finding/tests/test_rockstar.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import sys - -from yt.convenience import load -from yt.utilities.answer_testing.framework import \ - FieldValuesTest, \ - requires_sim - -_fields = (("halos", "particle_position_x"), - ("halos", "particle_position_y"), - ("halos", "particle_position_z"), - ("halos", "particle_mass")) - -@requires_sim("Enzo_64/64.param", "Enzo", big_data=True) -def test_rockstar(): - from mpi4py import MPI - filename = os.path.join(os.path.dirname(__file__), - "run_rockstar.py") - comm = MPI.COMM_SELF.Spawn(sys.executable, - args=[filename], - maxprocs=3) - comm.Disconnect() - - h1 = "rockstar_halos/halos_0.0.bin" - d1 = load(h1) - for field in _fields: - yield FieldValuesTest(d1, field, particle_type=True, decimals=1) - h2 = "rockstar_halos/halos_1.0.bin" - d2 = load(h2) - for field in _fields: - yield FieldValuesTest(d2, field, particle_type=True, decimals=1) diff --git a/yt/analysis_modules/halo_mass_function/api.py b/yt/analysis_modules/halo_mass_function/api.py index 2c4a8a31f79..b978be38cc8 100644 --- a/yt/analysis_modules/halo_mass_function/api.py +++ b/yt/analysis_modules/halo_mass_function/api.py @@ -1,27 +1,7 @@ -""" -API for halo_mass_function +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "The halo_mass_function module does not function correctly and has been " - "deprecated. 
This code has been moved to the yt attic " - "(https://github.com/yt-project/yt_attic) and will be removed in a " - "future release.") - -from .halo_mass_function import \ - HaloMassFcn, \ - TransferFunction, \ - integrate_inf +raise YTModuleRemoved( + "halo_mass_function", + "https://github.com/yt-project/yt_attic", + "https://yt-attic.readthedocs.io/") diff --git a/yt/analysis_modules/halo_mass_function/halo_mass_function.py b/yt/analysis_modules/halo_mass_function/halo_mass_function.py deleted file mode 100644 index 4d881656e65..00000000000 --- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py +++ /dev/null @@ -1,873 +0,0 @@ -""" -Halo Mass Function and supporting functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np -import math - -from yt.funcs import mylog -from yt.units.yt_array import \ - YTArray, \ - YTQuantity -from yt.utilities.physical_ratios import \ - rho_crit_g_cm3_h2 - -class HaloMassFcn(): - r""" - Initialize a HaloMassFcn object to analyze the distribution of halos as - a function of mass. A mass function can be created for a set of - simulated halos, an analytic fit to can be created for a redshift and - set of cosmological parameters, or both can be created. - - Provided with a halo dataset object, this will make a the mass function - for simulated halos. Providing a simulation dataset will set as many - of the cosmological parameters as possible for the creation of the - analytic mass function. - - The HaloMassFcn object has arrays hanging off of it containing the mass - function information. - - masses_sim : Array - Halo masses from simulated halos. Units: M_solar. - n_cumulative_sim : Array - Number density of halos with mass greater than the corresponding - mass in masses_sim (simulated). Units: comoving Mpc^-3 - masses_analytic : Array - Masses used for the generation of the analytic mass function, Units: - M_solar. - n_cumulative_analytic : Array - Number density of halos with mass greater then the corresponding - mass in masses_analytic (analytic). Units: comoving Mpc^-3 - dndM_dM_analytic : Array - Differential number density of halos, (dn/dM)*dM (analytic). - - The HaloMassFcn object also has a convenience function write_out() that - will write out the data to disk. - - Creating a HaloMassFcn object with no arguments will produce an analytic - mass function at redshift = 0 using default cosmological values. - - Parameters - ---------- - simulation_ds : Simulation dataset object - The loaded simulation dataset, used to set cosmological parameters. - Default : None. - halos_ds : Halo dataset object - The halos from a simulation to be used for creation of the - halo mass function in the simulation. - Default : None. - make_analytic : bool - Whether or not to calculate the analytic mass function to go with - the simulated halo mass function. Automatically set to true if a - simulation dataset is provided. - Default : True. - omega_matter0 : float - The fraction of the universe made up of matter (dark and baryonic). - Default : 0.2726. - omega_lambda0 : float - The fraction of the universe made up of dark energy. - Default : 0.7274. - omega_baryon0 : float - The fraction of the universe made up of baryonic matter. 
This is not - always stored in the dataset and should be checked by hand. - Default : 0.0456. - hubble0 : float - The expansion rate of the universe in units of 100 km/s/Mpc. - Default : 0.704. - sigma8 : float - The amplitude of the linear power spectrum at z=0 as specified by - the rms amplitude of mass-fluctuations in a top-hat sphere of radius - 8 Mpc/h. This is not always stored in the dataset and should be - checked by hand. - Default : 0.86. - primoridal_index : float - This is the index of the mass power spectrum before modification by - the transfer function. A value of 1 corresponds to the scale-free - primordial spectrum. This is not always stored in the dataset and - should be checked by hand. - Default : 1.0. - this_redshift : float - The current redshift. - Default : 0. - log_mass_min : float - The log10 of the mass of the minimum of the halo mass range. This is - set automatically by the range of halo masses if a simulated halo - dataset is provided. If a halo dataset if not provided and no value - is specified, it will be set to 5. Units: M_solar - Default : None. - log_mass_max : float - The log10 of the mass of the maximum of the halo mass range. This is - set automatically by the range of halo masses if a simulated halo - dataset is provided. If a halo dataset if not provided and no value - is specified, it will be set to 16. Units: M_solar - Default : None. - num_sigma_bins : float - The number of bins (points) to use for the calculation of the - analytic mass function. - Default : 360. - fitting_function : int - Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, - 3 = Sheth-Tormen, 4 = Warren, 5 = Tinker - Default : 4. - - Examples - -------- - - This creates the halo mass function for a halo dataset from a simulation - and the analytic mass function at the same redshift as the dataset, - using as many cosmological parameters as can be pulled from the dataset. - - >>> halos_ds = load("rockstar_halos/halo_0.0.bin") - >>> hmf = HaloMassFcn(halos_ds=halos_ds) - >>> plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim) - >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic) - >>> plt.savefig("mass_function.png") - - This creates only the analytic halo mass function for a simulation - dataset, with default values for cosmological parameters not stored in - the dataset. - - >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046") - >>> hmf = HaloMassFcn(simulation_ds=ds) - >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic) - >>> plt.savefig("mass_function.png") - - This creates the analytic mass function for an arbitrary set of - cosmological parameters, with neither a simulation nor halo dataset. 
- - >>> hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, - omega_lambda0=0.73, hubble0=0.7, this_redshift=10, - log_mass_min=5, log_mass_max=9) - >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic) - >>> plt.savefig("mass_function.png") - """ - def __init__(self, simulation_ds=None, halos_ds=None, make_analytic=True, - omega_matter0=0.2726, omega_lambda0=0.7274, omega_baryon0=0.0456, hubble0=0.704, - sigma8=0.86, primordial_index=1.0, this_redshift=0, log_mass_min=None, - log_mass_max=None, num_sigma_bins=360, fitting_function=4): - self.simulation_ds = simulation_ds - self.halos_ds = halos_ds - self.omega_matter0 = omega_matter0 - self.omega_lambda0 = omega_lambda0 - self.omega_baryon0 = omega_baryon0 - self.hubble0 = hubble0 - self.sigma8 = sigma8 - self.primordial_index = primordial_index - self.this_redshift = this_redshift - self.log_mass_min = log_mass_min - self.log_mass_max = log_mass_max - self.num_sigma_bins = num_sigma_bins - self.fitting_function = fitting_function - self.make_analytic = make_analytic - self.make_simulated = False - """ - If we want to make an analytic mass function, grab what we can from either the - halo file or the data set, and make sure that the user supplied everything else - that is needed. - """ - # If we don't have any datasets, make the analytic function with user values - if simulation_ds is None and halos_ds is None: - # Set a reasonable mass min and max if none were provided - if log_mass_min is None: - self.log_mass_min = 5 - if log_mass_max is None: - self.log_mass_max = 16 - # If we're making the analytic function... - if self.make_analytic: - # Try to set cosmological parameters from the simulation dataset - if simulation_ds is not None: - self.omega_matter0 = self.simulation_ds.omega_matter - self.omega_lambda0 = self.simulation_ds.omega_lambda - self.hubble0 = self.simulation_ds.hubble_constant - self.this_redshift = self.simulation_ds.current_redshift - # Set a reasonable mass min and max if none were provided - if log_mass_min is None: - self.log_mass_min = 5 - if log_mass_max is None: - self.log_mass_max = 16 - # If we have a halo dataset but not a simulation dataset, use that instead - if simulation_ds is None and halos_ds is not None: - self.omega_matter0 = self.halos_ds.omega_matter - self.omega_lambda0 = self.halos_ds.omega_lambda - self.hubble0 = self.halos_ds.hubble_constant - self.this_redshift = self.halos_ds.current_redshift - # If the user didn't specify mass min and max, set them from the halos - if log_mass_min is None: - self.set_mass_from_halos("min_mass") - if log_mass_max is None: - self.set_mass_from_halos("max_mass") - # Do the calculations. - self.sigmaM() - self.dndm() - # Return the mass array in M_solar rather than M_solar/h - self.masses_analytic = YTArray(self.masses_analytic/self.hubble0, "Msun") - # The halo arrays will already have yt units, but the analytic forms do - # not. If a dataset has been provided, use that to give them units. 
At the - # same time, convert to comoving (Mpc)^-3 - if simulation_ds is not None: - self.n_cumulative_analytic = simulation_ds.arr(self.n_cumulative_analytic, - "(Mpccm)**(-3)") - elif halos_ds is not None: - self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic, - "(Mpccm)**(-3)") - else: - from yt.units.unit_registry import UnitRegistry - from yt.units.dimensions import length - hmf_registry = UnitRegistry() - for my_unit in ["m", "pc", "AU", "au"]: - new_unit = "%scm" % my_unit - hmf_registry.add(new_unit, - hmf_registry.lut[my_unit][0] / - (1 + self.this_redshift), - length, "\\rm{%s}/(1+z)" % my_unit) - self.n_cumulative_analytic = YTArray(self.n_cumulative_analytic, - "(Mpccm)**(-3)", - registry=hmf_registry) - - - """ - If a halo file has been supplied, make a mass function for the simulated halos. - """ - if halos_ds is not None: - # Used to check if a simulated halo mass function exists to write out - self.make_simulated=True - # Calculate the simulated halo mass function - self.create_sim_hmf() - - """ - If we're making an analytic fit and have a halo dataset, but don't have log_mass_min - or log_mass_max from the user, set it from the range of halo masses. - """ - def set_mass_from_halos(self, which_limit): - data_source = self.halos_ds.all_data() - if which_limit == "min_mass": - self.log_mass_min = \ - int(np.log10(np.amin(data_source["particle_mass"].in_units("Msun")))) - if which_limit == "max_mass": - self.log_mass_max = \ - int(np.log10(np.amax(data_source["particle_mass"].in_units("Msun"))))+1 - - """ - Here's where we create the halo mass functions from simulated halos - """ - def create_sim_hmf(self): - data_source = self.halos_ds.all_data() - # We're going to use indices to count the number of halos above a given mass - masses_sim = np.sort(data_source["particle_mass"].in_units("Msun")) - # Determine the size of the simulation volume in comoving Mpc**3 - sim_volume = self.halos_ds.domain_width.in_units('Mpccm').prod() - n_cumulative_sim = np.arange(len(masses_sim),0,-1) - # We don't want repeated halo masses, and the unique indices tell us which values - # correspond to distinct halo masses. - self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True) - # Now make this an actual number density of halos as a function of mass. - self.n_cumulative_sim = n_cumulative_sim[unique_indices]/sim_volume - # masses_sim and n_cumulative_sim are now set, but remember that the log10 quantities - # are what is usually plotted for a halo mass function. - - def write_out(self, prefix='HMF', analytic=True, simulated=True): - """ - Writes out the halo mass functions to file(s) with prefix *prefix*. - """ - # First the analytic file, check that analytic fit exists and was requested - if analytic: - if self.make_analytic: - fitname = prefix + '-analytic.dat' - fp = open(fitname, "w") - line = \ - "#Columns:\n" + \ - "#1. mass (M_solar)\n" + \ - "#2. cumulative number density of halos [comoving Mpc^-3]\n" + \ - "#3. (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3]\n" - fp.write(line) - for i in range(self.masses_analytic.size - 1): - line = "%e\t%e\t%e\n" % (self.masses_analytic[i], - self.n_cumulative_analytic[i], - self.dndM_dM_analytic[i]) - fp.write(line) - fp.close() - # If the analytic halo mass function wasn't created, warn the user - else: - mylog.warning("The analytic halo mass function was not created and cannot be " + - "written out! 
Specify its creation with " + - "HaloMassFcn(make_analytic=True, other_args) when creating the " + - "HaloMassFcn object.") - # Write out the simulated mass fucntion if it exists and was requested - if simulated: - if self.make_simulated: - haloname = prefix + '-simulated.dat' - fp = open(haloname, "w") - line = \ - "#Columns:\n" + \ - "#1. mass [Msun]\n" + \ - "#2. cumulative number density of halos [comoving Mpc^-3]\n" - fp.write(line) - for i in range(self.masses_sim.size - 1): - line = "%e\t%e\n" % (self.masses_sim[i], - self.n_cumulative_sim[i]) - fp.write(line) - fp.close() - # If the simulated halo mass function wasn't created, warn the user - else: - mylog.warning("The simulated halo mass function was not created and cannot " + - "be written out! Specify its creation by providing a loaded " + - "halo dataset with HaloMassFcn(ds_halos=loaded_halo_dataset, " + - "other_args) when creating the HaloMassFcn object.") - - def sigmaM(self): - """ - Written by BWO, 2006 (updated 25 January 2007). - Converted to Python by Stephen Skory December 2009. - - This routine takes in cosmological parameters and creates a file (array) with - sigma(M) in it, which is necessary for various press-schechter type - stuff. In principle one can calculate it ahead of time, but it's far, - far faster in the long run to calculate your sigma(M) ahead of time. - - Inputs: cosmology, user must set parameters - - Outputs: four columns of data containing the following information: - - 1) mass (Msolar/h) - 2) sigma (normalized) using Msun/h as the input - - The arrays output are used later. - """ - - # Set up the transfer function object. - self.TF = TransferFunction(self.omega_matter0, self.omega_baryon0, 0.0, 0, - self.omega_lambda0, self.hubble0, self.this_redshift); - - if self.TF.qwarn: - mylog.error("You should probably fix your cosmology parameters!") - - # output arrays - # 1) mass (M_solar/h), changed to M_solar/h at output - self.masses_analytic = np.empty(self.num_sigma_bins, dtype='float64') - # 2) sigma(M, z=0, where mass is in Msun/h) - self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64') - - # get sigma_8 normalization - R = 8.0; # in units of Mpc/h (comoving) - - sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R)); - sigma_normalization = self.sigma8 / sigma8_unnorm; - - # rho0 in units of h^2 Msolar/Mpc^3 - rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2, - 'g/cm**3').in_units('Msun/Mpc**3') - rho0 = rho0.value.item() - - # spacing in mass of our sigma calculation - dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins; - - """ - loop over the total number of sigma_bins the user has requested. - For each bin, calculate mass and equivalent radius, and call - sigma_squared_of_R to get the sigma(R) (equivalent to sigma(M)), - normalize by user-specified sigma_8, and then write out. - """ - for i in range(self.num_sigma_bins): - - # thislogmass is in units of Msolar, NOT Msolar/h - thislogmass = self.log_mass_min + i*dm - - # mass in units of h^-1 Msolar - thismass = math.pow(10.0, thislogmass) * self.hubble0; - - # radius is in units of h^-1 Mpc (comoving) - thisradius = math.pow( 3.0*thismass / 4.0 / math.pi / rho0, 1.0/3.0 ); - - R = thisradius; # h^-1 Mpc (comoving) - - self.masses_analytic[i] = thismass; # Msun/h - - # get normalized sigma(R) - self.sigmaarray[i] = math.sqrt(self.sigma_squared_of_R(R)) * sigma_normalization; - # All done! - - def dndm(self): - # constants - set these before calling any functions! 
- # rho0 in units of h^2 Msolar/Mpc^3 - rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2, - 'g/cm**3').in_units('Msun/Mpc**3') - rho0 = rho0.value.item() - - self.delta_c0 = 1.69; # critical density for turnaround (Press-Schechter) - - n_cumulative_analytic = 0.0; # keep track of cumulative number density - - # Loop over masses, going BACKWARD, and calculate dn/dm as well as the - # cumulative mass function. - - # output arrays - # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3) - self.dndM_dM_analytic = np.empty(self.num_sigma_bins, dtype='float64') - # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3) - self.n_cumulative_analytic = np.zeros(self.num_sigma_bins, dtype='float64') - - for j in range(self.num_sigma_bins - 1): - i = (self.num_sigma_bins - 2) - j - - thissigma = self.sigmaof_M_z(i, self.this_redshift); - nextsigma = self.sigmaof_M_z(i+1, self.this_redshift); - - # calc dsigmadm - has units of h (since masses_analytic has units of h^-1) - dsigmadm = (nextsigma-thissigma) / (self.masses_analytic[i+1] - self.masses_analytic[i]); - - # calculate dn(M,z) (dn/dM * dM) - # this has units of h^3 since rho0 has units of h^2, dsigmadm - # has units of h, and masses_analytic has units of h^-1 - dndM_dM_analytic = -1.0 / thissigma * dsigmadm * rho0 / self.masses_analytic[i] * \ - self.multiplicityfunction(thissigma)*(self.masses_analytic[i+1] - self.masses_analytic[i]); - - # scale by h^3 to get rid of all factors of h - dndM_dM_analytic *= math.pow(self.hubble0, 3.0); - - # keep track of cumulative number density - if dndM_dM_analytic > 1.0e-20: - n_cumulative_analytic += dndM_dM_analytic; - - # Store this. - self.n_cumulative_analytic[i] = n_cumulative_analytic - self.dndM_dM_analytic[i] = dndM_dM_analytic - - - def sigma_squared_of_R(self, R): - """ - calculates sigma^2(R). This is the routine where the magic happens (or - whatever it is that we do here). Integrates the sigma_squared_integrand - parameter from R to infinity. Calls GSL (gnu scientific library) to do - the actual integration. - - Note that R is in h^-1 Mpc (comoving) - """ - self.R = R - result = integrate_inf(self.sigma_squared_integrand) - - sigmasquaredofR = result / 2.0 / math.pi / math.pi - - return sigmasquaredofR; - - def sigma_squared_integrand(self, k): - """ - integrand for integral to get sigma^2(R). - """ - - Rcom = self.R; # this is R in comoving Mpc/h - - f = k*k*self.PofK(k)*np.power( abs(self.WofK(Rcom,k)), 2.0); - - return f - - def PofK(self, k): - """ - returns power spectrum as a function of wavenumber k - """ - - thisPofK = np.power(k, self.primordial_index) * np.power( self.TofK(k), 2.0); - - return thisPofK; - - def TofK(self, k): - """ - returns transfer function as a function of wavenumber k. - """ - - thisTofK = self.TF.TFmdm_onek_hmpc(k); - - return thisTofK; - - def WofK(self, R, k): - """ - returns W(k), which is the fourier transform of the top-hat function. - """ - - x = R*k; - - thisWofK = 3.0 * ( np.sin(x) - x*np.cos(x) ) / (x*x*x); - - return thisWofK; - - def multiplicityfunction(self, sigma): - """ - Multiplicity function - this is where the various fitting functions/analytic - theories are different. The various places where I found these fitting functions - are listed below. - """ - - nu = self.delta_c0 / sigma; - - if self.fitting_function==1: - # Press-Schechter (This form from Jenkins et al. 2001, MNRAS 321, 372-384, eqtn. 
5) - thismult = math.sqrt(2.0/math.pi) * nu * math.exp(-0.5*nu*nu); - - elif self.fitting_function==2: - # Jenkins et al. 2001, MNRAS 321, 372-384, eqtn. 9 - thismult = 0.315 * math.exp( -1.0 * math.pow( abs( math.log(1.0/sigma) + 0.61), 3.8 ) ); - - elif self.fitting_function==3: - # Sheth-Tormen 1999, eqtn 10, using expression from Jenkins et al. 2001, eqtn. 7 - A=0.3222; - a=0.707; - p=0.3; - thismult = A*math.sqrt(2.0*a/math.pi)*(1.0+ math.pow( 1.0/(nu*nu*a), p) )*\ - nu * math.exp(-0.5*a*nu*nu); - - elif self.fitting_function==4: - # LANL fitting function - Warren et al. 2005, astro-ph/0506395, eqtn. 5 - A=0.7234; - a=1.625; - b=0.2538; - c=1.1982; - thismult = A*( math.pow(sigma, -1.0*a) + b)*math.exp(-1.0*c / sigma / sigma ); - - elif self.fitting_function==5: - # Tinker et al. 2008, eqn 3, \Delta=300 # \Delta=200 - A = 0.2 #0.186 - a = 1.52 #1.47 - b = 2.25 #2.57 - c = 1.27 #1.19 - thismult = A * ( math.pow((sigma / b), -a) + 1) * \ - math.exp(-1 * c / sigma / sigma) - - else: - mylog.error("Don't understand this. Fitting function requested is %d\n", - self.fitting_function) - return None - - return thismult - - def sigmaof_M_z(self, sigmabin, redshift): - """ - sigma(M, z) - """ - - thissigma = self.Dofz(redshift) * self.sigmaarray[sigmabin]; - - return thissigma; - - def Dofz(self, redshift): - """ - Growth function - """ - - thisDofz = self.gofz(redshift) / self.gofz(0.0) / (1.0+redshift); - - return thisDofz; - - - def gofz(self, redshift): - """ - g(z) - I don't think this has any other name - """ - - thisgofz = 2.5 * self.omega_matter_of_z(redshift) / \ - ( math.pow( self.omega_matter_of_z(redshift), 4.0/7.0 ) - \ - self.omega_lambda_of_z(redshift) + \ - ( (1.0 + self.omega_matter_of_z(redshift) / 2.0) * \ - (1.0 + self.omega_lambda_of_z(redshift) / 70.0) )) - - return thisgofz; - - - def omega_matter_of_z(self,redshift): - """ - Omega matter as a function of redshift - """ - - thisomofz = self.omega_matter0 * math.pow( 1.0+redshift, 3.0) / \ - math.pow( self.Eofz(redshift), 2.0 ); - - return thisomofz; - - def omega_lambda_of_z(self,redshift): - """ - Omega lambda as a function of redshift - """ - - thisolofz = self.omega_lambda0 / math.pow( self.Eofz(redshift), 2.0 ) - - return thisolofz; - - def Eofz(self, redshift): - """ - E(z) - I don't think this has any other name - """ - thiseofz = math.sqrt( self.omega_lambda0 \ - + (1.0 - self.omega_lambda0 - self.omega_matter0)*math.pow( 1.0+redshift, 2.0) \ - + self.omega_matter0 * math.pow( 1.0+redshift, 3.0) ); - - return thiseofz; - - -""" -Fitting Formulae for CDM + Baryon + Massive Neutrino (MDM) cosmologies. -Daniel J. Eisenstein & Wayne Hu, Institute for Advanced Study - -There are two primary routines here, one to set the cosmology, the -other to construct the transfer function for a single wavenumber k. -You should call the former once (per cosmology) and the latter as -many times as you want. - - TFmdm_set_cosm() -- User passes all the cosmological parameters as - arguments; the routine sets up all of the scalar quantities needed - computation of the fitting formula. The input parameters are: - 1) omega_matter -- Density of CDM, baryons, and massive neutrinos, - in units of the critical density. - 2) omega_baryon -- Density of baryons, in units of critical. 
- 3) omega_hdm -- Density of massive neutrinos, in units of critical - 4) degen_hdm -- (Int) Number of degenerate massive neutrino species - 5) omega_lambda -- Cosmological constant - 6) hubble -- Hubble constant, in units of 100 km/s/Mpc - 7) redshift -- The redshift at which to evaluate */ - - TFmdm_onek_mpc() -- User passes a single wavenumber, in units of Mpc^-1. - Routine returns the transfer function from the Eisenstein & Hu - fitting formula, based on the cosmology currently held in the - internal variables. The routine returns T_cb (the CDM+Baryon - density-weighted transfer function), although T_cbn (the CDM+ - Baryon+Neutrino density-weighted transfer function) is stored - in the global variable tf_cbnu. */ - -We also supply TFmdm_onek_hmpc(), which is identical to the previous -routine, but takes the wavenumber in units of h Mpc^-1. - -We hold the internal scalar quantities in global variables, so that -the user may access them in an external program, via "extern" declarations. - -Please note that all internal length scales are in Mpc, not h^-1 Mpc! -""" - -class TransferFunction(object): - """ - This routine takes cosmological parameters and a redshift and sets up - all the internal scalar quantities needed to compute the transfer function. - - Parameters - ---------- - omega_matter : float - Density of CDM, baryons, and massive neutrinos, in units - of the critical density. - omega_baryon : float - Density of baryons, in units of critical. - omega_hdm : float - Density of massive neutrinos, in units of critical - degen_hdm : integer - Number of degenerate massive neutrino species - omega_lambda : float - Cosmological constant - hubble : float - Hubble constant, in units of 100 km/s/Mpc - redshift : float - The redshift at which to evaluate - - Returns 0 if all is well, 1 if a warning was issued. Otherwise, - sets many global variables for use in TFmdm_onek_mpc() - """ - def __init__(self, omega_matter, omega_baryon, omega_hdm, - degen_hdm, omega_lambda, hubble, redshift): - self.qwarn = 0; - self.theta_cmb = 2.728/2.7 # Assuming T_cmb = 2.728 K - - # Look for strange input - if (omega_baryon<0.0): - mylog.error("TFmdm_set_cosm(): Negative omega_baryon set to trace amount.\n") - self.qwarn = 1 - if (omega_hdm<0.0): - mylog.error("TFmdm_set_cosm(): Negative omega_hdm set to trace amount.\n") - self.qwarn = 1; - if (hubble<=0.0): - mylog.error("TFmdm_set_cosm(): Negative Hubble constant illegal.\n") - return None - elif (hubble>2.0): - mylog.error("TFmdm_set_cosm(): Hubble constant should be in units of 100 km/s/Mpc.\n"); - self.qwarn = 1; - if (redshift<=-1.0): - mylog.error("TFmdm_set_cosm(): Redshift < -1 is illegal.\n"); - return None - elif (redshift>99.0): - mylog.error("TFmdm_set_cosm(): Large redshift entered. TF may be inaccurate.\n"); - self.qwarn = 1; - - if (degen_hdm<1): degen_hdm=1; - self.num_degen_hdm = degen_hdm; - # Have to save this for TFmdm_onek_mpc() - # This routine would crash if baryons or neutrinos were zero, - # so don't allow that. - if (omega_baryon<=0): omega_baryon=1e-5; - if (omega_hdm<=0): omega_hdm=1e-5; - - self.omega_curv = 1.0-omega_matter-omega_lambda; - self.omhh = omega_matter*SQR(hubble); - self.obhh = omega_baryon*SQR(hubble); - self.onhh = omega_hdm*SQR(hubble); - self.f_baryon = omega_baryon/omega_matter; - self.f_hdm = omega_hdm/omega_matter; - self.f_cdm = 1.0-self.f_baryon-self.f_hdm; - self.f_cb = self.f_cdm+self.f_baryon; - self.f_bnu = self.f_baryon+self.f_hdm; - - # Compute the equality scale. 
- self.z_equality = 25000.0*self.omhh/SQR(SQR(self.theta_cmb)) # Actually 1+z_eq - self.k_equality = 0.0746*self.omhh/SQR(self.theta_cmb); - - # Compute the drag epoch and sound horizon - z_drag_b1 = 0.313*math.pow(self.omhh,-0.419)*(1+0.607*math.pow(self.omhh,0.674)); - z_drag_b2 = 0.238*math.pow(self.omhh,0.223); - self.z_drag = 1291*math.pow(self.omhh,0.251)/(1.0+0.659*math.pow(self.omhh,0.828))* \ - (1.0+z_drag_b1*math.pow(self.obhh,z_drag_b2)); - self.y_drag = self.z_equality/(1.0+self.z_drag); - - self.sound_horizon_fit = 44.5*math.log(9.83/self.omhh)/math.sqrt(1.0+10.0*math.pow(self.obhh,0.75)); - - # Set up for the free-streaming & infall growth function - self.p_c = 0.25*(5.0-math.sqrt(1+24.0*self.f_cdm)); - self.p_cb = 0.25*(5.0-math.sqrt(1+24.0*self.f_cb)); - - omega_denom = omega_lambda+SQR(1.0+redshift)*(self.omega_curv+\ - omega_matter*(1.0+redshift)); - self.omega_lambda_z = omega_lambda/omega_denom; - self.omega_matter_z = omega_matter*SQR(1.0+redshift)*(1.0+redshift)/omega_denom; - self.growth_k0 = self.z_equality/(1.0+redshift)*2.5*self.omega_matter_z/ \ - (math.pow(self.omega_matter_z,4.0/7.0)-self.omega_lambda_z+ \ - (1.0+self.omega_matter_z/2.0)*(1.0+self.omega_lambda_z/70.0)); - self.growth_to_z0 = self.z_equality*2.5*omega_matter/(math.pow(omega_matter,4.0/7.0) \ - -omega_lambda + (1.0+omega_matter/2.0)*(1.0+omega_lambda/70.0)); - self.growth_to_z0 = self.growth_k0/self.growth_to_z0; - - # Compute small-scale suppression - self.alpha_nu = self.f_cdm/self.f_cb*(5.0-2.*(self.p_c+self.p_cb))/(5.-4.*self.p_cb)* \ - math.pow(1+self.y_drag,self.p_cb-self.p_c)* \ - (1+self.f_bnu*(-0.553+0.126*self.f_bnu*self.f_bnu))/ \ - (1-0.193*math.sqrt(self.f_hdm*self.num_degen_hdm)+0.169*self.f_hdm*math.pow(self.num_degen_hdm,0.2))* \ - (1+(self.p_c-self.p_cb)/2*(1+1/(3.-4.*self.p_c)/(7.-4.*self.p_cb))/(1+self.y_drag)); - self.alpha_gamma = math.sqrt(self.alpha_nu); - self.beta_c = 1/(1-0.949*self.f_bnu); - # Done setting scalar variables - self.hhubble = hubble # Need to pass Hubble constant to TFmdm_onek_hmpc() - - - def TFmdm_onek_mpc(self, kk): - """ - Given a wavenumber in Mpc^-1, return the transfer function for the - cosmology held in the global variables. - - Parameters - ---------- - kk : float - Wavenumber in Mpc^-1 - - Returns - ------- - growth_cb : float - the transfer function for density-weighted - CDM + Baryon perturbations. (returned and set as a global var) - growth_cbnu : float - the transfer function for density-weighted - CDM + Baryon + Massive Neutrino perturbations. 
- (set as a global var) - """ - - self.qq = kk/self.omhh*SQR(self.theta_cmb); - - # Compute the scale-dependent growth functions - self.y_freestream = 17.2*self.f_hdm*(1+0.488*math.pow(self.f_hdm,-7.0/6.0))* \ - SQR(self.num_degen_hdm*self.qq/self.f_hdm); - temp1 = math.pow(self.growth_k0, 1.0-self.p_cb); - temp2 = np.power(self.growth_k0/(1+self.y_freestream),0.7); - self.growth_cb = np.power(1.0+temp2, self.p_cb/0.7)*temp1; - self.growth_cbnu = np.power(np.power(self.f_cb,0.7/self.p_cb)+temp2, self.p_cb/0.7)*temp1; - - # Compute the master function - self.gamma_eff = self.omhh*(self.alpha_gamma+(1-self.alpha_gamma)/ \ - (1+SQR(SQR(kk*self.sound_horizon_fit*0.43)))); - self.qq_eff = self.qq*self.omhh/self.gamma_eff; - - tf_sup_L = np.log(2.71828+1.84*self.beta_c*self.alpha_gamma*self.qq_eff); - tf_sup_C = 14.4+325/(1+60.5*np.power(self.qq_eff,1.11)); - self.tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*SQR(self.qq_eff)); - - self.qq_nu = 3.92*self.qq*math.sqrt(self.num_degen_hdm/self.f_hdm); - self.max_fs_correction = 1+1.2*math.pow(self.f_hdm,0.64)*math.pow(self.num_degen_hdm,0.3+0.6*self.f_hdm)/ \ - (np.power(self.qq_nu,-1.6)+np.power(self.qq_nu,0.8)); - self.tf_master = self.tf_sup*self.max_fs_correction; - - # Now compute the CDM+HDM+baryon transfer functions - tf_cb = self.tf_master*self.growth_cb/self.growth_k0; - #tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0; - return tf_cb - - - def TFmdm_onek_hmpc(self, kk): - """ - Given a wavenumber in h Mpc^-1, return the transfer function for the - cosmology held in the global variables. - - Parameters - ---------- - kk : float - Wavenumber in h Mpc^-1 - - Returns - ------- - growth_cb : float - the transfer function for density-weighted - CDM + Baryon perturbations. (return and set as a global var) - growth_cbnu : float - the transfer function for density-weighted - CDM + Baryon + Massive Neutrino perturbations. - """ - return self.TFmdm_onek_mpc(kk*self.hhubble); - -def SQR(a): - return a*a - -def integrate_inf(fcn, error=1e-3, initial_guess=10): - """ - Integrate a function *fcn* from zero to infinity, stopping when the answer - changes by less than *error*. Hopefully someday we can do something - better than this! - """ - xvals = np.logspace(0,np.log10(initial_guess), initial_guess+1)-.9 - yvals = fcn(xvals) - xdiffs = xvals[1:] - xvals[:-1] - # Trapezoid rule, but with different dxes between values, so np.trapz - # will not work. - areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0 - area0 = np.sum(areas) - # Next guess. - next_guess = 10 * initial_guess - xvals = np.logspace(0,np.log10(next_guess), 2*initial_guess**2+1)-.99 - yvals = fcn(xvals) - xdiffs = xvals[1:] - xvals[:-1] - # Trapezoid rule. - areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0 - area1 = np.sum(areas) - # Now we refine until the error is smaller than *error*. - diff = area1 - area0 - area_last = area1 - one_pow = 3 - while diff > error: - next_guess *= 10 - xvals = np.logspace(0,np.log10(next_guess), one_pow*initial_guess**one_pow+1) - (1 - 0.1**one_pow) - yvals = fcn(xvals) - xdiffs = xvals[1:] - xvals[:-1] - # Trapezoid rule. 
- areas = (yvals[1:] + yvals[:-1]) * xdiffs / 2.0 - area_next = np.sum(areas) - diff = area_next - area_last - area_last = area_next - one_pow+=1 - return area_last diff --git a/yt/analysis_modules/level_sets/api.py b/yt/analysis_modules/level_sets/api.py index 8586690c0c7..810b89dc617 100644 --- a/yt/analysis_modules/level_sets/api.py +++ b/yt/analysis_modules/level_sets/api.py @@ -1,44 +1,5 @@ -""" -API for level_sets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) yt Development Team. All rights reserved. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( +raise RuntimeError( "The level_sets module has been moved to yt.data_objects.level_sets." - "This import is deprecated and will be removed in a future release." "Please, change the import in your scripts from " "'from yt.analysis_modules.level_sets' to " "'from yt.data_objects.level_sets.'.") - -from yt.data_objects.level_sets.contour_finder import \ - identify_contours - -from yt.data_objects.level_sets.clump_handling import \ - Clump, \ - find_clumps, \ - get_lowest_clumps - -from yt.data_objects.level_sets.clump_info_items import \ - add_clump_info - -from yt.data_objects.level_sets.clump_validators import \ - add_validator - -from yt.data_objects.level_sets.clump_tools import \ - recursive_all_clumps, \ - return_all_clumps, \ - return_bottom_clumps, \ - recursive_bottom_clumps, \ - clump_list_sort diff --git a/yt/analysis_modules/list_modules.py b/yt/analysis_modules/list_modules.py deleted file mode 100644 index e2dc1c6ce3e..00000000000 --- a/yt/analysis_modules/list_modules.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -A mechanism for listing available analysis modules. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import os -import sys - -def get_available_modules(): - modpath = os.path.abspath(os.path.dirname(__file__)) - available_modules = [] - for d in [os.path.join(modpath, f) for f in os.listdir(modpath)]: - if os.path.isdir(d) and os.path.isfile(os.path.join(d, "api.py")): - available_modules.append(os.path.basename(d)) - return available_modules - -class AnalysisModuleLoader(object): - - @property - def available_modules(self): - return get_available_modules() - - def __getattr__(self, attr): - try: - name = "yt.analysis_modules.%s.api" % (attr) - __import__(name, level=-1) - setattr(self, attr, sys.modules[name]) - except ImportError: - raise AttributeError(attr) - return getattr(self, attr) - - def __dir__(self): - # This is a badly behaving object. I was unable to get this line: - #return super(AnalysisModuleLoader, self).__dir__() + self.available_modules - # to work, so we simply return only the methods we know about. 
- return ["available_modules"] + self.available_modules - -amods = AnalysisModuleLoader() diff --git a/yt/analysis_modules/particle_trajectories/api.py b/yt/analysis_modules/particle_trajectories/api.py index ecaf6f8c1c8..0b7d346baf9 100644 --- a/yt/analysis_modules/particle_trajectories/api.py +++ b/yt/analysis_modules/particle_trajectories/api.py @@ -1,6 +1,4 @@ -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning("Particle trajectories are now available from DatasetSeries " - "objects as ts.particle_trajectories. The ParticleTrajectories " - "analysis module is deprecated.") -from yt.data_objects.particle_trajectories import ParticleTrajectories \ No newline at end of file +raise RuntimeError( + "Particle trajectories are now available from DatasetSeries " + "objects as ts.particle_trajectories. The ParticleTrajectories " + "analysis module has been removed.") diff --git a/yt/analysis_modules/photon_simulator/api.py b/yt/analysis_modules/photon_simulator/api.py index 1826ffd91a2..ed738750d0e 100644 --- a/yt/analysis_modules/photon_simulator/api.py +++ b/yt/analysis_modules/photon_simulator/api.py @@ -1,33 +1,7 @@ -""" -API for yt.analysis_modules.photon_simulator. -""" +from yt.utilities.exceptions import \ + YTModuleRemoved -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning("The photon_simulator module is deprecated. Please use pyXSIM " - "(http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim) instead.") - -from .photon_models import \ - PhotonModel, \ - ThermalPhotonModel - -from .photon_simulator import \ - PhotonList, \ - EventList, \ - merge_files, \ - convert_old_file - -from .spectral_models import \ - SpectralModel, \ - XSpecThermalModel, \ - XSpecAbsorbModel, \ - TableApecModel, \ - TableAbsorbModel +raise YTModuleRemoved( + "photon_simulator", + "pyXSIM", + "http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim") diff --git a/yt/analysis_modules/photon_simulator/photon_models.py b/yt/analysis_modules/photon_simulator/photon_models.py deleted file mode 100644 index 7882e07dc75..00000000000 --- a/yt/analysis_modules/photon_simulator/photon_models.py +++ /dev/null @@ -1,262 +0,0 @@ -""" -Classes for specific photon models - -The algorithms used here are based off of the method used by the -PHOX code (https://wwwmpa.mpa-garching.mpg.de/~kdolag/Phox/), -developed by Veronica Biffi and Klaus Dolag. References for -PHOX may be found at: - -Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545 -https://ui.adsabs.harvard.edu/abs/2012MNRAS.420.3545B - -Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395 -https://ui.adsabs.harvard.edu/abs/2013MNRAS.428.1395B - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.extern.six import string_types -import numpy as np -from yt.funcs import mylog, get_pbar -from yt.units.yt_array import YTArray -from yt.utilities.physical_constants import mp -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - parallel_objects -from yt.units.yt_array import uconcatenate - -n_kT = 10000 -kT_min = 8.08e-2 -kT_max = 50. - -photon_units = {"Energy":"keV", - "dx":"kpc"} -for ax in "xyz": - photon_units[ax] = "kpc" - photon_units["v"+ax] = "km/s" - -class PhotonModel(object): - - def __init__(self): - pass - - def __call__(self, data_source, parameters): - photons = {} - return photons - -class ThermalPhotonModel(PhotonModel): - r""" - Initialize a ThermalPhotonModel from a thermal spectrum. - - Parameters - ---------- - spectral_model : `SpectralModel` - A thermal spectral model instance, either of `XSpecThermalModel` - or `TableApecModel`. - X_H : float, optional - The hydrogen mass fraction. - Zmet : float or string, optional - The metallicity. If a float, assumes a constant metallicity throughout. - If a string, is taken to be the name of the metallicity field. - photons_per_chunk : integer - The maximum number of photons that are allocated per chunk. Increase or decrease - as needed. - method : string, optional - The method used to generate the photon energies from the spectrum: - "invert_cdf": Invert the cumulative distribution function of the spectrum. - "accept_reject": Acceptance-rejection method using the spectrum. - The first method should be sufficient for most cases. - prng : NumPy `RandomState` object or numpy.random - A pseudo-random number generator. Typically will only be specified - if you have a reason to generate the same set of random numbers, such as for a - test. Default is the numpy.random module. 
- """ - def __init__(self, spectral_model, X_H=0.75, Zmet=0.3, - photons_per_chunk=10000000, method="invert_cdf", - prng=np.random): - self.X_H = X_H - self.Zmet = Zmet - self.spectral_model = spectral_model - self.photons_per_chunk = photons_per_chunk - self.method = method - self.prng = prng - - def __call__(self, data_source, parameters): - - ds = data_source.ds - - exp_time = parameters["FiducialExposureTime"] - area = parameters["FiducialArea"] - redshift = parameters["FiducialRedshift"] - D_A = parameters["FiducialAngularDiameterDistance"].in_cgs() - dist_fac = 1.0/(4.*np.pi*D_A.value*D_A.value*(1.+redshift)**2) - src_ctr = parameters["center"] - - my_kT_min, my_kT_max = data_source.quantities.extrema("kT") - - self.spectral_model.prepare_spectrum(redshift) - emid = self.spectral_model.emid - ebins = self.spectral_model.ebins - nchan = len(emid) - - citer = data_source.chunks([], "io") - - photons = {} - photons["x"] = [] - photons["y"] = [] - photons["z"] = [] - photons["vx"] = [] - photons["vy"] = [] - photons["vz"] = [] - photons["dx"] = [] - photons["Energy"] = [] - photons["NumberOfPhotons"] = [] - - spectral_norm = area.v*exp_time.v*dist_fac - - tot_num_cells = data_source.ires.shape[0] - - pbar = get_pbar("Generating photons ", tot_num_cells) - - cell_counter = 0 - - for chunk in parallel_objects(citer): - - kT = chunk["kT"].v - num_cells = len(kT) - if num_cells == 0: - continue - vol = chunk["cell_volume"].in_cgs().v - EM = (chunk["density"]/mp).in_cgs().v**2 - EM *= 0.5*(1.+self.X_H)*self.X_H*vol - - if isinstance(self.Zmet, string_types): - metalZ = chunk[self.Zmet].v - else: - metalZ = self.Zmet*np.ones(num_cells) - - idxs = np.argsort(kT) - - kT_bins = np.linspace(kT_min, max(my_kT_max.v, kT_max), num=n_kT+1) - dkT = kT_bins[1]-kT_bins[0] - kT_idxs = np.digitize(kT[idxs], kT_bins) - kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1 - bcounts = np.bincount(kT_idxs).astype("int") - bcounts = bcounts[bcounts > 0] - n = int(0) - bcell = [] - ecell = [] - for bcount in bcounts: - bcell.append(n) - ecell.append(n+bcount) - n += bcount - kT_idxs = np.unique(kT_idxs) - - cell_em = EM[idxs]*spectral_norm - - number_of_photons = np.zeros(num_cells, dtype="uint64") - energies = np.zeros(self.photons_per_chunk) - - start_e = 0 - end_e = 0 - - for ibegin, iend, ikT in zip(bcell, ecell, kT_idxs): - - kT = kT_bins[ikT] + 0.5*dkT - - n_current = iend-ibegin - - cem = cell_em[ibegin:iend] - - cspec, mspec = self.spectral_model.get_spectrum(kT) - - tot_ph_c = cspec.d.sum() - tot_ph_m = mspec.d.sum() - - u = self.prng.uniform(size=n_current) - - cell_norm_c = tot_ph_c*cem - cell_norm_m = tot_ph_m*metalZ[ibegin:iend]*cem - cell_norm = np.modf(cell_norm_c + cell_norm_m) - cell_n = np.uint64(cell_norm[1]) + np.uint64(cell_norm[0] >= u) - - number_of_photons[ibegin:iend] = cell_n - - end_e += int(cell_n.sum()) - - if end_e > self.photons_per_chunk: - raise RuntimeError("Number of photons generated for this chunk "+ - "exceeds photons_per_chunk (%d)! " % self.photons_per_chunk + - "Increase photons_per_chunk!") - - if self.method == "invert_cdf": - cumspec_c = np.cumsum(cspec.d) - cumspec_m = np.cumsum(mspec.d) - cumspec_c = np.insert(cumspec_c, 0, 0.0) - cumspec_m = np.insert(cumspec_m, 0, 0.0) - - ei = start_e - for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]): - if cn == 0: continue - # The rather verbose form of the few next statements is a - # result of code optimization and shouldn't be changed - # without checking for performance degradation. 
See - # https://bitbucket.org/yt_analysis/yt/pull-requests/1766 - # for details. - if self.method == "invert_cdf": - cumspec = cumspec_c - cumspec += Z * cumspec_m - norm_factor = 1.0 / cumspec[-1] - cumspec *= norm_factor - randvec = self.prng.uniform(size=cn) - randvec.sort() - cell_e = np.interp(randvec, cumspec, ebins) - elif self.method == "accept_reject": - tot_spec = cspec.d - tot_spec += Z * mspec.d - norm_factor = 1.0 / tot_spec.sum() - tot_spec *= norm_factor - eidxs = self.prng.choice(nchan, size=cn, p=tot_spec) - cell_e = emid[eidxs] - energies[int(ei):int(ei + cn)] = cell_e - cell_counter += 1 - pbar.update(cell_counter) - ei += cn - - start_e = end_e - - active_cells = number_of_photons > 0 - idxs = idxs[active_cells] - - photons["NumberOfPhotons"].append(number_of_photons[active_cells]) - photons["Energy"].append(ds.arr(energies[:end_e].copy(), "keV")) - photons["x"].append((chunk["x"][idxs]-src_ctr[0]).in_units("kpc")) - photons["y"].append((chunk["y"][idxs]-src_ctr[1]).in_units("kpc")) - photons["z"].append((chunk["z"][idxs]-src_ctr[2]).in_units("kpc")) - photons["vx"].append(chunk["velocity_x"][idxs].in_units("km/s")) - photons["vy"].append(chunk["velocity_y"][idxs].in_units("km/s")) - photons["vz"].append(chunk["velocity_z"][idxs].in_units("km/s")) - photons["dx"].append(chunk["dx"][idxs].in_units("kpc")) - - pbar.finish() - - for key in photons: - if len(photons[key]) > 0: - photons[key] = uconcatenate(photons[key]) - elif key == "NumberOfPhotons": - photons[key] = np.array([]) - else: - photons[key] = YTArray([], photon_units[key]) - - mylog.info("Number of photons generated: %d" % int(np.sum(photons["NumberOfPhotons"]))) - mylog.info("Number of cells with photons: %d" % len(photons["x"])) - - self.spectral_model.cleanup_spectrum() - - return photons diff --git a/yt/analysis_modules/photon_simulator/photon_simulator.py b/yt/analysis_modules/photon_simulator/photon_simulator.py deleted file mode 100644 index cf7eb6da648..00000000000 --- a/yt/analysis_modules/photon_simulator/photon_simulator.py +++ /dev/null @@ -1,1567 +0,0 @@ -""" -Classes for generating lists of photons and detected events - -The SciPy Proceeding that describes this module in detail may be found at: - -http://conference.scipy.org/proceedings/scipy2014/zuhone.html - -The algorithms used here are based off of the method used by the -PHOX code (https://wwwmpa.mpa-garching.mpg.de/~kdolag/Phox/), -developed by Veronica Biffi and Klaus Dolag. References for -PHOX may be found at: - -Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545 -https://ui.adsabs.harvard.edu/abs/2012MNRAS.420.3545B - -Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395 -https://ui.adsabs.harvard.edu/abs/2013MNRAS.428.1395B -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- -from yt.extern.six import string_types -from collections import defaultdict -import numpy as np -from yt.funcs import mylog, get_pbar, iterable, ensure_list -from yt.utilities.physical_constants import clight -from yt.utilities.cosmology import Cosmology -from yt.utilities.orientation import Orientation -from yt.visualization.fits_image import assert_same_wcs -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - communication_system, parallel_root_only, get_mpi_type, \ - parallel_capable -from yt.units.yt_array import YTQuantity, YTArray, uconcatenate -from yt.utilities.on_demand_imports import _h5py as h5py -from yt.utilities.on_demand_imports import _astropy -import warnings -import os - -comm = communication_system.communicators[-1] - -axes_lookup = {"x":("y","z"), - "y":("z","x"), - "z":("x","y")} - -def force_unicode(value): - if hasattr(value, 'decode'): - return value.decode('utf8') - else: - return value - -def parse_value(value, default_units): - if isinstance(value, YTQuantity): - return value.in_units(default_units) - elif iterable(value): - return YTQuantity(value[0], value[1]).in_units(default_units) - else: - return YTQuantity(value, default_units) - -def validate_parameters(first, second, skip=[]): - keys1 = list(first.keys()) - keys2 = list(first.keys()) - keys1.sort() - keys2.sort() - if keys1 != keys2: - raise RuntimeError("The two inputs do not have the same parameters!") - for k1, k2 in zip(keys1, keys2): - if k1 not in skip: - v1 = first[k1] - v2 = second[k2] - if isinstance(v1, string_types) or isinstance(v2, string_types): - check_equal = v1 == v2 - else: - check_equal = np.allclose(v1, v2, rtol=0.0, atol=1.0e-10) - if not check_equal: - raise RuntimeError("The values for the parameter '%s' in the two inputs" % k1 + - " are not identical (%s vs. %s)!" % (v1, v2)) - -class PhotonList(object): - - def __init__(self, photons, parameters, cosmo, p_bins): - self.photons = photons - self.parameters = parameters - self.cosmo = cosmo - self.p_bins = p_bins - self.num_cells = len(photons["x"]) - - def keys(self): - return self.photons.keys() - - def items(self): - ret = [] - for k, v in self.photons.items(): - if k == "Energy": - ret.append((k, self[k])) - else: - ret.append((k,v)) - return ret - - def values(self): - ret = [] - for k, v in self.photons.items(): - if k == "Energy": - ret.append(self[k]) - else: - ret.append(v) - return ret - - def __getitem__(self, key): - if key == "Energy": - return [self.photons["Energy"][self.p_bins[i]:self.p_bins[i+1]] - for i in range(self.num_cells)] - else: - return self.photons[key] - - def __contains__(self, key): - return key in self.photons - - def __repr__(self): - return self.photons.__repr__() - - @classmethod - def from_file(cls, filename): - r""" - Initialize a PhotonList from the HDF5 file *filename*. 
- """ - - photons = {} - parameters = {} - - f = h5py.File(filename, mode="r") - - p = f["/parameters"] - parameters["FiducialExposureTime"] = YTQuantity(p["fid_exp_time"].value, "s") - parameters["FiducialArea"] = YTQuantity(p["fid_area"].value, "cm**2") - parameters["FiducialRedshift"] = p["fid_redshift"].value - parameters["FiducialAngularDiameterDistance"] = YTQuantity(p["fid_d_a"].value, "Mpc") - parameters["Dimension"] = p["dimension"].value - parameters["Width"] = YTQuantity(p["width"].value, "kpc") - parameters["HubbleConstant"] = p["hubble"].value - parameters["OmegaMatter"] = p["omega_matter"].value - parameters["OmegaLambda"] = p["omega_lambda"].value - - d = f["/data"] - - num_cells = d["x"][:].shape[0] - start_c = comm.rank*num_cells//comm.size - end_c = (comm.rank+1)*num_cells//comm.size - - photons["x"] = YTArray(d["x"][start_c:end_c], "kpc") - photons["y"] = YTArray(d["y"][start_c:end_c], "kpc") - photons["z"] = YTArray(d["z"][start_c:end_c], "kpc") - photons["dx"] = YTArray(d["dx"][start_c:end_c], "kpc") - photons["vx"] = YTArray(d["vx"][start_c:end_c], "km/s") - photons["vy"] = YTArray(d["vy"][start_c:end_c], "km/s") - photons["vz"] = YTArray(d["vz"][start_c:end_c], "km/s") - - n_ph = d["num_photons"][:] - - if comm.rank == 0: - start_e = np.uint64(0) - else: - start_e = n_ph[:start_c].sum() - end_e = start_e + np.uint64(n_ph[start_c:end_c].sum()) - - photons["NumberOfPhotons"] = n_ph[start_c:end_c] - - p_bins = np.cumsum(photons["NumberOfPhotons"]) - p_bins = np.insert(p_bins, 0, [np.uint64(0)]) - - photons["Energy"] = YTArray(d["energy"][start_e:end_e], "keV") - - f.close() - - cosmo = Cosmology(hubble_constant=parameters["HubbleConstant"], - omega_matter=parameters["OmegaMatter"], - omega_lambda=parameters["OmegaLambda"]) - - return cls(photons, parameters, cosmo, p_bins) - - @classmethod - def from_scratch(cls, data_source, redshift, area, - exp_time, photon_model, parameters=None, - center=None, dist=None, cosmology=None): - r""" - Initialize a PhotonList from a photon model. The redshift, collecting area, - exposure time, and cosmology are stored in the *parameters* dictionary which - is passed to the *photon_model* function. - - Parameters - ---------- - data_source : `yt.data_objects.data_containers.YTSelectionContainer` - The data source from which the photons will be generated. - redshift : float - The cosmological redshift for the photons. - area : float - The collecting area to determine the number of photons in cm^2. - exp_time : float - The exposure time to determine the number of photons in seconds. - photon_model : function - A function that takes the *data_source* and the *parameters* - dictionary and returns a *photons* dictionary. Must be of the - form: photon_model(data_source, parameters) - parameters : dict, optional - A dictionary of parameters to be passed to the user function. - center : string or array_like, optional - The origin of the photons. Accepts "c", "max", or a coordinate. - dist : tuple, optional - The angular diameter distance in the form (value, unit), used - mainly for nearby sources. This may be optionally supplied - instead of it being determined from the *redshift* and given *cosmology*. - cosmology : `yt.utilities.cosmology.Cosmology`, optional - Cosmological information. If not supplied, we try to get - the cosmology from the dataset. Otherwise, \LambdaCDM with - the default yt parameters is assumed. 
- - Examples - -------- - This is the simplest possible example, where we call the built-in thermal model: - - >>> thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3) - >>> redshift = 0.05 - >>> area = 6000.0 - >>> time = 2.0e5 - >>> sp = ds.sphere("c", (500., "kpc")) - >>> my_photons = PhotonList.from_user_model(sp, redshift, area, - ... time, thermal_model) - - If you wish to make your own photon model function, it must take as its - arguments the *data_source* and the *parameters* dictionary. However you - determine them, the *photons* dict needs to have the following items, corresponding - to cells which have photons: - - "x" : the x-position of the cell relative to the source center in kpc, YTArray - "y" : the y-position of the cell relative to the source center in kpc, YTArray - "z" : the z-position of the cell relative to the source center in kpc, YTArray - "vx" : the x-velocity of the cell in km/s, YTArray - "vy" : the y-velocity of the cell in km/s, YTArray - "vz" : the z-velocity of the cell in km/s, YTArray - "dx" : the width of the cell in kpc, YTArray - "NumberOfPhotons" : the number of photons in the cell, NumPy array of unsigned 64-bit integers - "Energy" : the source rest-frame energies of the photons, YTArray - - The last array is not the same size as the others because it contains the energies in all of - the cells in a single 1-D array. The first photons["NumberOfPhotons"][0] elements are - for the first cell, the next photons["NumberOfPhotons"][1] are for the second cell, and so on. - - The following is a simple example where a point source with a single line emission - spectrum of photons is created. More complicated examples which actually - create photons based on the fields in the dataset could be created. - - >>> import numpy as np - >>> import yt - >>> from yt.analysis_modules.photon_simulator.api import PhotonList - >>> def line_func(source, parameters): - ... - ... ds = source.ds - ... - ... num_photons = parameters["num_photons"] - ... E0 = parameters["line_energy"] # Energies are in keV - ... sigE = parameters["line_sigma"] - ... src_ctr = parameters["center"] - ... - ... energies = norm.rvs(loc=E0, scale=sigE, size=num_photons) - ... - ... # Place everything in the center cell - ... for i, ax in enumerate("xyz"): - ... photons[ax] = (ds.domain_center[0]-src_ctr[0]).in_units("kpc") - ... photons["vx"] = ds.arr([0], "km/s") - ... photons["vy"] = ds.arr([0], "km/s") - ... photons["vz"] = ds.arr([100.0], "km/s") - ... photons["dx"] = ds.find_field_values_at_point("dx", ds.domain_center).in_units("kpc") - ... photons["NumberOfPhotons"] = np.array(num_photons*np.ones(1), dtype="uint64") - ... photons["Energy"] = ds.arr(energies, "keV") - >>> - >>> redshift = 0.05 - >>> area = 6000.0 - >>> time = 2.0e5 - >>> parameters = {"num_photons" : 10000, "line_energy" : 5.0, - ... "line_sigma" : 0.1} - >>> ddims = (128,128,128) - >>> random_data = {"density":(np.random.random(ddims),"g/cm**3")} - >>> ds = yt.load_uniform_grid(random_data, ddims) - >>> dd = ds.all_data() - >>> my_photons = PhotonList.from_user_model(dd, redshift, area, - ... time, line_func, - ... 
parameters=parameters) - - """ - - ds = data_source.ds - - if parameters is None: - parameters = {} - if cosmology is None: - hubble = getattr(ds, "hubble_constant", None) - omega_m = getattr(ds, "omega_matter", None) - omega_l = getattr(ds, "omega_lambda", None) - if hubble == 0: hubble = None - if hubble is not None and \ - omega_m is not None and \ - omega_l is not None: - cosmo = Cosmology(hubble_constant=hubble, - omega_matter=omega_m, - omega_lambda=omega_l) - else: - cosmo = Cosmology() - else: - cosmo = cosmology - mylog.info("Cosmology: h = %g, omega_matter = %g, omega_lambda = %g" % - (cosmo.hubble_constant, cosmo.omega_matter, cosmo.omega_lambda)) - if dist is None: - D_A = cosmo.angular_diameter_distance(0.0,redshift).in_units("Mpc") - else: - D_A = parse_value(dist, "Mpc") - redshift = 0.0 - - if center in ("center", "c"): - parameters["center"] = ds.domain_center - elif center in ("max", "m"): - parameters["center"] = ds.find_max("density")[-1] - elif iterable(center): - if isinstance(center, YTArray): - parameters["center"] = center.in_units("code_length") - elif isinstance(center, tuple): - if center[0] == "min": - parameters["center"] = ds.find_min(center[1])[-1] - elif center[0] == "max": - parameters["center"] = ds.find_max(center[1])[-1] - else: - raise RuntimeError - else: - parameters["center"] = ds.arr(center, "code_length") - elif center is None: - parameters["center"] = data_source.get_field_parameter("center") - - parameters["FiducialExposureTime"] = parse_value(exp_time, "s") - parameters["FiducialArea"] = parse_value(area, "cm**2") - parameters["FiducialRedshift"] = redshift - parameters["FiducialAngularDiameterDistance"] = D_A - parameters["HubbleConstant"] = cosmo.hubble_constant - parameters["OmegaMatter"] = cosmo.omega_matter - parameters["OmegaLambda"] = cosmo.omega_lambda - - dimension = 0 - width = 0.0 - for i, ax in enumerate("xyz"): - le, re = data_source.quantities.extrema(ax) - delta_min, delta_max = data_source.quantities.extrema("d%s"%ax) - le -= 0.5*delta_max - re += 0.5*delta_max - width = max(width, re-parameters["center"][i], parameters["center"][i]-le) - dimension = max(dimension, int(width/delta_min)) - parameters["Dimension"] = 2*dimension - parameters["Width"] = 2.*width.in_units("kpc") - - photons = photon_model(data_source, parameters) - - mylog.info("Finished generating photons.") - - p_bins = np.cumsum(photons["NumberOfPhotons"]) - p_bins = np.insert(p_bins, 0, [np.uint64(0)]) - - return cls(photons, parameters, cosmo, p_bins) - - def write_h5_file(self, photonfile): - """ - Write the photons to the HDF5 file *photonfile*. 
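# Editorial illustration, not part of the diff: how the flattened "Energy"
# array maps back onto cells.  from_scratch (above) builds p_bins with
# np.cumsum + np.insert, and PhotonList.__getitem__ slices the flat array per
# cell.  The toy counts and energies below are placeholders.
import numpy as np

n_ph = np.array([3, 0, 2], dtype="uint64")        # photons per cell
energies = np.array([1.2, 3.4, 0.7, 5.1, 2.2])    # flat keV array, len == n_ph.sum()

p_bins = np.insert(np.cumsum(n_ph), 0, 0)          # array([0, 3, 3, 5])
per_cell = [energies[p_bins[i]:p_bins[i + 1]] for i in range(len(n_ph))]
# per_cell -> [array([1.2, 3.4, 0.7]), array([]), array([5.1, 2.2])]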
- """ - - if parallel_capable: - - mpi_long = get_mpi_type("int64") - mpi_double = get_mpi_type("float64") - - local_num_cells = len(self.photons["x"]) - sizes_c = comm.comm.gather(local_num_cells, root=0) - - local_num_photons = np.sum(self.photons["NumberOfPhotons"]) - sizes_p = comm.comm.gather(local_num_photons, root=0) - - if comm.rank == 0: - num_cells = sum(sizes_c) - num_photons = sum(sizes_p) - disps_c = [sum(sizes_c[:i]) for i in range(len(sizes_c))] - disps_p = [sum(sizes_p[:i]) for i in range(len(sizes_p))] - x = np.zeros(num_cells) - y = np.zeros(num_cells) - z = np.zeros(num_cells) - vx = np.zeros(num_cells) - vy = np.zeros(num_cells) - vz = np.zeros(num_cells) - dx = np.zeros(num_cells) - n_ph = np.zeros(num_cells, dtype="uint64") - e = np.zeros(num_photons) - else: - sizes_c = [] - sizes_p = [] - disps_c = [] - disps_p = [] - x = np.empty([]) - y = np.empty([]) - z = np.empty([]) - vx = np.empty([]) - vy = np.empty([]) - vz = np.empty([]) - dx = np.empty([]) - n_ph = np.empty([]) - e = np.empty([]) - - comm.comm.Gatherv([self.photons["x"].d, local_num_cells, mpi_double], - [x, (sizes_c, disps_c), mpi_double], root=0) - comm.comm.Gatherv([self.photons["y"].d, local_num_cells, mpi_double], - [y, (sizes_c, disps_c), mpi_double], root=0) - comm.comm.Gatherv([self.photons["z"].d, local_num_cells, mpi_double], - [z, (sizes_c, disps_c), mpi_double], root=0) - comm.comm.Gatherv([self.photons["vx"].d, local_num_cells, mpi_double], - [vx, (sizes_c, disps_c), mpi_double], root=0) - comm.comm.Gatherv([self.photons["vy"].d, local_num_cells, mpi_double], - [vy, (sizes_c, disps_c), mpi_double], root=0) - comm.comm.Gatherv([self.photons["vz"].d, local_num_cells, mpi_double], - [vz, (sizes_c, disps_c), mpi_double], root=0) - comm.comm.Gatherv([self.photons["dx"].d, local_num_cells, mpi_double], - [dx, (sizes_c, disps_c), mpi_double], root=0) - comm.comm.Gatherv([self.photons["NumberOfPhotons"], local_num_cells, mpi_long], - [n_ph, (sizes_c, disps_c), mpi_long], root=0) - comm.comm.Gatherv([self.photons["Energy"].d, local_num_photons, mpi_double], - [e, (sizes_p, disps_p), mpi_double], root=0) - - else: - - x = self.photons["x"].d - y = self.photons["y"].d - z = self.photons["z"].d - vx = self.photons["vx"].d - vy = self.photons["vy"].d - vz = self.photons["vz"].d - dx = self.photons["dx"].d - n_ph = self.photons["NumberOfPhotons"] - e = self.photons["Energy"].d - - if comm.rank == 0: - - f = h5py.File(photonfile, mode="w") - - # Parameters - - p = f.create_group("parameters") - p.create_dataset("fid_area", data=float(self.parameters["FiducialArea"])) - p.create_dataset("fid_exp_time", data=float(self.parameters["FiducialExposureTime"])) - p.create_dataset("fid_redshift", data=self.parameters["FiducialRedshift"]) - p.create_dataset("hubble", data=self.parameters["HubbleConstant"]) - p.create_dataset("omega_matter", data=self.parameters["OmegaMatter"]) - p.create_dataset("omega_lambda", data=self.parameters["OmegaLambda"]) - p.create_dataset("fid_d_a", data=float(self.parameters["FiducialAngularDiameterDistance"])) - p.create_dataset("dimension", data=self.parameters["Dimension"]) - p.create_dataset("width", data=float(self.parameters["Width"])) - - # Data - - d = f.create_group("data") - d.create_dataset("x", data=x) - d.create_dataset("y", data=y) - d.create_dataset("z", data=z) - d.create_dataset("vx", data=vx) - d.create_dataset("vy", data=vy) - d.create_dataset("vz", data=vz) - d.create_dataset("dx", data=dx) - d.create_dataset("num_photons", data=n_ph) - d.create_dataset("energy", 
data=e) - - f.close() - - comm.barrier() - - def project_photons(self, normal, area_new=None, exp_time_new=None, - redshift_new=None, dist_new=None, - absorb_model=None, psf_sigma=None, - sky_center=None, responses=None, - convolve_energies=False, no_shifting=False, - north_vector=None, prng=np.random): - r""" - Projects photons onto an image plane given a line of sight. - - Parameters - ---------- - normal : character or array_like - Normal vector to the plane of projection. If "x", "y", or "z", will - assume to be along that axis (and will probably be faster). Otherwise, - should be an off-axis normal vector, e.g [1.0,2.0,-3.0] - area_new : float, optional - New value for the effective area of the detector. If *responses* - are specified the value of this keyword is ignored. - exp_time_new : float, optional - The new value for the exposure time. - redshift_new : float, optional - The new value for the cosmological redshift. - dist_new : tuple, optional - The new value for the angular diameter distance in the form - (value, unit), used mainly for nearby sources. This may be optionally supplied - instead of it being determined from the cosmology. - absorb_model : 'yt.analysis_modules.photon_simulator.PhotonModel`, optional - A model for galactic absorption. - psf_sigma : float, optional - Quick-and-dirty psf simulation using Gaussian smoothing with - standard deviation *psf_sigma* in degrees. - sky_center : array_like, optional - Center RA, Dec of the events in degrees. - responses : list of strings, optional - The names of the ARF and/or RMF files to convolve the photons with. - convolve_energies : boolean, optional - If this is set, the photon energies will be convolved with the RMF. - no_shifting : boolean, optional - If set, the photon energies will not be Doppler shifted. - north_vector : a sequence of floats - A vector defining the 'up' direction. This option sets the orientation of - the plane of projection. If not set, an arbitrary grid-aligned north_vector - is chosen. Ignored in the case where a particular axis (e.g., "x", "y", or - "z") is explicitly specified. - prng : NumPy `RandomState` object or numpy.random - A pseudo-random number generator. Typically will only be specified if you - have a reason to generate the same set of random numbers, such as for a - test. Default is the numpy.random module. - - Examples - -------- - >>> L = np.array([0.1,-0.2,0.3]) - >>> events = my_photons.project_photons(L, area_new="sim_arf.fits", - ... redshift_new=0.05, - ... 
psf_sigma=0.01) - """ - - if redshift_new is not None and dist_new is not None: - mylog.error("You may specify a new redshift or distance, "+ - "but not both!") - - if sky_center is None: - sky_center = YTArray([30.,45.], "degree") - else: - sky_center = YTArray(sky_center, "degree") - - dx = self.photons["dx"].d - nx = self.parameters["Dimension"] - if psf_sigma is not None: - psf_sigma = parse_value(psf_sigma, "degree") - - if not isinstance(normal, string_types): - L = np.array(normal) - orient = Orientation(L, north_vector=north_vector) - x_hat = orient.unit_vectors[0] - y_hat = orient.unit_vectors[1] - z_hat = orient.unit_vectors[2] - - n_ph = self.photons["NumberOfPhotons"] - n_ph_tot = n_ph.sum() - - eff_area = None - - parameters = {} - - if responses is not None: - responses = ensure_list(responses) - parameters["ARF"] = responses[0] - if len(responses) == 2: - parameters["RMF"] = responses[1] - area_new = parameters["ARF"] - - zobs0 = self.parameters["FiducialRedshift"] - D_A0 = self.parameters["FiducialAngularDiameterDistance"] - scale_factor = 1.0 - - # If we use an RMF, figure out where the response matrix actually is. - if "RMF" in parameters: - rmf = _astropy.pyfits.open(parameters["RMF"]) - if "MATRIX" in rmf: - mat_key = "MATRIX" - elif "SPECRESP MATRIX" in rmf: - mat_key = "SPECRESP MATRIX" - else: - raise RuntimeError("Cannot find the response matrix in the RMF " - "file %s! " % parameters["RMF"]+"It should " - "be named \"MATRIX\" or \"SPECRESP MATRIX\".") - rmf.close() - else: - mat_key = None - - if (exp_time_new is None and area_new is None and - redshift_new is None and dist_new is None): - my_n_obs = n_ph_tot - zobs = zobs0 - D_A = D_A0 - else: - if exp_time_new is None: - Tratio = 1. - else: - Tratio = parse_value(exp_time_new, "s")/self.parameters["FiducialExposureTime"] - if area_new is None: - Aratio = 1. - elif isinstance(area_new, string_types): - if comm.rank == 0: - mylog.info("Using energy-dependent effective area: %s" % (parameters["ARF"])) - f = _astropy.pyfits.open(area_new) - earf = 0.5*(f["SPECRESP"].data.field("ENERG_LO")+f["SPECRESP"].data.field("ENERG_HI")) - eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP")) - if "RMF" in parameters: - weights = self._normalize_arf(parameters["RMF"], mat_key) - eff_area *= weights - else: - mylog.warning("You specified an ARF but not an RMF. This is ok if the "+ - "responses are normalized properly. If not, you may "+ - "get inconsistent results.") - f.close() - Aratio = eff_area.max()/self.parameters["FiducialArea"].v - else: - mylog.info("Using constant effective area.") - Aratio = parse_value(area_new, "cm**2")/self.parameters["FiducialArea"] - if redshift_new is None and dist_new is None: - Dratio = 1. - zobs = zobs0 - D_A = D_A0 - else: - if redshift_new is None: - zobs = 0.0 - D_A = parse_value(dist_new, "Mpc") - else: - zobs = redshift_new - D_A = self.cosmo.angular_diameter_distance(0.0,zobs).in_units("Mpc") - scale_factor = (1.+zobs0)/(1.+zobs) - Dratio = D_A0*D_A0*(1.+zobs0)**3 / \ - (D_A*D_A*(1.+zobs)**3) - fak = Aratio*Tratio*Dratio - if fak > 1: - raise ValueError("This combination of requested parameters results in " - "%g%% more photons collected than are " % (100.*(fak-1.)) + - "available in the sample. Please reduce the collecting " - "area, exposure time, or increase the distance/redshift " - "of the object. 
Alternatively, generate a larger sample " - "of photons.") - my_n_obs = np.uint64(n_ph_tot*fak) - - n_obs_all = comm.mpi_allreduce(my_n_obs) - if comm.rank == 0: - mylog.info("Total number of photons to use: %d" % (n_obs_all)) - - if my_n_obs == n_ph_tot: - idxs = np.arange(my_n_obs,dtype='uint64') - else: - idxs = prng.permutation(n_ph_tot)[:my_n_obs].astype("uint64") - obs_cells = np.searchsorted(self.p_bins, idxs, side='right')-1 - delta = dx[obs_cells] - - if isinstance(normal, string_types): - - xsky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs) - ysky = prng.uniform(low=-0.5,high=0.5,size=my_n_obs) - xsky *= delta - ysky *= delta - xsky += self.photons[axes_lookup[normal][0]][obs_cells] - ysky += self.photons[axes_lookup[normal][1]][obs_cells] - - if not no_shifting: - vz = self.photons["v%s" % normal] - - else: - x = prng.uniform(low=-0.5,high=0.5,size=my_n_obs) - y = prng.uniform(low=-0.5,high=0.5,size=my_n_obs) - z = prng.uniform(low=-0.5,high=0.5,size=my_n_obs) - - if not no_shifting: - vz = self.photons["vx"]*z_hat[0] + \ - self.photons["vy"]*z_hat[1] + \ - self.photons["vz"]*z_hat[2] - - x *= delta - y *= delta - z *= delta - x += self.photons["x"][obs_cells].d - y += self.photons["y"][obs_cells].d - z += self.photons["z"][obs_cells].d - - xsky = x*x_hat[0] + y*x_hat[1] + z*x_hat[2] - ysky = x*y_hat[0] + y*y_hat[1] + z*y_hat[2] - - if no_shifting: - eobs = self.photons["Energy"][idxs] - else: - shift = -vz.in_cgs()/clight - shift = np.sqrt((1.-shift)/(1.+shift)) - eobs = self.photons["Energy"][idxs]*shift[obs_cells] - eobs *= scale_factor - - if absorb_model is None: - not_abs = np.ones(eobs.shape, dtype='bool') - else: - mylog.info("Absorbing.") - absorb_model.prepare_spectrum() - emid = absorb_model.emid - aspec = absorb_model.get_spectrum() - absorb = np.interp(eobs, emid, aspec, left=0.0, right=0.0) - randvec = aspec.max()*prng.uniform(size=eobs.shape) - not_abs = randvec < absorb - absorb_model.cleanup_spectrum() - - if eff_area is None: - detected = np.ones(eobs.shape, dtype='bool') - else: - mylog.info("Applying energy-dependent effective area.") - earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0) - randvec = eff_area.max()*prng.uniform(size=eobs.shape) - detected = randvec < earea - - detected = np.logical_and(not_abs, detected) - - events = {} - - dx_min = self.parameters["Width"]/self.parameters["Dimension"] - dtheta = YTQuantity(np.rad2deg(dx_min/D_A), "degree") - - events["xpix"] = xsky[detected]/dx_min.v + 0.5*(nx+1) - events["ypix"] = ysky[detected]/dx_min.v + 0.5*(nx+1) - events["eobs"] = eobs[detected] - - events = comm.par_combine_object(events, datatype="dict", op="cat") - - if psf_sigma is not None: - events["xpix"] += prng.normal(sigma=psf_sigma/dtheta) - events["ypix"] += prng.normal(sigma=psf_sigma/dtheta) - - num_events = len(events["xpix"]) - - if comm.rank == 0: - mylog.info("Total number of observed photons: %d" % num_events) - - if "RMF" in parameters and convolve_energies: - events, info = self._convolve_with_rmf(parameters["RMF"], events, - mat_key, prng) - for k, v in info.items(): - parameters[k] = v - - if exp_time_new is None: - parameters["ExposureTime"] = self.parameters["FiducialExposureTime"] - else: - parameters["ExposureTime"] = exp_time_new - if area_new is None: - parameters["Area"] = self.parameters["FiducialArea"] - else: - parameters["Area"] = area_new - parameters["Redshift"] = zobs - parameters["AngularDiameterDistance"] = D_A.in_units("Mpc") - parameters["sky_center"] = sky_center - parameters["pix_center"] = 
np.array([0.5*(nx+1)]*2) - parameters["dtheta"] = dtheta - - return EventList(events, parameters) - - def _normalize_arf(self, respfile, mat_key): - rmf = _astropy.pyfits.open(respfile) - table = rmf[mat_key] - weights = np.array([w.sum() for w in table.data["MATRIX"]]) - rmf.close() - return weights - - def _convolve_with_rmf(self, respfile, events, mat_key, prng): - """ - Convolve the events with a RMF file. - """ - mylog.info("Reading response matrix file (RMF): %s" % (respfile)) - - hdulist = _astropy.pyfits.open(respfile) - - tblhdu = hdulist[mat_key] - n_de = len(tblhdu.data["ENERG_LO"]) - mylog.info("Number of energy bins in RMF: %d" % (n_de)) - mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]), - max(tblhdu.data["ENERG_HI"]))) - - tblhdu2 = hdulist["EBOUNDS"] - n_ch = len(tblhdu2.data["CHANNEL"]) - mylog.info("Number of channels in RMF: %d" % (n_ch)) - - eidxs = np.argsort(events["eobs"]) - - phEE = events["eobs"][eidxs].d - - detectedChannels = [] - - # run through all photon energies and find which bin they go in - k = 0 - fcurr = 0 - last = len(phEE) - - pbar = get_pbar("Scattering energies with RMF:", last) - - for low,high in zip(tblhdu.data["ENERG_LO"],tblhdu.data["ENERG_HI"]): - # weight function for probabilities from RMF - weights = np.nan_to_num(np.float64(tblhdu.data[k]["MATRIX"][:])) - weights /= weights.sum() - # build channel number list associated to array value, - # there are groups of channels in rmfs with nonzero probabilities - trueChannel = [] - f_chan = np.nan_to_num(tblhdu.data["F_CHAN"][k]) - n_chan = np.nan_to_num(tblhdu.data["N_CHAN"][k]) - n_grp = np.nan_to_num(tblhdu.data["N_CHAN"][k]) - if not iterable(f_chan): - f_chan = [f_chan] - n_chan = [n_chan] - n_grp = [n_grp] - for start,nchan in zip(f_chan, n_chan): - end = start + nchan - if start == end: - trueChannel.append(start) - else: - for j in range(start,end): - trueChannel.append(j) - if len(trueChannel) > 0: - for q in range(fcurr,last): - if phEE[q] >= low and phEE[q] < high: - channelInd = prng.choice(len(weights), p=weights) - fcurr += 1 - detectedChannels.append(trueChannel[channelInd]) - if phEE[q] >= high: - break - pbar.update(fcurr) - k += 1 - pbar.finish() - - dchannel = np.array(detectedChannels) - - events["xpix"] = events["xpix"][eidxs] - events["ypix"] = events["ypix"][eidxs] - events["eobs"] = YTArray(phEE, "keV") - events[tblhdu.header["CHANTYPE"]] = dchannel.astype(int) - - info = {"ChannelType" : tblhdu.header["CHANTYPE"], - "Telescope" : tblhdu.header["TELESCOP"], - "Instrument" : tblhdu.header["INSTRUME"]} - - info["Mission"] = tblhdu.header.get("MISSION","") - - return events, info - -class EventList(object): - - def __init__(self, events, parameters): - self.events = events - self.parameters = parameters - self.num_events = events["xpix"].shape[0] - self.wcs = _astropy.pywcs.WCS(naxis=2) - self.wcs.wcs.crpix = parameters["pix_center"] - self.wcs.wcs.crval = parameters["sky_center"].d - self.wcs.wcs.cdelt = [-parameters["dtheta"].value, parameters["dtheta"].value] - self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"] - self.wcs.wcs.cunit = ["deg"]*2 - - def keys(self): - return self.events.keys() - - def has_key(self, key): - return key in self.keys() - - def items(self): - return self.events.items() - - def values(self): - return self.events.values() - - def __getitem__(self,key): - if key not in self.events: - if key == "xsky" or key == "ysky": - x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1) - self.events["xsky"] = YTArray(x, "degree") - 
self.events["ysky"] = YTArray(y, "degree") - return self.events[key] - - def __repr__(self): - return self.events.__repr__() - - def __contains__(self, key): - return key in self.events - - def __add__(self, other): - assert_same_wcs(self.wcs, other.wcs) - validate_parameters(self.parameters, other.parameters) - events = {} - for item1, item2 in zip(self.items(), other.items()): - k1, v1 = item1 - k2, v2 = item2 - events[k1] = uconcatenate([v1,v2]) - return EventList(events, self.parameters) - - def filter_events(self, region): - """ - Filter events using a ds9 *region*. Requires the pyregion package. - Returns a new EventList. - """ - import pyregion - import os - if os.path.exists(region): - reg = pyregion.open(region) - else: - reg = pyregion.parse(region) - r = reg.as_imagecoord(header=self.wcs.to_header()) - f = r.get_filter() - idxs = f.inside_x_y(self["xpix"], self["ypix"]) - if idxs.sum() == 0: - raise RuntimeError("No events are inside this region!") - new_events = {} - for k, v in self.items(): - new_events[k] = v[idxs] - return EventList(new_events, self.parameters) - - @classmethod - def from_h5_file(cls, h5file): - """ - Initialize an EventList from a HDF5 file with filename *h5file*. - """ - events = {} - parameters = {} - - f = h5py.File(h5file, mode="r") - - p = f["/parameters"] - parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s") - area = force_unicode(p['area'].value) - if isinstance(area, string_types): - parameters["Area"] = area - else: - parameters["Area"] = YTQuantity(area, "cm**2") - parameters["Redshift"] = p["redshift"].value - parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc") - parameters["sky_center"] = YTArray(p["sky_center"][:], "deg") - parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg") - parameters["pix_center"] = p["pix_center"][:] - if "rmf" in p: - parameters["RMF"] = force_unicode(p["rmf"].value) - if "arf" in p: - parameters["ARF"] = force_unicode(p["arf"].value) - if "channel_type" in p: - parameters["ChannelType"] = force_unicode(p["channel_type"].value) - if "mission" in p: - parameters["Mission"] = force_unicode(p["mission"].value) - if "telescope" in p: - parameters["Telescope"] = force_unicode(p["telescope"].value) - if "instrument" in p: - parameters["Instrument"] = force_unicode(p["instrument"].value) - - d = f["/data"] - events["xpix"] = d["xpix"][:] - events["ypix"] = d["ypix"][:] - events["eobs"] = YTArray(d["eobs"][:], "keV") - if "pi" in d: - events["PI"] = d["pi"][:] - if "pha" in d: - events["PHA"] = d["pha"][:] - - f.close() - - return cls(events, parameters) - - @classmethod - def from_fits_file(cls, fitsfile): - """ - Initialize an EventList from a FITS file with filename *fitsfile*. 
- """ - hdulist = _astropy.pyfits.open(fitsfile) - - tblhdu = hdulist["EVENTS"] - - events = {} - parameters = {} - - parameters["ExposureTime"] = YTQuantity(tblhdu.header["EXPOSURE"], "s") - if isinstance(tblhdu.header["AREA"], (string_types, bytes)): - parameters["Area"] = tblhdu.header["AREA"] - else: - parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2") - parameters["Redshift"] = tblhdu.header["REDSHIFT"] - parameters["AngularDiameterDistance"] = YTQuantity(tblhdu.header["D_A"], "Mpc") - if "RMF" in tblhdu.header: - parameters["RMF"] = tblhdu["RMF"] - if "ARF" in tblhdu.header: - parameters["ARF"] = tblhdu["ARF"] - if "CHANTYPE" in tblhdu.header: - parameters["ChannelType"] = tblhdu["CHANTYPE"] - if "MISSION" in tblhdu.header: - parameters["Mission"] = tblhdu["MISSION"] - if "TELESCOP" in tblhdu.header: - parameters["Telescope"] = tblhdu["TELESCOP"] - if "INSTRUME" in tblhdu.header: - parameters["Instrument"] = tblhdu["INSTRUME"] - parameters["sky_center"] = YTArray([tblhdu["TCRVL2"],tblhdu["TCRVL3"]], "deg") - parameters["pix_center"] = np.array([tblhdu["TCRVL2"],tblhdu["TCRVL3"]]) - parameters["dtheta"] = YTQuantity(tblhdu["TCRVL3"], "deg") - events["xpix"] = tblhdu.data.field("X") - events["ypix"] = tblhdu.data.field("Y") - events["eobs"] = YTArray(tblhdu.data.field("ENERGY")/1000., "keV") - if "PI" in tblhdu.columns.names: - events["PI"] = tblhdu.data.field("PI") - if "PHA" in tblhdu.columns.names: - events["PHA"] = tblhdu.data.field("PHA") - - return cls(events, parameters) - - @parallel_root_only - def write_fits_file(self, fitsfile, clobber=False): - """ - Write events to a FITS binary table file with filename *fitsfile*. - Set *clobber* to True if you need to overwrite a previous file. - """ - pyfits = _astropy.pyfits - Time = _astropy.time.Time - TimeDelta = _astropy.time.TimeDelta - - exp_time = float(self.parameters["ExposureTime"]) - - t_begin = Time.now() - dt = TimeDelta(exp_time, format='sec') - t_end = t_begin + dt - - cols = [] - - col_e = pyfits.Column(name='ENERGY', format='E', unit='eV', - array=self["eobs"].in_units("eV").d) - col_x = pyfits.Column(name='X', format='D', unit='pixel', - array=self["xpix"]) - col_y = pyfits.Column(name='Y', format='D', unit='pixel', - array=self["ypix"]) - - cols = [col_e, col_x, col_y] - - if "ChannelType" in self.parameters: - chantype = self.parameters["ChannelType"] - if chantype == "PHA": - cunit = "adu" - elif chantype == "PI": - cunit = "Chan" - col_ch = pyfits.Column(name=chantype.upper(), format='1J', - unit=cunit, array=self.events[chantype]) - cols.append(col_ch) - - mylog.info("Generating times for events assuming uniform time " - "distribution. 
In future versions this will be made " - "more general.") - - time = np.random.uniform(size=self.num_events, low=0.0, - high=float(self.parameters["ExposureTime"])) - col_t = pyfits.Column(name="TIME", format='1D', unit='s', - array=time) - cols.append(col_t) - - coldefs = pyfits.ColDefs(cols) - tbhdu = pyfits.BinTableHDU.from_columns(coldefs) - tbhdu.update_ext_name("EVENTS") - - tbhdu.header["MTYPE1"] = "sky" - tbhdu.header["MFORM1"] = "x,y" - tbhdu.header["MTYPE2"] = "EQPOS" - tbhdu.header["MFORM2"] = "RA,DEC" - tbhdu.header["TCTYP2"] = "RA---TAN" - tbhdu.header["TCTYP3"] = "DEC--TAN" - tbhdu.header["TCRVL2"] = float(self.parameters["sky_center"][0]) - tbhdu.header["TCRVL3"] = float(self.parameters["sky_center"][1]) - tbhdu.header["TCDLT2"] = -float(self.parameters["dtheta"]) - tbhdu.header["TCDLT3"] = float(self.parameters["dtheta"]) - tbhdu.header["TCRPX2"] = self.parameters["pix_center"][0] - tbhdu.header["TCRPX3"] = self.parameters["pix_center"][1] - tbhdu.header["TLMIN2"] = 0.5 - tbhdu.header["TLMIN3"] = 0.5 - tbhdu.header["TLMAX2"] = 2.*self.parameters["pix_center"][0]-0.5 - tbhdu.header["TLMAX3"] = 2.*self.parameters["pix_center"][1]-0.5 - tbhdu.header["EXPOSURE"] = exp_time - tbhdu.header["TSTART"] = 0.0 - tbhdu.header["TSTOP"] = exp_time - if isinstance(self.parameters["Area"], string_types): - tbhdu.header["AREA"] = self.parameters["Area"] - else: - tbhdu.header["AREA"] = float(self.parameters["Area"]) - tbhdu.header["D_A"] = float(self.parameters["AngularDiameterDistance"]) - tbhdu.header["REDSHIFT"] = self.parameters["Redshift"] - tbhdu.header["HDUVERS"] = "1.1.0" - tbhdu.header["RADECSYS"] = "FK5" - tbhdu.header["EQUINOX"] = 2000.0 - tbhdu.header["HDUCLASS"] = "OGIP" - tbhdu.header["HDUCLAS1"] = "EVENTS" - tbhdu.header["HDUCLAS2"] = "ACCEPTED" - tbhdu.header["DATE"] = t_begin.tt.isot - tbhdu.header["DATE-OBS"] = t_begin.tt.isot - tbhdu.header["DATE-END"] = t_end.tt.isot - if "RMF" in self.parameters: - tbhdu.header["RESPFILE"] = self.parameters["RMF"] - f = pyfits.open(self.parameters["RMF"]) - nchan = int(f["EBOUNDS"].header["DETCHANS"]) - tbhdu.header["PHA_BINS"] = nchan - f.close() - if "ARF" in self.parameters: - tbhdu.header["ANCRFILE"] = self.parameters["ARF"] - if "ChannelType" in self.parameters: - tbhdu.header["CHANTYPE"] = self.parameters["ChannelType"] - if "Mission" in self.parameters: - tbhdu.header["MISSION"] = self.parameters["Mission"] - if "Telescope" in self.parameters: - tbhdu.header["TELESCOP"] = self.parameters["Telescope"] - if "Instrument" in self.parameters: - tbhdu.header["INSTRUME"] = self.parameters["Instrument"] - - hdulist = [pyfits.PrimaryHDU(), tbhdu] - - if "ChannelType" in self.parameters: - start = pyfits.Column(name='START', format='1D', unit='s', - array=np.array([0.0])) - stop = pyfits.Column(name='STOP', format='1D', unit='s', - array=np.array([exp_time])) - - tbhdu_gti = pyfits.BinTableHDU.from_columns([start,stop]) - tbhdu_gti.update_ext_name("STDGTI") - tbhdu_gti.header["TSTART"] = 0.0 - tbhdu_gti.header["TSTOP"] = exp_time - tbhdu_gti.header["HDUCLASS"] = "OGIP" - tbhdu_gti.header["HDUCLAS1"] = "GTI" - tbhdu_gti.header["HDUCLAS2"] = "STANDARD" - tbhdu_gti.header["RADECSYS"] = "FK5" - tbhdu_gti.header["EQUINOX"] = 2000.0 - tbhdu_gti.header["DATE"] = t_begin.tt.isot - tbhdu_gti.header["DATE-OBS"] = t_begin.tt.isot - tbhdu_gti.header["DATE-END"] = t_end.tt.isot - - hdulist.append(tbhdu_gti) - - pyfits.HDUList(hdulist).writeto(fitsfile, clobber=clobber) - - @parallel_root_only - def write_simput_file(self, prefix, clobber=False, 
emin=None, emax=None): - r""" - Write events to a SIMPUT file that may be read by the SIMX instrument - simulator. - - Parameters - ---------- - prefix : string - The filename prefix. - clobber : boolean, optional - Set to True to overwrite previous files. - e_min : float, optional - The minimum energy of the photons to save in keV. - e_max : float, optional - The maximum energy of the photons to save in keV. - """ - pyfits = _astropy.pyfits - if isinstance(self.parameters["Area"], string_types): - mylog.error("Writing SIMPUT files is only supported if you didn't convolve with responses.") - raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with responses.") - - if emin is None: - emin = self["eobs"].min().value - if emax is None: - emax = self["eobs"].max().value - - idxs = np.logical_and(self["eobs"].d >= emin, self["eobs"].d <= emax) - flux = np.sum(self["eobs"][idxs].in_units("erg")) / \ - self.parameters["ExposureTime"]/self.parameters["Area"] - - col1 = pyfits.Column(name='ENERGY', format='E', array=self["eobs"].d) - col2 = pyfits.Column(name='DEC', format='D', array=self["ysky"].d) - col3 = pyfits.Column(name='RA', format='D', array=self["xsky"].d) - - coldefs = pyfits.ColDefs([col1, col2, col3]) - - tbhdu = pyfits.BinTableHDU.from_columns(coldefs) - tbhdu.update_ext_name("PHLIST") - - tbhdu.header["HDUCLASS"] = "HEASARC/SIMPUT" - tbhdu.header["HDUCLAS1"] = "PHOTONS" - tbhdu.header["HDUVERS"] = "1.1.0" - tbhdu.header["EXTVER"] = 1 - tbhdu.header["REFRA"] = 0.0 - tbhdu.header["REFDEC"] = 0.0 - tbhdu.header["TUNIT1"] = "keV" - tbhdu.header["TUNIT2"] = "deg" - tbhdu.header["TUNIT3"] = "deg" - - phfile = prefix+"_phlist.fits" - - tbhdu.writeto(phfile, clobber=clobber) - - col1 = pyfits.Column(name='SRC_ID', format='J', array=np.array([1]).astype("int32")) - col2 = pyfits.Column(name='RA', format='D', array=np.array([0.0])) - col3 = pyfits.Column(name='DEC', format='D', array=np.array([0.0])) - col4 = pyfits.Column(name='E_MIN', format='D', array=np.array([float(emin)])) - col5 = pyfits.Column(name='E_MAX', format='D', array=np.array([float(emax)])) - col6 = pyfits.Column(name='FLUX', format='D', array=np.array([flux.value])) - col7 = pyfits.Column(name='SPECTRUM', format='80A', array=np.array([phfile+"[PHLIST,1]"])) - col8 = pyfits.Column(name='IMAGE', format='80A', array=np.array([phfile+"[PHLIST,1]"])) - col9 = pyfits.Column(name='SRC_NAME', format='80A', array=np.array(["yt_src"])) - - coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9]) - - wrhdu = pyfits.BinTableHDU.from_columns(coldefs) - wrhdu.update_ext_name("SRC_CAT") - - wrhdu.header["HDUCLASS"] = "HEASARC" - wrhdu.header["HDUCLAS1"] = "SIMPUT" - wrhdu.header["HDUCLAS2"] = "SRC_CAT" - wrhdu.header["HDUVERS"] = "1.1.0" - wrhdu.header["RADECSYS"] = "FK5" - wrhdu.header["EQUINOX"] = 2000.0 - wrhdu.header["TUNIT2"] = "deg" - wrhdu.header["TUNIT3"] = "deg" - wrhdu.header["TUNIT4"] = "keV" - wrhdu.header["TUNIT5"] = "keV" - wrhdu.header["TUNIT6"] = "erg/s/cm**2" - - simputfile = prefix+"_simput.fits" - - wrhdu.writeto(simputfile, clobber=clobber) - - @parallel_root_only - def write_h5_file(self, h5file): - """ - Write an EventList to the HDF5 file given by *h5file*. 
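# Editorial illustration, not part of the diff: the source flux that
# write_simput_file records in its SRC_CAT extension is simply the summed
# photon energy converted to erg, divided by exposure time and collecting
# area.  The photon energies, exposure time and area below are placeholders.
import numpy as np

erg_per_keV = 1.602177e-9              # 1 keV in erg
eobs_keV = np.array([1.0, 2.5, 0.7])   # toy observed photon energies in keV
exp_time = 2.0e5                       # s
area = 6000.0                          # cm**2

flux = eobs_keV.sum() * erg_per_keV / exp_time / area   # erg / s / cm**2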
- """ - f = h5py.File(h5file, mode="w") - - p = f.create_group("parameters") - p.create_dataset("exp_time", data=float(self.parameters["ExposureTime"])) - area = self.parameters["Area"] - if not isinstance(area, string_types): - area = float(area) - p.create_dataset("area", data=area) - p.create_dataset("redshift", data=self.parameters["Redshift"]) - p.create_dataset("d_a", data=float(self.parameters["AngularDiameterDistance"])) - if "ARF" in self.parameters: - p.create_dataset("arf", data=self.parameters["ARF"]) - if "RMF" in self.parameters: - p.create_dataset("rmf", data=self.parameters["RMF"]) - if "ChannelType" in self.parameters: - p.create_dataset("channel_type", data=self.parameters["ChannelType"]) - if "Mission" in self.parameters: - p.create_dataset("mission", data=self.parameters["Mission"]) - if "Telescope" in self.parameters: - p.create_dataset("telescope", data=self.parameters["Telescope"]) - if "Instrument" in self.parameters: - p.create_dataset("instrument", data=self.parameters["Instrument"]) - p.create_dataset("sky_center", data=self.parameters["sky_center"].d) - p.create_dataset("pix_center", data=self.parameters["pix_center"]) - p.create_dataset("dtheta", data=float(self.parameters["dtheta"])) - - d = f.create_group("data") - d.create_dataset("xpix", data=self["xpix"]) - d.create_dataset("ypix", data=self["ypix"]) - d.create_dataset("xsky", data=self["xsky"].d) - d.create_dataset("ysky", data=self["ysky"].d) - d.create_dataset("eobs", data=self["eobs"].d) - if "PI" in self.events: - d.create_dataset("pi", data=self.events["PI"]) - if "PHA" in self.events: - d.create_dataset("pha", data=self.events["PHA"]) - - f.close() - - @parallel_root_only - def write_fits_image(self, imagefile, clobber=False, - emin=None, emax=None): - r""" - Generate a image by binning X-ray counts and write it to a FITS file. - - Parameters - ---------- - imagefile : string - The name of the image file to write. - clobber : boolean, optional - Set to True to overwrite a previous file. - emin : float, optional - The minimum energy of the photons to put in the image, in keV. - emax : float, optional - The maximum energy of the photons to put in the image, in keV. - """ - if emin is None: - mask_emin = np.ones(self.num_events, dtype='bool') - else: - mask_emin = self["eobs"].d > emin - if emax is None: - mask_emax = np.ones(self.num_events, dtype='bool') - else: - mask_emax = self["eobs"].d < emax - - mask = np.logical_and(mask_emin, mask_emax) - - nx = int(2*self.parameters["pix_center"][0]-1.) - ny = int(2*self.parameters["pix_center"][1]-1.) 
- - xbins = np.linspace(0.5, float(nx)+0.5, nx+1, endpoint=True) - ybins = np.linspace(0.5, float(ny)+0.5, ny+1, endpoint=True) - - H, xedges, yedges = np.histogram2d(self["xpix"][mask], - self["ypix"][mask], - bins=[xbins,ybins]) - - hdu = _astropy.pyfits.PrimaryHDU(H.T) - - hdu.header["MTYPE1"] = "EQPOS" - hdu.header["MFORM1"] = "RA,DEC" - hdu.header["CTYPE1"] = "RA---TAN" - hdu.header["CTYPE2"] = "DEC--TAN" - hdu.header["CRPIX1"] = 0.5*(nx+1) - hdu.header["CRPIX2"] = 0.5*(nx+1) - hdu.header["CRVAL1"] = float(self.parameters["sky_center"][0]) - hdu.header["CRVAL2"] = float(self.parameters["sky_center"][1]) - hdu.header["CUNIT1"] = "deg" - hdu.header["CUNIT2"] = "deg" - hdu.header["CDELT1"] = -float(self.parameters["dtheta"]) - hdu.header["CDELT2"] = float(self.parameters["dtheta"]) - hdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"]) - - hdu.writeto(imagefile, clobber=clobber) - - @parallel_root_only - def write_spectrum(self, specfile, bin_type="channel", emin=0.1, - emax=10.0, nchan=2000, clobber=False, energy_bins=False): - r""" - Bin event energies into a spectrum and write it to a FITS binary table. Can bin - on energy or channel. In that case, the spectral binning will be determined by - the RMF binning. - - Parameters - ---------- - specfile : string - The name of the FITS file to be written. - bin_type : string, optional - Bin on "energy" or "channel". If an RMF is detected, channel information will be - imported from it. - emin : float, optional - The minimum energy of the spectral bins in keV. Only used if binning without an RMF. - emax : float, optional - The maximum energy of the spectral bins in keV. Only used if binning without an RMF. - nchan : integer, optional - The number of channels. Only used if binning without an RMF. - energy_bins : boolean, optional - Bin on energy or channel. Deprecated in favor of *bin_type*. - """ - if energy_bins: - bin_type = "energy" - warnings.warn("The energy_bins keyword is deprecated. Please use " - "the bin_type keyword instead. Setting bin_type == 'energy'.") - pyfits = _astropy.pyfits - if bin_type == "channel" and "ChannelType" in self.parameters: - spectype = self.parameters["ChannelType"] - f = pyfits.open(self.parameters["RMF"]) - nchan = int(f["EBOUNDS"].header["DETCHANS"]) - num = 0 - if "MATRIX" in f: - mat_key = "MATRIX" - elif "SPECRESP MATRIX" in f: - mat_key = "SPECRESP MATRIX" - for i in range(1,len(f[mat_key].columns)+1): - if f[mat_key].header["TTYPE%d" % i] == "F_CHAN": - num = i - break - if num > 0: - tlmin = "TLMIN%d" % num - cmin = int(f[mat_key].header[tlmin]) - else: - mylog.warning("Cannot determine minimum allowed value for channel. " + - "Setting to 0, which may be wrong.") - cmin = 0 - f.close() - minlength = nchan - if cmin == 1: minlength += 1 - spec = np.bincount(self[spectype],minlength=minlength) - if cmin == 1: spec = spec[1:] - bins = (np.arange(nchan)+cmin).astype("int32") - else: - espec = self["eobs"].d - erange = (emin, emax) - spec, ee = np.histogram(espec, bins=nchan, range=erange) - if bin_type == "energy": - bins = 0.5*(ee[1:]+ee[:-1]) - spectype = "energy" - else: - mylog.info("Events haven't been convolved with an RMF, so assuming " - "a perfect response and %d PI channels." 
% nchan) - bins = (np.arange(nchan)+1).astype("int32") - spectype = "pi" - - col1 = pyfits.Column(name='CHANNEL', format='1J', array=bins) - col2 = pyfits.Column(name=spectype.upper(), format='1D', array=bins.astype("float64")) - col3 = pyfits.Column(name='COUNTS', format='1J', array=spec.astype("int32")) - col4 = pyfits.Column(name='COUNT_RATE', format='1D', array=spec/float(self.parameters["ExposureTime"])) - - coldefs = pyfits.ColDefs([col1, col2, col3, col4]) - - tbhdu = pyfits.BinTableHDU.from_columns(coldefs) - tbhdu.update_ext_name("SPECTRUM") - - tbhdu.header["DETCHANS"] = spec.shape[0] - tbhdu.header["TOTCTS"] = spec.sum() - tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"]) - tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"]) - tbhdu.header["CONTENT"] = spectype - tbhdu.header["HDUCLASS"] = "OGIP" - tbhdu.header["HDUCLAS1"] = "SPECTRUM" - tbhdu.header["HDUCLAS2"] = "TOTAL" - tbhdu.header["HDUCLAS3"] = "TYPE:I" - tbhdu.header["HDUCLAS4"] = "COUNT" - tbhdu.header["HDUVERS"] = "1.1.0" - tbhdu.header["HDUVERS1"] = "1.1.0" - tbhdu.header["CHANTYPE"] = spectype - tbhdu.header["BACKFILE"] = "none" - tbhdu.header["CORRFILE"] = "none" - tbhdu.header["POISSERR"] = True - if "RMF" in self.parameters: - tbhdu.header["RESPFILE"] = self.parameters["RMF"] - else: - tbhdu.header["RESPFILE"] = "none" - if "ARF" in self.parameters: - tbhdu.header["ANCRFILE"] = self.parameters["ARF"] - else: - tbhdu.header["ANCRFILE"] = "none" - if "Mission" in self.parameters: - tbhdu.header["MISSION"] = self.parameters["Mission"] - else: - tbhdu.header["MISSION"] = "none" - if "Telescope" in self.parameters: - tbhdu.header["TELESCOP"] = self.parameters["Telescope"] - else: - tbhdu.header["TELESCOP"] = "none" - if "Instrument" in self.parameters: - tbhdu.header["INSTRUME"] = self.parameters["Instrument"] - else: - tbhdu.header["INSTRUME"] = "none" - tbhdu.header["AREASCAL"] = 1.0 - tbhdu.header["CORRSCAL"] = 0.0 - tbhdu.header["BACKSCAL"] = 1.0 - - hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu]) - - hdulist.writeto(specfile, clobber=clobber) - -def merge_files(input_files, output_file, clobber=False, - add_exposure_times=False): - r""" - Helper function for merging PhotonList or EventList HDF5 files. - - Parameters - ---------- - input_files : list of strings - List of filenames that will be merged together. - output_file : string - Name of the merged file to be outputted. - clobber : boolean, default False - If a the output file already exists, set this to True to - overwrite it. - add_exposure_times : boolean, default False - If set to True, exposure times will be added together. Otherwise, - the exposure times of all of the files must be the same. - - Examples - -------- - >>> from yt.analysis_modules.photon_simulator.api import merge_files - >>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5", - ... clobber=True, add_exposure_times=True) - - Notes - ----- - Currently, to merge files it is mandated that all of the parameters have the - same values, with the possible exception of the exposure time parameter "exp_time" - if add_exposure_times=False. - """ - if os.path.exists(output_file) and not clobber: - raise IOError("Cannot overwrite existing file %s. 
" % output_file + - "If you want to do this, set clobber=True.") - - f_in = h5py.File(input_files[0], mode="r") - f_out = h5py.File(output_file, mode="w") - - exp_time_key = "" - p_out = f_out.create_group("parameters") - for key, param in f_in["parameters"].items(): - if key.endswith("exp_time"): - exp_time_key = key - else: - p_out[key] = param.value - - skip = [exp_time_key] if add_exposure_times else [] - for fn in input_files[1:]: - f = h5py.File(fn, mode="r") - validate_parameters(f_in["parameters"], f["parameters"], skip=skip) - f.close() - - f_in.close() - - data = defaultdict(list) - tot_exp_time = 0.0 - - for i, fn in enumerate(input_files): - f = h5py.File(fn, mode="r") - if add_exposure_times: - tot_exp_time += f["/parameters"][exp_time_key].value - elif i == 0: - tot_exp_time = f["/parameters"][exp_time_key].value - for key in f["/data"]: - data[key].append(f["/data"][key][:]) - f.close() - - p_out["exp_time"] = tot_exp_time - - d = f_out.create_group("data") - for k in data: - d.create_dataset(k, data=np.concatenate(data[k])) - - f_out.close() - -def convert_old_file(input_file, output_file, clobber=False): - r""" - Helper function for converting old PhotonList or EventList HDF5 - files (pre yt v3.3) to their new versions. - - Parameters - ---------- - input_file : list of strings - The filename of the old-versioned file to be converted. - output_file : string - Name of the new file to be outputted. - clobber : boolean, default False - If a the output file already exists, set this to True to - overwrite it. - - Examples - -------- - >>> from yt.analysis_modules.photon_simulator.api import convert_old_file - >>> convert_old_file("photons_old.h5", "photons_new.h5", clobber=True) - """ - if os.path.exists(output_file) and not clobber: - raise IOError("Cannot overwrite existing file %s. " % output_file + - "If you want to do this, set clobber=True.") - - f_in = h5py.File(input_file, mode="r") - - if "num_photons" in f_in: - params = ["fid_exp_time", "fid_area", "fid_d_a", "fid_redshift", - "dimension", "width", "hubble", "omega_matter", - "omega_lambda"] - data = ["x", "y", "z", "dx", "vx", "vy", "vz", "energy", "num_photons"] - elif "pix_center" in f_in: - params = ["exp_time", "area", "redshift", "d_a", "arf", - "rmf", "channel_type", "mission", "telescope", - "instrument", "sky_center", "pix_center", "dtheta"] - data = ["xsky", "ysky", "xpix", "ypix", "eobs", "pi", "pha"] - - f_out = h5py.File(output_file, mode="w") - - p = f_out.create_group("parameters") - d = f_out.create_group("data") - - for key in params: - if key in f_in: - p.create_dataset(key, data=f_in[key].value) - - for key in data: - if key in f_in: - d.create_dataset(key, data=f_in[key].value) - - f_in.close() - f_out.close() diff --git a/yt/analysis_modules/photon_simulator/spectral_models.py b/yt/analysis_modules/photon_simulator/spectral_models.py deleted file mode 100644 index 316cae53cc6..00000000000 --- a/yt/analysis_modules/photon_simulator/spectral_models.py +++ /dev/null @@ -1,355 +0,0 @@ -""" -Photon emission and absorption models for use with the -photon simulator. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.utilities.on_demand_imports import _h5py as h5py -import numpy as np -import os - -from yt.funcs import mylog -from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.on_demand_imports import _astropy -from yt.utilities.physical_constants import hcgs, clight -from yt.utilities.physical_ratios import erg_per_keV, amu_grams -from yt.analysis_modules.photon_simulator.utils import broaden_lines - -hc = (hcgs*clight).in_units("keV*angstrom").v -cl = clight.v -K = 1.0/np.sqrt(2.*np.pi) - -class SpectralModel(object): - - def __init__(self, emin, emax, nchan): - self.emin = YTQuantity(emin, "keV") - self.emax = YTQuantity(emax, "keV") - self.nchan = nchan - self.ebins = YTArray(np.linspace(self.emin, self.emax, nchan+1), "keV") - self.de = np.diff(self.ebins) - self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1]) - - def prepare_spectrum(self): - pass - - def get_spectrum(self): - pass - - def cleanup_spectrum(self): - pass - -class XSpecThermalModel(SpectralModel): - r""" - Initialize a thermal gas emission model from PyXspec. - - Parameters - ---------- - model_name : string - The name of the thermal emission model. - emin : float - The minimum energy for the spectral model. - emax : float - The maximum energy for the spectral model. - nchan : integer - The number of channels in the spectral model. - settings : dictionary, optional - A dictionary of key, value pairs (must both be strings) - that can be used to set various options in XSPEC. - - Examples - -------- - >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000) - """ - def __init__(self, model_name, emin, emax, nchan, - thermal_broad=False, settings=None): - self.model_name = model_name - self.thermal_broad = thermal_broad - if settings is None: settings = {} - self.settings = settings - super(XSpecThermalModel, self).__init__(emin, emax, nchan) - - def prepare_spectrum(self, zobs): - """ - Prepare the thermal model for execution given a redshift *zobs* for the spectrum. - """ - import xspec - xspec.Xset.chatter = 0 - xspec.AllModels.setEnergies("%f %f %d lin" % - (self.emin.value, self.emax.value, self.nchan)) - self.model = xspec.Model(self.model_name) - self.thermal_comp = getattr(self.model,self.model_name) - if self.model_name == "bremss": - self.norm = 3.02e-15 - else: - self.norm = 1.0e-14 - self.thermal_comp.norm = 1.0 - self.thermal_comp.Redshift = zobs - if self.thermal_broad: - xspec.Xset.addModelString("APECTHERMAL","yes") - for k,v in self.settings.items(): - xspec.Xset.addModelString(k,v) - - def get_spectrum(self, kT): - """ - Get the thermal emission spectrum given a temperature *kT* in keV. - """ - self.thermal_comp.kT = kT - self.thermal_comp.Abundanc = 0.0 - cosmic_spec = np.array(self.model.values(0)) - if self.model_name == "bremss": - metal_spec = np.zeros(self.nchan) - else: - self.thermal_comp.Abundanc = 1.0 - metal_spec = np.array(self.model.values(0)) - cosmic_spec - cosmic_spec *= self.norm - metal_spec *= self.norm - return YTArray(cosmic_spec, "cm**3/s"), YTArray(metal_spec, "cm**3/s") - - def cleanup_spectrum(self): - del self.thermal_comp - del self.model - -class XSpecAbsorbModel(SpectralModel): - r""" - Initialize an absorption model from PyXspec. - - Parameters - ---------- - model_name : string - The name of the absorption model. - nH : float - The foreground column density *nH* in units of 10^22 cm^{-2}. - emin : float, optional - The minimum energy for the spectral model. 
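# Editorial illustration, not part of the diff: the uniform energy binning
# that SpectralModel.__init__ (above) sets up for every spectral model --
# nchan channels give nchan+1 bin edges, with de the bin widths and emid the
# bin centers.  Plain numpy is used here instead of YTArray for brevity.
import numpy as np

emin, emax, nchan = 0.05, 50.0, 1000
ebins = np.linspace(emin, emax, nchan + 1)   # bin edges in keV
de = np.diff(ebins)                          # constant width (emax - emin) / nchan
emid = 0.5 * (ebins[1:] + ebins[:-1])        # channel centers used for spectra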
- emax : float, optional - The maximum energy for the spectral model. - nchan : integer, optional - The number of channels in the spectral model. - settings : dictionary, optional - A dictionary of key, value pairs (must both be strings) - that can be used to set various options in XSPEC. - - Examples - -------- - >>> abs_model = XSpecAbsorbModel("wabs", 0.1) - """ - def __init__(self, model_name, nH, emin=0.01, emax=50.0, - nchan=100000, settings=None): - self.model_name = model_name - self.nH = nH - if settings is None: settings = {} - self.settings = settings - super(XSpecAbsorbModel, self).__init__(emin, emax, nchan) - - def prepare_spectrum(self): - """ - Prepare the absorption model for execution given a redshift *zobs* for the spectrum. - """ - import xspec - xspec.Xset.chatter = 0 - xspec.AllModels.setEnergies("%f %f %d lin" % - (self.emin.value, self.emax.value, self.nchan)) - self.model = xspec.Model(self.model_name+"*powerlaw") - self.model.powerlaw.norm = self.nchan/(self.emax.value-self.emin.value) - self.model.powerlaw.PhoIndex = 0.0 - for k,v in self.settings.items(): - xspec.Xset.addModelString(k,v) - - def get_spectrum(self): - """ - Get the absorption spectrum. - """ - m = getattr(self.model,self.model_name) - m.nH = self.nH - return np.array(self.model.values(0)) - - def cleanup_spectrum(self): - del self.model - -class TableApecModel(SpectralModel): - r""" - Initialize a thermal gas emission model from the AtomDB APEC tables - available at http://www.atomdb.org. This code borrows heavily from Python - routines used to read the APEC tables developed by Adam Foster at the - CfA (afoster@cfa.harvard.edu). - - Parameters - ---------- - apec_root : string - The directory root where the APEC model files are stored. - emin : float - The minimum energy for the spectral model. - emax : float - The maximum energy for the spectral model. - nchan : integer - The number of channels in the spectral model. - apec_vers : string, optional - The version identifier string for the APEC files, e.g. - "2.0.2" - thermal_broad : boolean, optional - Whether to apply thermal broadening to spectral lines. Only should - be used if you are attempting to simulate a high-spectral resolution - detector. - - Examples - -------- - >>> apec_model = TableApecModel("$SPECTRAL_DATA/spectral/", 0.05, 50.0, - ... 1000, apec_vers="3.0", thermal_broad=True) - """ - def __init__(self, apec_root, emin, emax, nchan, - apec_vers="2.0.2", thermal_broad=False): - self.apec_root = apec_root - self.apec_prefix = "apec_v"+apec_vers - self.cocofile = os.path.join(self.apec_root, - self.apec_prefix+"_coco.fits") - self.linefile = os.path.join(self.apec_root, - self.apec_prefix+"_line.fits") - super(TableApecModel, self).__init__(emin, emax, nchan) - self.wvbins = hc/self.ebins[::-1].d - # H, He, and trace elements - self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30] - # Non-trace metals - self.metal_elem = [6,7,8,10,12,13,14,16,18,20,26,28] - self.thermal_broad = thermal_broad - self.A = np.array([0.0,1.00794,4.00262,6.941,9.012182,10.811, - 12.0107,14.0067,15.9994,18.9984,20.1797, - 22.9898,24.3050,26.9815,28.0855,30.9738, - 32.0650,35.4530,39.9480,39.0983,40.0780, - 44.9559,47.8670,50.9415,51.9961,54.9380, - 55.8450,58.9332,58.6934,63.5460,65.3800]) - - def prepare_spectrum(self, zobs): - """ - Prepare the thermal model for execution. 
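# Editorial illustration, not part of the diff: the energy-to-wavelength
# conversion TableApecModel performs above (wvbins = hc / ebins[::-1]), using
# hc ~ 12.3984 keV*angstrom; the order is reversed because wavelength
# decreases as energy increases.  The bin edges below are placeholders.
import numpy as np

hc = 12.398420                          # h*c in keV * angstrom (approximate)
ebins = np.linspace(0.05, 50.0, 1001)   # energy bin edges in keV
wvbins = hc / ebins[::-1]               # wavelength bin edges in angstrom, ascending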
- """ - try: - self.line_handle = _astropy.pyfits.open(self.linefile) - except IOError: - mylog.error("LINE file %s does not exist" % self.linefile) - raise IOError("LINE file %s does not exist" % self.linefile) - try: - self.coco_handle = _astropy.pyfits.open(self.cocofile) - except IOError: - mylog.error("COCO file %s does not exist" % self.cocofile) - raise IOError("COCO file %s does not exist" % self.cocofile) - - self.Tvals = self.line_handle[1].data.field("kT") - self.dTvals = np.diff(self.Tvals) - self.minlam = self.wvbins.min() - self.maxlam = self.wvbins.max() - self.scale_factor = 1.0/(1.+zobs) - - def _make_spectrum(self, kT, element, tindex): - - tmpspec = np.zeros(self.nchan) - - line_data = self.line_handle[tindex].data - coco_data = self.coco_handle[tindex].data - - i = np.where((line_data.field('element') == element) & - (line_data.field('lambda') > self.minlam) & - (line_data.field('lambda') < self.maxlam))[0] - - E0 = hc/line_data.field('lambda')[i].astype("float64")*self.scale_factor - amp = line_data.field('epsilon')[i].astype("float64") - ebins = self.ebins.d - de = self.de.d - emid = self.emid.d - if self.thermal_broad: - sigma = E0*np.sqrt(2.*kT*erg_per_keV/(self.A[element]*amu_grams))/cl - vec = broaden_lines(E0, sigma, amp, ebins) - else: - vec = np.histogram(E0, ebins, weights=amp)[0] - tmpspec += vec - - ind = np.where((coco_data.field('Z') == element) & - (coco_data.field('rmJ') == 0))[0] - if len(ind) == 0: - return tmpspec - else: - ind = ind[0] - - n_cont = coco_data.field('N_Cont')[ind] - e_cont = coco_data.field('E_Cont')[ind][:n_cont] - continuum = coco_data.field('Continuum')[ind][:n_cont] - - tmpspec += np.interp(emid, e_cont*self.scale_factor, continuum)*de/self.scale_factor - - n_pseudo = coco_data.field('N_Pseudo')[ind] - e_pseudo = coco_data.field('E_Pseudo')[ind][:n_pseudo] - pseudo = coco_data.field('Pseudo')[ind][:n_pseudo] - - tmpspec += np.interp(emid, e_pseudo*self.scale_factor, pseudo)*de/self.scale_factor - - return tmpspec*self.scale_factor - - def get_spectrum(self, kT): - """ - Get the thermal emission spectrum given a temperature *kT* in keV. - """ - cspec_l = np.zeros(self.nchan) - mspec_l = np.zeros(self.nchan) - cspec_r = np.zeros(self.nchan) - mspec_r = np.zeros(self.nchan) - tindex = np.searchsorted(self.Tvals, kT)-1 - if tindex >= self.Tvals.shape[0]-1 or tindex < 0: - return YTArray(cspec_l, "cm**3/s"), YTArray(mspec_l, "cm**3/s") - dT = (kT-self.Tvals[tindex])/self.dTvals[tindex] - # First do H,He, and trace elements - for elem in self.cosmic_elem: - cspec_l += self._make_spectrum(kT, elem, tindex+2) - cspec_r += self._make_spectrum(kT, elem, tindex+3) - # Next do the metals - for elem in self.metal_elem: - mspec_l += self._make_spectrum(kT, elem, tindex+2) - mspec_r += self._make_spectrum(kT, elem, tindex+3) - cosmic_spec = YTArray(cspec_l*(1.-dT)+cspec_r*dT, "cm**3/s") - metal_spec = YTArray(mspec_l*(1.-dT)+mspec_r*dT, "cm**3/s") - return cosmic_spec, metal_spec - -class TableAbsorbModel(SpectralModel): - r""" - Initialize an absorption model from a table stored in an HDF5 file. - - Parameters - ---------- - filename : string - The name of the table file. - nH : float - The foreground column density *nH* in units of 10^22 cm^{-2}. - - Examples - -------- - >>> abs_model = XSpecAbsorbModel("abs_table.h5", 0.1) - """ - def __init__(self, filename, nH): - if not os.path.exists(filename): - raise IOError("File does not exist: %s." 
% filename) - self.filename = filename - f = h5py.File(self.filename, mode="r") - emin = f["energy"][:].min() - emax = f["energy"][:].max() - self.sigma = YTArray(f["cross_section"][:], "cm**2") - nchan = self.sigma.shape[0] - f.close() - super(TableAbsorbModel, self).__init__(emin, emax, nchan) - self.nH = YTQuantity(nH*1.0e22, "cm**-2") - - def prepare_spectrum(self): - """ - Prepare the absorption model for execution. - """ - pass - - def get_spectrum(self): - """ - Get the absorption spectrum. - """ - return np.exp(-self.sigma*self.nH) diff --git a/yt/analysis_modules/photon_simulator/tests/__init__.py b/yt/analysis_modules/photon_simulator/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py deleted file mode 100644 index 2559b06a9e0..00000000000 --- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py +++ /dev/null @@ -1,171 +0,0 @@ -""" -A unit test for the photon_simulator analysis module. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import warnings - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - from yt.analysis_modules.photon_simulator.api import \ - XSpecThermalModel, XSpecAbsorbModel, \ - ThermalPhotonModel, PhotonList -from yt.config import ytcfg -from yt.testing import requires_file, requires_module -import numpy as np -from yt.utilities.physical_ratios import \ - K_per_keV, mass_hydrogen_grams -from yt.utilities.physical_constants import clight -from yt.frontends.stream.api import load_uniform_grid -import os -import tempfile -import shutil -from numpy.random import RandomState - -ckms = clight.in_units("km/s").v - -def setup(): - from yt.config import ytcfg - ytcfg["yt", "__withintesting"] = "True" - -xray_data_dir = ytcfg.get("yt", "xray_data_dir") - -arf = os.path.join(xray_data_dir,"sxt-s_120210_ts02um_intallpxl.arf") -rmf = os.path.join(xray_data_dir,"ah_sxs_5ev_basefilt_20100712.rmf") - -@requires_module("xspec") -@requires_file(arf) -@requires_file(rmf) -def test_beta_model(): - import xspec - - xspec.Fit.statMethod = "cstat" - xspec.Xset.addModelString("APECTHERMAL","yes") - xspec.Fit.query = "yes" - xspec.Fit.method = ["leven","10","0.01"] - xspec.Fit.delta = 0.01 - xspec.Xset.chatter = 5 - - my_prng = RandomState(24) - - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - R = 1.0 - r_c = 0.05 - rho_c = 0.04*mass_hydrogen_grams - beta = 1. 
- kT_sim = 6.0 - v_shift = 4.0e7 - v_width = 4.0e7 - nx = 128 - - ddims = (nx,nx,nx) - - x, y, z = np.mgrid[-R:R:nx*1j, - -R:R:nx*1j, - -R:R:nx*1j] - - r = np.sqrt(x**2+y**2+z**2) - - dens = np.zeros(ddims) - dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta) - dens[r > R] = 0.0 - temp = kT_sim*K_per_keV*np.ones(ddims) - bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) - velz = my_prng.normal(loc=v_shift,scale=v_width,size=ddims) - - data = {} - data["density"] = (dens, "g/cm**3") - data["temperature"] = (temp, "K") - data["velocity_x"] = (np.zeros(ddims), "cm/s") - data["velocity_y"] = (np.zeros(ddims), "cm/s") - data["velocity_z"] = (velz, "cm/s") - - ds = load_uniform_grid(data, ddims, length_unit=(2*R, "Mpc"), - nprocs=64, bbox=bbox) - - A = 3000. - exp_time = 1.0e5 - redshift = 0.05 - nH_sim = 0.02 - - apec_model = XSpecThermalModel("bapec", 0.1, 11.5, 20000, - thermal_broad=True) - abs_model = XSpecAbsorbModel("TBabs", nH_sim) - - sphere = ds.sphere("c", (0.5, "Mpc")) - - mu_sim = -v_shift / 1.0e5 - sigma_sim = v_width / 1.0e5 - - Z_sim = 0.3 - - thermal_model = ThermalPhotonModel(apec_model, Zmet=Z_sim, X_H=0.76, - prng=my_prng) - photons = PhotonList.from_scratch(sphere, redshift, A, exp_time, - thermal_model) - - D_A = photons.parameters["FiducialAngularDiameterDistance"] - - norm_sim = sphere.quantities.total_quantity("emission_measure") - norm_sim *= 1.0e-14/(4*np.pi*D_A*D_A*(1.+redshift)*(1.+redshift)) - norm_sim = float(norm_sim.in_cgs()) - - events = photons.project_photons("z", responses=[arf,rmf], - absorb_model=abs_model, - convolve_energies=True, prng=my_prng) - events.write_spectrum("beta_model_evt.pi", clobber=True) - - s = xspec.Spectrum("beta_model_evt.pi") - s.ignore("**-0.5") - s.ignore("9.0-**") - - m = xspec.Model("tbabs*bapec") - m.bapec.kT = 5.5 - m.bapec.Abundanc = 0.25 - m.bapec.norm = 1.0 - m.bapec.Redshift = 0.05 - m.bapec.Velocity = 300.0 - m.TBabs.nH = 0.02 - - m.bapec.Velocity.frozen = False - m.bapec.Abundanc.frozen = False - m.bapec.Redshift.frozen = False - m.TBabs.nH.frozen = True - - xspec.Fit.renorm() - xspec.Fit.nIterations = 100 - xspec.Fit.perform() - - kT = m.bapec.kT.values[0] - mu = (m.bapec.Redshift.values[0]-redshift)*ckms - Z = m.bapec.Abundanc.values[0] - sigma = m.bapec.Velocity.values[0] - norm = m.bapec.norm.values[0] - - dkT = m.bapec.kT.sigma - dmu = m.bapec.Redshift.sigma*ckms - dZ = m.bapec.Abundanc.sigma - dsigma = m.bapec.Velocity.sigma - dnorm = m.bapec.norm.sigma - - assert np.abs(mu-mu_sim) < dmu - assert np.abs(kT-kT_sim) < dkT - assert np.abs(Z-Z_sim) < dZ - assert np.abs(sigma-sigma_sim) < dsigma - assert np.abs(norm-norm_sim) < dnorm - - xspec.AllModels.clear() - xspec.AllData.clear() - - os.chdir(curdir) - shutil.rmtree(tmpdir) diff --git a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py deleted file mode 100644 index fa8e543e664..00000000000 --- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -Answer test the photon_simulator analysis module. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.analysis_modules.photon_simulator.api import \ - TableApecModel, TableAbsorbModel, \ - ThermalPhotonModel, PhotonList, EventList, \ - convert_old_file, merge_files -from yt.config import ytcfg -from yt.testing import \ - requires_file, \ - assert_almost_equal -from yt.utilities.answer_testing.framework import requires_ds, \ - GenericArrayTest, data_dir_load -from numpy.random import RandomState -from yt.units.yt_array import uconcatenate -import os -import tempfile -import shutil - -def setup(): - from yt.config import ytcfg - ytcfg["yt", "__withintesting"] = "True" - -test_data_dir = ytcfg.get("yt", "test_data_dir") -xray_data_dir = ytcfg.get("yt", "xray_data_dir") - -rmfs = ["pn-med.rmf", "acisi_aimpt_cy17.rmf", - "aciss_aimpt_cy17.rmf", "nustar.rmf", - "ah_sxs_5ev_basefilt_20100712.rmf"] -arfs = ["pn-med.arf", "acisi_aimpt_cy17.arf", - "aciss_aimpt_cy17.arf", "nustar_3arcminA.arf", - "sxt-s_120210_ts02um_intallpxl.arf"] - -gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300" -APEC = xray_data_dir -TBABS = os.path.join(xray_data_dir, "tbabs_table.h5") -old_photon_file = os.path.join(xray_data_dir, "old_photons.h5") -old_event_file = os.path.join(xray_data_dir, "old_events.h5") - -def return_data(data): - def _return_data(name): - return data - return _return_data - -@requires_ds(gslr) -@requires_file(APEC) -@requires_file(TBABS) -@requires_file(old_photon_file) -@requires_file(old_event_file) -def test_sloshing(): - - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - prng = RandomState(0x4d3d3d3) - - ds = data_dir_load(gslr) - A = 2000. - exp_time = 1.0e4 - redshift = 0.1 - - apec_model = TableApecModel(APEC, 0.1, 11.0, 10000) - tbabs_model = TableAbsorbModel(TBABS, 0.1) - - sphere = ds.sphere("c", (0.1, "Mpc")) - - thermal_model = ThermalPhotonModel(apec_model, Zmet=0.3, prng=prng) - photons1 = PhotonList.from_scratch(sphere, redshift, A, exp_time, - thermal_model) - - return_photons = return_data(photons1.photons) - - tests = [GenericArrayTest(ds, return_photons, args=["photons"])] - - for a, r in zip(arfs, rmfs): - arf = os.path.join(xray_data_dir, a) - rmf = os.path.join(xray_data_dir, r) - events1 = photons1.project_photons([1.0,-0.5,0.2], responses=[arf,rmf], - absorb_model=tbabs_model, - convolve_energies=True, prng=prng) - events1['xsky'] - return_events = return_data(events1.events) - - tests.append(GenericArrayTest(ds, return_events, args=[a])) - - for test in tests: - test_sloshing.__name__ = test.description - yield test - - photons1.write_h5_file("test_photons.h5") - events1.write_h5_file("test_events.h5") - - photons2 = PhotonList.from_file("test_photons.h5") - events2 = EventList.from_h5_file("test_events.h5") - - convert_old_file(old_photon_file, "converted_photons.h5") - convert_old_file(old_event_file, "converted_events.h5") - - PhotonList.from_file("converted_photons.h5") - EventList.from_h5_file("converted_events.h5") - - for k in photons1.keys(): - if k == "Energy": - arr1 = uconcatenate(photons1[k]) - arr2 = uconcatenate(photons2[k]) - else: - arr1 = photons1[k] - arr2 = photons2[k] - assert_almost_equal(arr1, arr2) - for k in events1.keys(): - assert_almost_equal(events1[k], events2[k]) - - nevents = 0 - - for i in range(4): - events = photons1.project_photons([1.0,-0.5,0.2], - exp_time_new=0.25*exp_time, - absorb_model=tbabs_model, - prng=prng) - events.write_h5_file("split_events_%d.h5" % i) - nevents += len(events["xsky"]) - - 
merge_files(["split_events_%d.h5" % i for i in range(4)], - "merged_events.h5", add_exposure_times=True, - clobber=True) - - merged_events = EventList.from_h5_file("merged_events.h5") - assert len(merged_events["xsky"]) == nevents - assert merged_events.parameters["ExposureTime"] == exp_time - - os.chdir(curdir) - shutil.rmtree(tmpdir) diff --git a/yt/analysis_modules/photon_simulator/tests/test_spectra.py b/yt/analysis_modules/photon_simulator/tests/test_spectra.py deleted file mode 100644 index fbc0aa60105..00000000000 --- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py +++ /dev/null @@ -1,44 +0,0 @@ -from yt.analysis_modules.photon_simulator.api import \ - TableApecModel, XSpecThermalModel -from yt.testing import requires_module, fake_random_ds -from yt.utilities.answer_testing.framework import \ - GenericArrayTest -from yt.config import ytcfg - -def setup(): - ytcfg["yt", "__withintesting"] = "True" - -xray_data_dir = ytcfg.get("yt", "xray_data_dir") - -ds = fake_random_ds(64) - -@requires_module("xspec") -@requires_module("astropy") -def test_apec(): - - settings = {"APECROOT":xray_data_dir+"/apec_v2.0.2"} - xmod = XSpecThermalModel("apec", 0.1, 10.0, 10000, thermal_broad=True, - settings=settings) - xmod.prepare_spectrum(0.2) - - xcspec, xmspec = xmod.get_spectrum(6.0) - spec1 = xcspec+0.3*xmspec - - amod = TableApecModel(xray_data_dir, 0.1, 10.0, - 10000, thermal_broad=True) - amod.prepare_spectrum(0.2) - - acspec, amspec = amod.get_spectrum(6.0) - spec2 = acspec+0.3*amspec - - def spec1_test(): - return spec1.v - def spec2_test(): - return spec2.v - - for test in [GenericArrayTest(ds, spec1_test), - GenericArrayTest(ds, spec2_test)]: - test_apec.__name__ = test.description - yield test - - xmod.cleanup_spectrum() diff --git a/yt/analysis_modules/photon_simulator/utils.pyx b/yt/analysis_modules/photon_simulator/utils.pyx deleted file mode 100644 index 84fd4ec34db..00000000000 --- a/yt/analysis_modules/photon_simulator/utils.pyx +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np -cimport numpy as np -cimport cython - -cdef extern from "platform_dep.h": - double erf(double x) - -@cython.cdivision(True) -@cython.boundscheck(False) -@cython.wraparound(False) -def broaden_lines(np.ndarray[np.float64_t, ndim=1] E0, - np.ndarray[np.float64_t, ndim=1] sigma, - np.ndarray[np.float64_t, ndim=1] amp, - np.ndarray[np.float64_t, ndim=1] ebins): - - cdef int i, j, n, m - cdef double x, isigma - cdef np.ndarray[np.float64_t, ndim=1] cdf, vec - - n = E0.shape[0] - m = ebins.shape[0] - cdf = np.zeros(m) - vec = np.zeros(m-1) - - for i in range(n): - isigma = 1.0/sigma[i] - for j in range(m): - x = (ebins[j]-E0[i])*isigma - cdf[j] = 0.5*(1+erf(x)) - for j in range(m-1): - vec[j] = vec[j] + (cdf[j+1] - cdf[j])*amp[i] - return vec diff --git a/yt/analysis_modules/ppv_cube/api.py b/yt/analysis_modules/ppv_cube/api.py index 2526e86ca10..26012b96fcb 100644 --- a/yt/analysis_modules/ppv_cube/api.py +++ b/yt/analysis_modules/ppv_cube/api.py @@ -1,22 +1,7 @@ -""" -API for ppv_cube -""" -from __future__ import absolute_import -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- +from yt.utilities.exceptions import \ + YTModuleRemoved -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the PPVCube module has been moved to " - "the yt_astro_analysis package. This version is deprecated " - "and will be removed from yt in a future release. See " - "https://github.com/yt-project/yt_astro_analysis for further " - "information.") - -from .ppv_cube import PPVCube +raise YTModuleRemoved( + "PPVCube", + "https://github.com/yt-project/yt_astro_analysis", + "https://yt-astro-analysis.readthedocs.io/") diff --git a/yt/analysis_modules/ppv_cube/ppv_cube.py b/yt/analysis_modules/ppv_cube/ppv_cube.py deleted file mode 100644 index dffdcf2bf3d..00000000000 --- a/yt/analysis_modules/ppv_cube/ppv_cube.py +++ /dev/null @@ -1,345 +0,0 @@ -""" -Generating PPV FITS cubes -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np -from yt.utilities.on_demand_imports import _astropy -from yt.utilities.orientation import Orientation -from yt.visualization.fits_image import FITSImageData, sanitize_fits_unit -from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection -from yt.funcs import get_pbar -from yt.utilities.physical_constants import clight, mh -import yt.units.dimensions as ytdims -from yt.units.yt_array import YTQuantity -from yt.funcs import iterable -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - parallel_root_only, parallel_objects -import re -from . import ppv_utils -from yt.funcs import is_root -from yt.extern.six import string_types - -def create_vlos(normal, no_shifting): - if no_shifting: - def _v_los(field, data): - return data.ds.arr(data["index", "zeros"], "cm/s") - elif isinstance(normal, string_types): - def _v_los(field, data): - return -data["gas", "velocity_%s" % normal] - else: - orient = Orientation(normal) - los_vec = orient.unit_vectors[2] - def _v_los(field, data): - vz = data["gas", "velocity_x"]*los_vec[0] + \ - data["gas", "velocity_y"]*los_vec[1] + \ - data["gas", "velocity_z"]*los_vec[2] - return -vz - return _v_los - -fits_info = {"velocity": ("m/s", "VOPT", "v"), - "frequency": ("Hz", "FREQ", "f"), - "energy": ("eV", "ENER", "E"), - "wavelength": ("angstrom", "WAVE", "lambda")} - -class PPVCube(object): - def __init__(self, ds, normal, field, velocity_bounds, center="c", - width=(1.0,"unitary"), dims=100, thermal_broad=False, - atomic_weight=56., depth=(1.0,"unitary"), depth_res=256, - method="integrate", weight_field=None, no_shifting=False, - north_vector=None, no_ghost=True, data_source=None): - r""" Initialize a PPVCube object. - - Parameters - ---------- - ds : dataset - The dataset. - normal : array_like or string - The normal vector along with to make the projections. If an array, it - will be normalized. If a string, it will be assumed to be along one of the - principal axes of the domain ("x", "y", or "z"). - field : string - The field to project. - velocity_bounds : tuple - A 4-tuple of (vmin, vmax, nbins, units) for the velocity bounds to - integrate over. - center : A sequence of floats, a string, or a tuple. - The coordinate of the center of the image. 
If set to 'c', 'center' or - left blank, the plot is centered on the middle of the domain. If set to - 'max' or 'm', the center will be located at the maximum of the - ('gas', 'density') field. Centering on the max or min of a specific - field is supported by providing a tuple such as ("min","temperature") or - ("max","dark_matter_density"). Units can be specified by passing in *center* - as a tuple containing a coordinate and string unit name or by passing - in a YTArray. If a list or unitless array is supplied, code units are - assumed. - width : float, tuple, or YTQuantity. - The width of the projection. A float will assume the width is in code units. - A (value, unit) tuple or YTQuantity allows for the units of the width to be - specified. Implies width = height, e.g. the aspect ratio of the PPVCube's - spatial dimensions is 1. - dims : integer, optional - The spatial resolution of the cube. Implies nx = ny, e.g. the - aspect ratio of the PPVCube's spatial dimensions is 1. - thermal_broad : boolean, optional - Whether or not to broaden the line using the gas temperature. Default: False. - atomic_weight : float, optional - Set this value to the atomic weight of the particle that is emitting the line - if *thermal_broad* is True. Defaults to 56 (Fe). - depth : A tuple or a float, optional - A tuple containing the depth to project through and the string - key of the unit: (width, 'unit'). If set to a float, code units - are assumed. Only for off-axis cubes. - depth_res : integer, optional - Deprecated, this is still in the function signature for API - compatibility - method : string, optional - Set the projection method to be used. - "integrate" : line of sight integration over the line element. - "sum" : straight summation over the line of sight. - weight_field : string, optional - The name of the weighting field. Set to None for no weight. - no_shifting : boolean, optional - If set, no shifting due to velocity will occur but only thermal broadening. - Should not be set when *thermal_broad* is False, otherwise nothing happens! - north_vector : a sequence of floats - A vector defining the 'up' direction. This option sets the orientation of - the plane of projection. If not set, an arbitrary grid-aligned north_vector - is chosen. Ignored in the case of on-axis cubes. - no_ghost: bool, optional - Optimization option for off-axis cases. If True, homogenized bricks will - extrapolate out from grid instead of interpolating from - ghost zones that have to first be calculated. This can - lead to large speed improvements, but at a loss of - accuracy/smoothness in resulting image. The effects are - less notable when the transfer function is smooth and - broad. Default: True - data_source : yt.data_objects.data_containers.YTSelectionContainer, optional - If specified, this will be the data source used for selecting regions to project. - - Examples - -------- - >>> i = 60*np.pi/180. 
- >>> L = [0.0,np.sin(i),np.cos(i)] - >>> cube = PPVCube(ds, L, "density", (-5.,4.,100,"km/s"), width=(10.,"kpc")) - """ - - self.ds = ds - self.field = field - self.width = width - self.particle_mass = atomic_weight*mh - self.thermal_broad = thermal_broad - self.no_shifting = no_shifting - - if not isinstance(normal, string_types): - width = ds.coordinates.sanitize_width(normal, width, depth) - width = tuple(el.in_units('code_length').v for el in width) - - if not hasattr(ds.fields.gas, "temperature") and thermal_broad: - raise RuntimeError("thermal_broad cannot be True if there is " - "no 'temperature' field!") - - if no_shifting and not thermal_broad: - raise RuntimeError("no_shifting cannot be True when thermal_broad is False!") - - self.center = ds.coordinates.sanitize_center(center, normal)[0] - - self.nx = dims - self.ny = dims - self.nv = velocity_bounds[2] - - if method not in ["integrate","sum"]: - raise RuntimeError("Only the 'integrate' and 'sum' projection +" - "methods are supported in PPVCube.") - - dd = ds.all_data() - fd = dd._determine_fields(field)[0] - self.field_units = ds._get_field_info(fd).units - - self.vbins = ds.arr(np.linspace(velocity_bounds[0], - velocity_bounds[1], - velocity_bounds[2]+1), velocity_bounds[3]) - - self._vbins = self.vbins.copy() - self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1]) - self.vmid_cgs = self.vmid.in_cgs().v - self.dv = self.vbins[1]-self.vbins[0] - self.dv_cgs = self.dv.in_cgs().v - - self.current_v = 0.0 - - _vlos = create_vlos(normal, self.no_shifting) - self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s", - sampling_type='cell') - - _intensity = self._create_intensity() - self.ds.add_field(("gas","intensity"), function=_intensity, - units=self.field_units, sampling_type='cell') - - if method == "integrate" and weight_field is None: - self.proj_units = str(ds.quan(1.0, self.field_units+"*cm").units) - elif method == "sum": - self.proj_units = self.field_units - - storage = {} - pbar = get_pbar("Generating cube.", self.nv) - for sto, i in parallel_objects(range(self.nv), storage=storage): - self.current_v = self.vmid_cgs[i] - if isinstance(normal, string_types): - prj = ds.proj("intensity", ds.coordinates.axis_id[normal], method=method, - weight_field=weight_field, data_source=data_source) - buf = prj.to_frb(width, self.nx, center=self.center)["intensity"] - else: - if data_source is None: - source = ds - else: - source = data_source - buf = off_axis_projection(source, self.center, normal, width, - (self.nx, self.ny), "intensity", - north_vector=north_vector, no_ghost=no_ghost, - method=method, weight=weight_field) - sto.result_id = i - sto.result = buf.swapaxes(0,1) - pbar.update(i) - pbar.finish() - - self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.proj_units) - if is_root(): - for i, buf in sorted(storage.items()): - self.data[:,:,i] = buf.transpose() - - self.axis_type = "velocity" - - # Now fix the width - if iterable(self.width): - self.width = ds.quan(self.width[0], self.width[1]) - elif not isinstance(self.width, YTQuantity): - self.width = ds.quan(self.width, "code_length") - - self.ds.field_info.pop(("gas","intensity")) - self.ds.field_info.pop(("gas","v_los")) - - def transform_spectral_axis(self, rest_value, units): - """ - Change the units of the spectral axis to some equivalent unit, such - as energy, wavelength, or frequency, by providing a *rest_value* and the - *units* of the new spectral axis. This corresponds to the Doppler-shifting - of lines due to gas motions and thermal broadening. 
- """ - if self.axis_type != "velocity": - self.reset_spectral_axis() - x0 = self.ds.quan(rest_value, units) - if x0.units.dimensions == ytdims.rate or x0.units.dimensions == ytdims.energy: - self.vbins = x0*(1.-self.vbins.in_cgs()/clight) - elif x0.units.dimensions == ytdims.length: - self.vbins = x0/(1.-self.vbins.in_cgs()/clight) - self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1]) - self.dv = self.vbins[1]-self.vbins[0] - dims = self.dv.units.dimensions - if dims == ytdims.rate: - self.axis_type = "frequency" - elif dims == ytdims.length: - self.axis_type = "wavelength" - elif dims == ytdims.energy: - self.axis_type = "energy" - elif dims == ytdims.velocity: - self.axis_type = "velocity" - - def reset_spectral_axis(self): - """ - Reset the spectral axis to the original velocity range and units. - """ - self.vbins = self._vbins.copy() - self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1]) - self.dv = self.vbins[1]-self.vbins[0] - - @parallel_root_only - def write_fits(self, filename, overwrite=False, length_unit=None, - sky_scale=None, sky_center=None, **kwargs): - r""" Write the PPVCube to a FITS file. - - Parameters - ---------- - filename : string - The name of the file to write to. - overwrite : boolean, optional - Whether to overwrite a file with the same name that already - exists. Default False. - length_unit : string, optional - The units to convert the coordinates to in the file. - sky_scale : tuple, optional - Conversion between an angle unit and a length unit, if sky - coordinates are desired, e.g. (1.0, "arcsec/kpc") - sky_center : tuple, optional - The (RA, Dec) coordinate in degrees of the central pixel. Must - be specified with *sky_scale*. - - Notes - ----- - Additional keyword arguments are passed to - :meth:`~astropy.io.fits.HDUList.writeto`. - - Examples - -------- - >>> cube.write_fits("my_cube.fits", overwrite=False, - ... 
sky_scale=(1.0,"arcsec/kpc"), sky_center=(30.,45.)) - """ - vunit = fits_info[self.axis_type][0] - vtype = fits_info[self.axis_type][1] - - v_center = 0.5*(self.vbins[0]+self.vbins[-1]).in_units(vunit).value - - if length_unit is None: - units = str(self.ds.get_smallest_appropriate_unit(self.width)) - else: - units = length_unit - units = sanitize_fits_unit(units) - dx = self.width.in_units(units).v/self.nx - dy = self.width.in_units(units).v/self.ny - dv = self.dv.in_units(vunit).v - - w = _astropy.pywcs.WCS(naxis=3) - w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)] - w.wcs.cdelt = [dx,dy,dv] - w.wcs.crval = [0.0,0.0,v_center] - w.wcs.cunit = [units,units,vunit] - w.wcs.ctype = ["LINEAR","LINEAR",vtype] - - fib = FITSImageData(self.data.transpose(), fields=self.field, wcs=w) - fib.update_all_headers("bunit", re.sub('()', '', str(self.proj_units))) - fib.update_all_headers("btype", self.field) - if sky_scale is not None and sky_center is not None: - fib.create_sky_wcs(sky_center, sky_scale) - fib.writeto(filename, overwrite=overwrite, **kwargs) - - def __repr__(self): - return "PPVCube [%d %d %d] (%s < %s < %s)" % (self.nx, self.ny, self.nv, - self.vbins[0], - fits_info[self.axis_type][2], - self.vbins[-1]) - - def __getitem__(self, item): - return self.data[item] - - def _create_intensity(self): - if self.thermal_broad: - def _intensity(field, data): - v = self.current_v-data["gas", "v_los"].in_cgs().v - T = data["gas", "temperature"].in_cgs().v - w = ppv_utils.compute_weight(self.thermal_broad, self.dv_cgs, - self.particle_mass, v.flatten(), T.flatten()) - w[np.isnan(w)] = 0.0 - return data[self.field]*w.reshape(v.shape) - else: - def _intensity(field, data): - w = 1.-np.fabs(self.current_v-data["gas", "v_los"].in_cgs().v)/self.dv_cgs - w[w < 0.0] = 0.0 - return data[self.field]*w - return _intensity diff --git a/yt/analysis_modules/ppv_cube/ppv_utils.pyx b/yt/analysis_modules/ppv_cube/ppv_utils.pyx deleted file mode 100644 index b9ed29c8952..00000000000 --- a/yt/analysis_modules/ppv_cube/ppv_utils.pyx +++ /dev/null @@ -1,30 +0,0 @@ -import numpy as np -cimport numpy as np -cimport cython -from yt.utilities.physical_constants import kboltz -from libc.math cimport exp, sqrt - -cdef double kb = kboltz.v -cdef double pi = np.pi - -@cython.cdivision(True) -@cython.boundscheck(False) -@cython.wraparound(False) -def compute_weight(np.uint8_t thermal_broad, - double dv, - double m_part, - np.ndarray[np.float64_t, ndim=1] v, - np.ndarray[np.float64_t, ndim=1] T): - - cdef int i, n - cdef double v2_th, x - cdef np.ndarray[np.float64_t, ndim=1] w - - n = v.shape[0] - w = np.zeros(n) - - for i in range(n): - v2_th = 2.*kb*T[i]/m_part - w[i] = dv*exp(-v[i]*v[i]/v2_th)/sqrt(v2_th*pi) - - return w diff --git a/yt/analysis_modules/ppv_cube/tests/__init__.py b/yt/analysis_modules/ppv_cube/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/ppv_cube/tests/test_ppv.py b/yt/analysis_modules/ppv_cube/tests/test_ppv.py deleted file mode 100644 index 2ba087d168d..00000000000 --- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -Unit test the PPVCube analysis module. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.frontends.stream.api import load_uniform_grid -from yt.analysis_modules.ppv_cube.api import PPVCube -import yt.units as u -from yt.utilities.physical_constants import kboltz, mh, clight -import numpy as np -from yt.testing import assert_allclose_units - -def setup(): - """Test specific setup.""" - from yt.config import ytcfg - ytcfg["yt", "__withintesting"] = "True" - -def test_ppv(): - - np.random.seed(seed=0x4d3d3d3) - - dims = (8, 8, 128) - v_shift = 1.0e7*u.cm/u.s - sigma_v = 2.0e7*u.cm/u.s - T_0 = 1.0e8*u.Kelvin - data = {"density":(np.ones(dims),"g/cm**3"), - "temperature":(T_0.v*np.ones(dims), "K"), - "velocity_x":(np.zeros(dims),"cm/s"), - "velocity_y":(np.zeros(dims),"cm/s"), - "velocity_z":(np.random.normal(loc=v_shift.v,scale=sigma_v.v,size=dims), "cm/s")} - - ds = load_uniform_grid(data, dims) - - cube = PPVCube(ds, "z", "density", (-300., 300., 1024, "km/s"), - dims=8, thermal_broad=True) - - dv = cube.dv - v_th = np.sqrt(2.*kboltz*T_0/(56.*mh) + 2.*sigma_v**2).in_units("km/s") - a = cube.data.mean(axis=(0,1)).v - b = dv*np.exp(-((cube.vmid+v_shift)/v_th)**2)/(np.sqrt(np.pi)*v_th) - - assert_allclose_units(a, b, 1.0e-2) - - E_0 = 6.8*u.keV - - cube.transform_spectral_axis(E_0.v, str(E_0.units)) - - dE = -cube.dv - delta_E = E_0*v_th.in_cgs()/clight - E_shift = E_0*(1.+v_shift/clight) - - c = dE*np.exp(-((cube.vmid-E_shift)/delta_E)**2)/(np.sqrt(np.pi)*delta_E) - - assert_allclose_units(a, c, 1.0e-2) - -def test_ppv_nothermalbroad(): - - np.random.seed(seed=0x4d3d3d3) - - dims = (16, 16, 128) - v_shift = 1.0e6*u.cm/u.s - sigma_v = 2.0e6*u.cm/u.s - data = {"density":(np.ones(dims),"g/cm**3"), - "velocity_x":(np.zeros(dims),"cm/s"), - "velocity_y":(np.zeros(dims),"cm/s"), - "velocity_z":(np.random.normal(loc=v_shift.v,scale=sigma_v.v,size=dims), "cm/s")} - - ds = load_uniform_grid(data, dims) - - cube = PPVCube(ds, "z", "density", (-100., 100., 128, "km/s"), - dims=16, thermal_broad=False) - - dv = cube.dv - v_noth = np.sqrt(2)*(sigma_v).in_units("km/s") - a = cube.data.mean(axis=(0,1)).v - b = dv*np.exp(-((cube.vmid+v_shift)/v_noth)**2)/(np.sqrt(np.pi)*v_noth) - - assert_allclose_units(a, b, atol=5.0e-3) diff --git a/yt/analysis_modules/radmc3d_export/RadMC3DImageUtilities.py b/yt/analysis_modules/radmc3d_export/RadMC3DImageUtilities.py deleted file mode 100644 index f3b8462d31a..00000000000 --- a/yt/analysis_modules/radmc3d_export/RadMC3DImageUtilities.py +++ /dev/null @@ -1,93 +0,0 @@ -''' - -Functions for dealing with the image.out files created by RADMC-3D - -''' - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np - - -def parse_radmc3d_image_header(lines): - ''' - - Parses the header lines from the image.out file. - Returns a dictionary containing the image parameters. - - ''' - - header_data = {} - - # an integer flag describing the image format. - # this function only works for images made in - # "observer at infinity" mode, i.e. 
iformat is 1 - iformat = np.int64(lines[0].strip()) - assert(iformat == 1) - header_data["iformat"] = iformat - - # The number of pixels in the x and y-directions - Nx, Ny = [np.int64(Npix) for Npix in lines[1].strip().split()] - header_data["Nx"] = Nx - header_data["Ny"] = Ny - - # You can produce images at multiple wavelenths in a single - # pass. This function assumes that there is only 1. - num_wavelengths = np.int64(lines[2].strip()) - assert(num_wavelengths == 1) - header_data["num_wavelengths"] = num_wavelengths - - # The size of pixel in each direction. Note that - # this only makes sense if iformat is 1 - pixel_size_cm_x, pixel_size_cm_y = \ - [np.float64(Npix) for Npix in lines[3].strip().split()] - header_data["pixel_size_cm_x"] = pixel_size_cm_x - header_data["pixel_size_cm_y"] = pixel_size_cm_y - - # The wavelength at which the image was produced. - # We assume there is only 1 image here. - wavelength_microns = np.float64(lines[4].strip()) # assume 1 wavelength - header_data["wavelength_microns"] = wavelength_microns - - return header_data - - -def read_radmc3d_image(filename): - ''' - - Loads the image.out file created by radmc-3d. - Returns an np.array that contains the image data - as well as a dictionary with some useful metadata. - - ''' - - fileh = open(filename, 'r') - lines = fileh.readlines() - fileh.close() - - # The header should always be 5 lines long, - # as per the radmc-3d manual - header_lines = lines[0:6] - header = parse_radmc3d_image_header(header_lines) - - Nx = header["Nx"] - Ny = header["Ny"] - - # The rest of the lines are the image data, with the - # possible exception of a newline at the end - image_lines = lines[6:] - image = np.array([np.float64(line.strip()) for line in image_lines - if not line.isspace()]) - - # This had better be true - assert(image.size == Nx*Ny) - - image = image.reshape(header["Nx"], header["Ny"]) - - return header, image diff --git a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py deleted file mode 100644 index 6a41e049726..00000000000 --- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py +++ /dev/null @@ -1,423 +0,0 @@ -""" -Code to export from yt to RadMC3D - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np -from yt.utilities.lib.write_array import \ - write_3D_array, write_3D_vector_array - - -class RadMC3DLayer: - ''' - - This class represents an AMR "layer" of the style described in - the radmc3d manual. Unlike yt grids, layers may not have more - than one parent, so level L grids will need to be split up - if they straddle two or more level L - 1 grids. - - ''' - def __init__(self, level, parent, unique_id, LE, RE, dim): - self.level = level - self.parent = parent - self.LeftEdge = LE - self.RightEdge = RE - self.ActiveDimensions = dim - self.id = unique_id - - def get_overlap_with(self, grid): - ''' - - Returns the overlapping region between two Layers, - or a layer and a grid. RE < LE means in any direction - means no overlap. 
- - ''' - LE = np.maximum(self.LeftEdge, grid.LeftEdge) - RE = np.minimum(self.RightEdge, grid.RightEdge) - return LE, RE - - def overlaps(self, grid): - ''' - - Returns whether or not this layer overlaps a given grid - - ''' - LE, RE = self.get_overlap_with(grid) - if np.any(RE <= LE): - return False - else: - return True - - -class RadMC3DWriter: - ''' - - This class provides a mechanism for writing out data files in a format - readable by radmc3d. Currently, only the ASCII, "Layer" style file format - is supported. For more information please see the radmc3d manual at: - http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d - - Parameters - ---------- - - ds : `Dataset` - This is the dataset object corresponding to the - simulation output to be written out. - - max_level : int - An int corresponding to the maximum number of levels of refinement - to include in the output. Often, this does not need to be very large - as information on very high levels is frequently unobservable. - Default = 2. - - Examples - -------- - - This will create a field called "DustDensity" and write it out to the - file "dust_density.inp" in a form readable by RadMC3D. - - >>> import yt - >>> from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter - - >>> dust_to_gas = 0.01 - >>> def _DustDensity(field, data): - ... return dust_to_gas*data["Density"] - >>> yt.add_field("DustDensity", function=_DustDensity) - - >>> ds = yt.load("galaxy0030/galaxy0030") - - >>> writer = RadMC3DWriter(ds) - >>> writer.write_amr_grid() - >>> writer.write_dust_file("DustDensity", "dust_density.inp") - - --- - - This example will create a field called "NumberDensityCO" and write it out - to the file "numberdens_co.inp". It will also write out information about - the gas velocity to "gas_velocity.inp" so that this broadening may be - included in the radiative transfer calculation by radmc3d: - - >>> import yt - >>> from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter - - >>> x_co = 1.0e-4 - >>> mu_h = yt.Quantity(2.34e-24, 'g') - >>> def _NumberDensityCO(field, data): - ... 
return (x_co/mu_h)*data["Density"] - >>> yt.add_field("NumberDensityCO", function=_NumberDensityCO) - - >>> ds = yt.load("galaxy0030/galaxy0030") - >>> writer = RadMC3DWriter(ds) - - >>> writer.write_amr_grid() - >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp") - >>> velocity_fields = ["velocity_x", "velocity_y", "velocity_z"] - >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") - - ''' - - def __init__(self, ds, max_level=2): - self.max_level = max_level - self.cell_count = 0 - self.layers = [] - self.domain_dimensions = ds.domain_dimensions - self.domain_left_edge = ds.domain_left_edge - self.domain_right_edge = ds.domain_right_edge - self.grid_filename = "amr_grid.inp" - self.ds = ds - - base_layer = RadMC3DLayer(0, None, 0, - self.domain_left_edge, - self.domain_right_edge, - self.domain_dimensions) - - self.layers.append(base_layer) - self.cell_count += np.product(ds.domain_dimensions) - - sorted_grids = sorted(ds.index.grids, key=lambda x: x.Level) - for grid in sorted_grids: - if grid.Level <= self.max_level: - self._add_grid_to_layers(grid) - - def _get_parents(self, grid): - parents = [] - for potential_parent in self.layers: - if potential_parent.level == grid.Level - 1: - if potential_parent.overlaps(grid): - parents.append(potential_parent) - return parents - - def _add_grid_to_layers(self, grid): - parents = self._get_parents(grid) - for parent in parents: - LE, RE = parent.get_overlap_with(grid) - N = (RE - LE) / grid.dds - N = np.array([int(n + 0.5) for n in N]) - new_layer = RadMC3DLayer(grid.Level, parent.id, - len(self.layers), - LE, RE, N) - self.layers.append(new_layer) - self.cell_count += np.product(N) - - def write_amr_grid(self): - ''' - This routine writes the "amr_grid.inp" file that describes the mesh - radmc3d will use. - - ''' - dims = self.domain_dimensions - LE = self.domain_left_edge - RE = self.domain_right_edge - - # RadMC-3D wants the cell wall positions in cgs. 
Convert here: - LE_cgs = LE.in_units('cm').d # don't write the units, though - RE_cgs = RE.in_units('cm').d - - # calculate cell wall positions - xs = [str(x) for x in np.linspace(LE_cgs[0], RE_cgs[0], dims[0]+1)] - ys = [str(y) for y in np.linspace(LE_cgs[1], RE_cgs[1], dims[1]+1)] - zs = [str(z) for z in np.linspace(LE_cgs[2], RE_cgs[2], dims[2]+1)] - - # writer file header - grid_file = open(self.grid_filename, 'w') - grid_file.write('1 \n') # iformat is always 1 - if self.max_level == 0: - grid_file.write('0 \n') - else: - grid_file.write('10 \n') # only layer-style files are supported - grid_file.write('1 \n') # only cartesian coordinates are supported - grid_file.write('0 \n') - grid_file.write('{} {} {} \n'.format(1, 1, 1)) # assume 3D - grid_file.write('{} {} {} \n'.format(dims[0], dims[1], dims[2])) - if self.max_level != 0: - s = str(self.max_level) + ' ' + str(len(self.layers)-1) + '\n' - grid_file.write(s) - - # write base grid cell wall positions - for x in xs: - grid_file.write(x + ' ') - grid_file.write('\n') - - for y in ys: - grid_file.write(y + ' ') - grid_file.write('\n') - - for z in zs: - grid_file.write(z + ' ') - grid_file.write('\n') - - # write information about fine layers, skipping the base layer: - for layer in self.layers[1:]: - p = layer.parent - dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions) - if p == 0: - ind = (layer.LeftEdge - LE) / (2.0*dds) + 1 - else: - parent_LE = np.zeros(3) - for potential_parent in self.layers: - if potential_parent.id == p: - parent_LE = potential_parent.LeftEdge - ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1 - ix = int(ind[0]+0.5) - iy = int(ind[1]+0.5) - iz = int(ind[2]+0.5) - nx, ny, nz = layer.ActiveDimensions / 2 - s = '{} {} {} {} {} {} {} \n' - s = s.format(p, ix, iy, iz, nx, ny, nz) - grid_file.write(s) - - grid_file.close() - - def _write_layer_data_to_file(self, fhandle, field, level, LE, dim): - cg = self.ds.covering_grid(level, LE, dim, num_ghost_zones=1) - if isinstance(field, list): - data_x = cg[field[0]] - data_y = cg[field[1]] - data_z = cg[field[2]] - write_3D_vector_array(data_x, data_y, data_z, fhandle) - else: - data = cg[field] - write_3D_array(data, fhandle) - - def write_dust_file(self, field, filename): - ''' - This method writes out fields in the format radmc3d needs to compute - thermal dust emission. In particular, if you have a field called - "DustDensity", you can write out a dust_density.inp file. - - Parameters - ---------- - - field : string - The name of the field to be written out - filename : string - The name of the file to write the data to. The filenames radmc3d - expects for its various modes of operations are described in the - radmc3d manual. - - ''' - fhandle = open(filename, 'w') - - # write header - fhandle.write('1 \n') - fhandle.write(str(self.cell_count) + ' \n') - fhandle.write('1 \n') - - # now write fine layers: - for layer in self.layers: - lev = layer.level - if lev == 0: - LE = self.domain_left_edge - N = self.domain_dimensions - else: - LE = layer.LeftEdge - N = layer.ActiveDimensions - - self._write_layer_data_to_file(fhandle, field, lev, LE, N) - - fhandle.close() - - def write_line_file(self, field, filename): - ''' - This method writes out fields in the format radmc3d needs to compute - line emission. - - Parameters - ---------- - - field : string or list of 3 strings - If a string, the name of the field to be written out. If a list, - three fields that will be written to the file as a vector quantity. 
- filename : string - The name of the file to write the data to. The filenames radmc3d - expects for its various modes of operation are described in the - radmc3d manual. - - ''' - fhandle = open(filename, 'w') - - # write header - fhandle.write('1 \n') - fhandle.write(str(self.cell_count) + ' \n') - - # now write fine layers: - for layer in self.layers: - lev = layer.level - if lev == 0: - LE = self.domain_left_edge - N = self.domain_dimensions - else: - LE = layer.LeftEdge - N = layer.ActiveDimensions - - self._write_layer_data_to_file(fhandle, field, lev, LE, N) - - fhandle.close() - - def write_source_files(self, sources, wavelengths): - ''' - - This function creates the stars.inp and wavelength_micron.inp - files that RadMC3D uses for its dust continuum calculations. - - Parameters - ---------- - - sources: a list of RadMC3DSource objects - A list that contains all the sources you would like yt - to create - wavelengths: np.array of float values - An array listing the wavelength points you would like to - use the radiative transfer calculation - - ''' - - nstars = len(sources) - nlam = len(wavelengths) - - filename = 'stars.inp' - fhandle = open(filename, 'w') - - # write header - fhandle.write('2 \n') # a format flag that should always be 2 - fhandle.write('%d %d \n' % (nstars, nlam)) - - # write source information - for source in sources: - fhandle.write(str(source.radius) + ' ') - fhandle.write(str(source.mass) + ' ') - fhandle.write('%f %f %f' %(source.position[0], \ - source.position[1], \ - source.position[2])) - fhandle.write('\n') - - # write wavelength information - for wavelength in wavelengths: - fhandle.write('%f \n' % wavelength) - - # finally write blackbody temperature for each source - for source in sources: - # the negative sign is a flag used internally - # by RadMC3D to indicate that this is a blackbody - # source - fhandle.write('%f \n' % -source.temperature) - - # done with stars.inp - fhandle.close() - - # now do the wavelength_micron.inp file - filename = 'wavelength_micron.inp' - fhandle = open(filename, 'w') - - fhandle.write('%d \n' % nlam) - for wavelength in wavelengths: - fhandle.write('%f \n' % wavelength) - - # done with both - fhandle.close() - - -class RadMC3DSource: - ''' - - A class that contains the data associated with a single RadMC3D photon source. - This is designed to help export data about the stars in a dataset into a format - that can be read in by RadMC3D. Although RadMC3D can handle non-blackbody - sources, here we assume that the source is a blackbody with a given temperature. 
- - Parameters - ---------- - - radius: float - The size of the source in cm - mass: float - The mass of the source in g - position: list of floats - The x, y, and z coordinates of the source, in cm - temperature: float - The blackbody temperature of the source, in K - - ''' - - def __init__(self, radius, mass, position, temperature): - self.radius = radius - self.mass = mass - self.position = position - self.temperature = temperature - - # some basic sanity checks - assert(self.radius > 0.0) - assert(self.mass > 0.0) - assert(self.temperature > 0) - assert(len(self.position) == 3) # 3D only, please diff --git a/yt/analysis_modules/radmc3d_export/api.py b/yt/analysis_modules/radmc3d_export/api.py index 0af8f9b9d6b..e1d0478f3ea 100644 --- a/yt/analysis_modules/radmc3d_export/api.py +++ b/yt/analysis_modules/radmc3d_export/api.py @@ -1,30 +1,7 @@ -""" -API for RadMC3D Export code +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the radmc3d_export module has been moved to " - "the yt_astro_analysis package. This version is deprecated " - "and will be removed from yt in a future release. See " - "https://github.com/yt-project/yt_astro_analysis for further " - "information.") - -from .RadMC3DInterface import \ - RadMC3DWriter, \ - RadMC3DSource - -from .RadMC3DImageUtilities import \ - read_radmc3d_image +raise YTModuleRemoved( + "radmc3d_export", + "https://github.com/yt-project/yt_astro_analysis", + "https://yt-astro-analysis.readthedocs.io/") diff --git a/yt/analysis_modules/radmc3d_export/tests/__init__.py b/yt/analysis_modules/radmc3d_export/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py b/yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py deleted file mode 100644 index df3d0acf68e..00000000000 --- a/yt/analysis_modules/radmc3d_export/tests/test_radmc3d_exporter.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Unit test for the RADMC3D Exporter analysis module -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import yt -from yt.testing import assert_allclose -from yt.analysis_modules.radmc3d_export.api import RadMC3DWriter -from yt.utilities.answer_testing.framework import \ - AnswerTestingTest, \ - requires_ds -import tempfile -import numpy as np -import os -import shutil - - -class RadMC3DValuesTest(AnswerTestingTest): - ''' - - This test writes out a "dust_density.inp" file, - reads it back in, and checks the sum of the - values for degradation. 
-
-    '''
-    _type_name = "RadMC3DValuesTest"
-    _attrs = ("field", )
-
-    def __init__(self, ds_fn, field, decimals=10):
-        super(RadMC3DValuesTest, self).__init__(ds_fn)
-        self.field = field
-        self.decimals = decimals
-
-    def run(self):
-
-        # Set up in a temp dir
-        tmpdir = tempfile.mkdtemp()
-        curdir = os.getcwd()
-        os.chdir(tmpdir)
-
-        # try to write the output files
-        writer = RadMC3DWriter(self.ds)
-        writer.write_amr_grid()
-        writer.write_dust_file(self.field, "dust_density.inp")
-
-        # compute the sum of the values in the resulting file
-        total = 0.0
-        with open('dust_density.inp', 'r') as f:
-            for i, line in enumerate(f):
-
-                # skip header
-                if i < 3:
-                    continue
-
-                line = line.rstrip()
-                total += np.float64(line)
-
-        # clean up
-        os.chdir(curdir)
-        shutil.rmtree(tmpdir)
-
-        return total
-
-    def compare(self, new_result, old_result):
-        err_msg = "Total value for %s not equal." % (self.field,)
-        assert_allclose(new_result, old_result, 10.**(-self.decimals),
-                        err_msg=err_msg, verbose=True)
-
-
-ISO_GAL = "IsolatedGalaxy/galaxy0030/galaxy0030"
-
-
-@requires_ds(ISO_GAL)
-def test_radmc3d_exporter_continuum():
-    """
-    This test is simply following the description in the docs for how to
-    generate the necessary output files to run a continuum emission map from
-    dust for one of our sample datasets.
-    """
-
-    ds = yt.load(ISO_GAL)
-
-    # Make up a dust density field where dust density is 1% of gas density
-    dust_to_gas = 0.01
-    def _DustDensity(field, data):
-        return dust_to_gas * data["density"]
-    ds.add_field(("gas", "dust_density"), function=_DustDensity, units="g/cm**3")
-
-    yield RadMC3DValuesTest(ds, ("gas", "dust_density"))
diff --git a/yt/analysis_modules/spectral_integrator/api.py b/yt/analysis_modules/spectral_integrator/api.py
index 64415d34095..77f0c089a9d 100644
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -1,8 +1,4 @@
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning("The spectral_integrator module is deprecated. "
-                          "'add_xray_emissivity_field' can now be imported "
-                          "from the yt module.")
-
-from yt.fields.xray_emission_fields import \
-    add_xray_emissivity_field
\ No newline at end of file
+raise RuntimeError(
+    "The spectral_integrator module has been moved to yt.fields. "
+    "'add_xray_emissivity_field' can now be imported "
+    "from the yt module.")
diff --git a/yt/analysis_modules/star_analysis/api.py b/yt/analysis_modules/star_analysis/api.py
index a0568b3f3e5..bc4d7cf5dff 100644
--- a/yt/analysis_modules/star_analysis/api.py
+++ b/yt/analysis_modules/star_analysis/api.py
@@ -1,26 +1,7 @@
-"""
-API for star_analysis
+from yt.utilities.exceptions import \
+    YTModuleRemoved
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.funcs import issue_deprecation_warning
-
-issue_deprecation_warning(
-    "The star_analysis module has been deprecated. 
This code has been moved " - "to the yt attic (https://github.com/yt-project/yt_attic) and will " - "be removed in a future release.") - -from .sfr_spectrum import \ - StarFormationRate, \ - SpectrumBuilder, \ - Zsun +raise YTModuleRemoved( + "star_analysis", + "https://github.com/yt-project/yt_attic", + "https://yt-attic.readthedocs.io/") diff --git a/yt/analysis_modules/star_analysis/sfr_spectrum.py b/yt/analysis_modules/star_analysis/sfr_spectrum.py deleted file mode 100644 index 856e385427b..00000000000 --- a/yt/analysis_modules/star_analysis/sfr_spectrum.py +++ /dev/null @@ -1,613 +0,0 @@ -""" -StarAnalysis - Functions to analyze stars. - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import os -import numpy as np -from yt.utilities.on_demand_imports import _h5py as h5py -import math - -from yt.config import ytcfg -from yt.extern.six.moves import zip as izip -from yt.funcs import \ - get_pbar -from yt.units import \ - g, s, Zsun -from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.cosmology import \ - Cosmology -from yt.utilities.logger import ytLogger as mylog -from yt.utilities.physical_constants import \ - speed_of_light_cgs - - -class StarFormationRate(object): - - r"""Calculates the star formation rate for a given population of - star particles. - - Parameters - ---------- - ds : EnzoDataset object - data_source : AMRRegion object, optional - The region from which stars are extracted for analysis. If this - is not supplied, the next three must be, otherwise the next - three do not need to be specified. - star_mass : Ordered array or list of floats - The mass of the stars to be analyzed in units of Msun. - star_creation_time : Ordered array or list of floats - The creation time for the stars in code units. - volume : Float - The comoving volume of the region for the specified list of stars. - bins : Integer - The number of time bins used for binning the stars. Default = 300. - star_filter : A user-defined filtering rule for stars. - See: http://yt-project.org/docs/dev/analyzing/filtering.html - Default: ct>0 - - Examples - -------- - - >>> import yt - >>> from yt.analysis_modules.star_analysis.api import StarFormationRate - >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") - >>> sp = ds.sphere([0.5, 0.5, 0.5], 0.1) - >>> sfr = StarFormationRate(ds, sp) - """ - - def __init__(self, ds, data_source=None, star_mass=None, - star_creation_time=None, bins=300, volume=None, - star_filter=None): - self._ds = ds - self._data_source = data_source - self._filter = star_filter - self.ds_provided = self._data_source is not None - self.filter_provided = self._filter is not None - self.bin_count = bins - - # Set up for time conversion. - self.cosm = Cosmology( - hubble_constant=self._ds.hubble_constant, - omega_matter=self._ds.omega_matter, - omega_lambda=self._ds.omega_lambda) - # Find the time right now. - self.time_now = self._ds.current_time - - if not self.ds_provided: - # Check to make sure we have the right set of informations. 
- if star_mass is None or star_creation_time is None \ - or volume is None: - mylog.error(""" - If data_source is not provided, all of these parameters - need to be set: - star_mass (array, Msun), - star_creation_time (array, code units), - volume (float, cMpc**3).""") - return None - - if isinstance(star_mass, YTArray): - assert star_mass.units.same_dimensions_as(g.units) - elif star_mass is not None: - star_mass = YTArray(star_mass, 'Msun') - self.star_mass = star_mass - - if isinstance(star_creation_time, YTArray): - assert star_creation_time.units.same_dimensions_as(s.units) - elif star_creation_time is not None: - star_creation_time = self._ds.arr(star_creation_time, - 'code_time') - self.star_creation_time = star_creation_time - - if isinstance(volume, YTQuantity): - assert volume.units.same_dimensions_as( - self._ds.quan(1.0, 'Mpccm**3').units - ) - elif volume is not None: - volume = self._ds.quan(volume, 'Mpccm**3') - self.volume = volume - - # Build the distribution. - self.build_dist() - # Attach some convenience arrays. - self.attach_arrays() - - def build_dist(self): - """ - Build the data for plotting. - """ - # Pick out the stars. - if self.filter_provided: - ct = self._filter['creation_time'] - mass_stars = self._data_source[self._filter, "particle_mass"] - else: - if self.ds_provided: - ct = self._data_source['creation_time'] - if ct is None: - errmsg = 'data source must have particle_age!' - mylog.error(errmsg) - raise RuntimeError(errmsg) - mask = ct > 0 - if not any(mask): - errmsg = 'all particles have age < 0' - mylog.error(errmsg) - raise RuntimeError(errmsg) - # type = self._data_source['particle_type'] - ct_stars = ct[mask] - mass_stars = self._data_source[ - 'particle_mass'][mask].in_units('Msun') - del mask - else: - ct_stars = self.star_creation_time - mass_stars = self.star_mass - # Find the oldest stars in units of code time. - tmin = ct_stars.min().in_units("s") - # Multiply the end to prevent numerical issues. - self.time_bins = np.linspace( - tmin * 1.01, self._ds.current_time.in_units("s"), - num=self.bin_count + 1) - # Figure out which bins the stars go into. - inds = np.digitize(ct_stars.in_units("s"), self.time_bins) - 1 - # Sum up the stars created in each time bin. - self.mass_bins = YTArray( - np.zeros(self.bin_count + 1, dtype='float64'), "Msun" - ) - for index in np.unique(inds): - self.mass_bins[index] += (mass_stars[inds == index]).sum() - # We will want the time taken between bins. - self.time_bins_dt = self.time_bins[1:] - self.time_bins[:-1] - - def attach_arrays(self): - """ - Attach convenience arrays to the class for easy access. - """ - if self.ds_provided: - try: - vol = self._data_source[ - 'cell_volume'].in_units('Mpccm ** 3').sum() - except AttributeError: - # If we're here, this is probably a HOPHalo object, and we - # can get the volume this way. - ds = self._data_source.get_sphere() - vol = ds['cell_volume'].in_units('Mpccm ** 3').sum() - else: - vol = self.volume.in_units('Mpccm ** 3') - - # Use the center of the time_bin, not the left edge. - self.time = 0.5 * \ - (self.time_bins[1:] + self.time_bins[:-1]).in_units('yr') - self.lookback_time = self.time_now - self.time # now in code_time... 
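The digitize-and-sum logic in build_dist above reduces to a short NumPy recipe. A minimal sketch of the same approach with plain arrays and no unit handling (all inputs hypothetical):

import numpy as np

def sfr_from_particles(creation_time_s, star_mass_msun, t_now_s, bins=300):
    # Sketch only: mirrors the removed build_dist, stripped of YTArray bookkeeping.
    time_bins = np.linspace(creation_time_s.min() * 1.01, t_now_s, num=bins + 1)
    inds = np.digitize(creation_time_s, time_bins) - 1
    mass_bins = np.zeros(bins + 1)
    for index in np.unique(inds):
        mass_bins[index] += star_mass_msun[inds == index].sum()
    dt_s = time_bins[1:] - time_bins[:-1]           # width of each bin in seconds
    sec_per_yr = 3.156e7
    return mass_bins[:-1] / (dt_s / sec_per_yr)     # star formation rate per bin, Msun/yr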
- self.redshift = self.cosm.z_from_t(self.time) - - self.Msol_yr = ( - self.mass_bins[:-1] / self.time_bins_dt[:]).in_units('Msun/yr') - # changed vol from mpc to mpccm used in literature - self.Msol_yr_vol = self.Msol_yr / vol - - self.Msol = self.mass_bins[:-1].in_units("Msun") - self.Msol_cumulative = self.Msol.cumsum() - - def write_out(self, name="StarFormationRate.out"): - r"""Write out the star analysis to a text file *name*. The columns are in - order. - - The columns in the output file are: - 1. Time (yrs) - 2. Look-back time (yrs) - 3. Redshift - 4. Star formation rate in this bin per year (Msol/yr) - 5. Star formation rate in this bin per year - per Mpc**3 (Msol/yr/Mpc**3) - 6. Stars formed in this time bin (Msol) - 7. Cumulative stars formed up to this time bin (Msol) - - Parameters - ---------- - name : String - The name of the file to write to. Default = StarFormationRate.out. - - Examples - -------- - >>> import yt - >>> from yt.analysis_modules.star_analysis.api import StarFormationRate - >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") - >>> sp = ds.sphere([0.5, 0.5, 0.5], 0.1) - >>> sfr = StarFormationRate(ds, sp) - >>> sfr.write_out("stars-SFR.out") - """ - fp = open(name, "w") - fp.write( - "#time\tlookback\tredshift\tMsol/yr\tMsol/yr/Mpc3\tMsol\tcumMsol\t\n") - for i, time in enumerate(self.time): - line = "%1.5e %1.5e %1.5e %1.5e %1.5e %1.5e %1.5e\n" % \ - (time.in_units("yr"), # Time - self.lookback_time[i].in_units('yr'), # Lookback time - self.redshift[i], # Redshift - self.Msol_yr[i].in_units("Msun/yr"), - self.Msol_yr_vol[i], - self.Msol[i].in_units("Msun"), # Msol in bin - self.Msol_cumulative[i].in_units("Msun")) # cumulative - fp.write(line) - fp.close() - -# Begin Synthetic Spectrum Stuff. - -CHABRIER = { - "Z0001": "bc2003_hr_m22_chab_ssp.ised.h5", # /* 0.5% */ - "Z0004": "bc2003_hr_m32_chab_ssp.ised.h5", # /* 2% */ - "Z004": "bc2003_hr_m42_chab_ssp.ised.h5", # /* 20% */ - "Z008": "bc2003_hr_m52_chab_ssp.ised.h5", # /* 40% */ - "Z02": "bc2003_hr_m62_chab_ssp.ised.h5", # /* solar; 0.02 */ - "Z05": "bc2003_hr_m72_chab_ssp.ised.h5" # /* 250% */ -} - -SALPETER = { - "Z0001": "bc2003_hr_m22_salp_ssp.ised.h5", # /* 0.5% */ - "Z0004": "bc2003_hr_m32_salp_ssp.ised.h5", # /* 2% */ - "Z004": "bc2003_hr_m42_salp_ssp.ised.h5", # /* 20% */ - "Z008": "bc2003_hr_m52_salp_ssp.ised.h5", # /* 40% */ - "Z02": "bc2003_hr_m62_salp_ssp.ised.h5", # /* solar; 0.02 */ - "Z05": "bc2003_hr_m72_salp_ssp.ised.h5" # /* 250% */ -} - -# /* dividing line of metallicity; linear in log(Z/Zsun) */ -METAL1 = 0.01 # /* in units of Z/Zsun */ -METAL2 = 0.0632 -METAL3 = 0.2828 -METAL4 = 0.6325 -METAL5 = 1.5811 -METALS = np.array([METAL1, METAL2, METAL3, METAL4, METAL5]) - -# Translate METALS array digitize to the table dicts -MtoD = np.array(["Z0001", "Z0004", "Z004", "Z008", "Z02", "Z05"]) - -""" -This spectrum code is based on code from Ken Nagamine, converted from C to -Python. I've also reversed the order of elements in the flux arrays to be in -C-ordering, for faster memory access.""" - - -class SpectrumBuilder(object): - - r"""Initialize the data to build a summed flux spectrum for a - collection of stars using the models of Bruzual & Charlot (2003). - This function loads the necessary data tables into memory and - must be called before analyzing any star particles. - - Parameters - ---------- - ds : EnzoDataset object - bcdir : String - Path to directory containing Bruzual & Charlot h5 fit files. - model : String - Choice of Initial Metalicity Function model, 'chabrier' or - 'salpeter'. 
Default = 'chabrier'. - - Examples - -------- - >>> import yt - >>> from yt.analysis_modules.star_analysis.api import SpectrumBuilder - >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") - >>> spec = SpectrumBuilder(ds, "bc", model="salpeter") - """ - - def __init__(self, ds, bcdir="", model="chabrier", time_now=None, - star_filter=None): - self._ds = ds - if not os.path.isdir(bcdir): - bcdir = os.path.join(ytcfg.get("yt", "test_data_dir"), bcdir) - if not os.path.isdir(bcdir): - raise RuntimeError("Failed to locate %s" % bcdir) - self.bcdir = bcdir - self._filter = star_filter - self.filter_provided = self._filter is not None - if model == "chabrier": - self.model = CHABRIER - elif model == "salpeter": - self.model = SALPETER - # Set up for time conversion. - self.cosm = Cosmology( - hubble_constant=self._ds.hubble_constant, - omega_matter=self._ds.omega_matter, - omega_lambda=self._ds.omega_lambda) - # Find the time right now. - - if time_now is None: - self.time_now = self._ds.current_time - else: - self.time_now = time_now - - # Read the tables. - self.read_bclib() - - def read_bclib(self): - """ - Read in the age and wavelength bins, and the flux bins for each - metallicity. - """ - self.flux = {} - for file in self.model: - fname = self.bcdir + "/" + self.model[file] - fp = h5py.File(fname, mode='r') - self.age = YTArray(fp["agebins"][:], 'yr') # 1D floats - self.wavelength = fp["wavebins"][:] # 1D floats - self.flux[file] = fp["flam"][:, :] # 2D floats, [agebin, wavebin] - fp.close() - - def calculate_spectrum(self, data_source=None, star_mass=None, - star_creation_time=None, - star_metallicity_fraction=None, - star_metallicity_constant=None, - min_age=YTQuantity(0.0, 'yr')): - - r"""For the set of stars, calculate the collective spectrum. - Attached to the output are several useful objects: - - Attributes - ---------- - final_spec: array - The collective spectrum in units of flux binned in wavelength. - wavelength: array - The wavelength for the spectrum bins, in Angstroms. - total_mass: float - Total mass of all the stars. - avg_mass: float - Average mass of all the stars. - avg_metal: float - Average metallicity of all the stars. - - Parameters - ---------- - data_source : AMRRegion object, optional - The region from which stars are extracted for analysis. If this is - not specified, the next three parameters must be supplied. - star_mass : Array or list of floats - An array of star masses in Msun units. - star_creation_time : Array or list of floats - An array of star creation times in code units. - star_metallicity_fraction : Array or list of floats - An array of star metallicity fractions, in code - units (which is not Z/Zsun, rather just Z). - star_metallicity_constant : Float - If desired, override the star - metallicity fraction of all the stars to the given value. - min_age : Float - Removes young stars younger than this number (in years) - from the spectrum. Default: 0 (all stars). 
- - Examples - -------- - >>> import yt - >>> from yt.analysis_modules.star_analysis.api import SpectrumBuilder - >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") - >>> spec = SpectrumBuilder(ds, "bc", model="salpeter") - >>> sp = ds.sphere([0.5, 0.5, 0.5], 0.1) - >>> spec.calculate_spectrum(data_source=sp, min_age=1.e6) - """ - - # Initialize values - self.final_spec = np.zeros(self.wavelength.size, dtype='float64') - self._data_source = data_source - - if isinstance(star_mass, YTArray): - assert star_mass.units.same_dimensions_as(g.units) - elif star_mass is not None: - star_mass = YTArray(star_mass, 'Msun') - self.star_mass = star_mass - - if isinstance(star_creation_time, YTArray): - assert star_creation_time.units.same_dimensions_as(s.units) - elif star_creation_time is not None: - star_creation_time = self._ds.arr(star_creation_time, - 'code_time') - self.star_creation_time = star_creation_time - - if isinstance(star_metallicity_fraction, YTArray): - assert \ - star_metallicity_fraction.units.same_dimensions_as(Zsun.units) - elif star_metallicity_fraction is not None: - star_metallicity_fraction = self._ds.arr( - star_metallicity_fraction, 'code_metallicity' - ) - self.star_metallicity_fraction = star_metallicity_fraction - - if isinstance(min_age, YTQuantity): - assert min_age.units.same_dimensions_as(s.units) - elif min_age is not None: - min_age = YTQuantity(min_age, 'yr') - self.min_age = min_age - - # Check to make sure we have the right set of data. - if data_source is None: - if self.star_mass is None or self.star_creation_time is None or \ - (star_metallicity_fraction is None and - star_metallicity_constant is None): - mylog.error( - """ - If data_source is not provided, all of these parameters - need to be set: - star_mass (array, Msun), - star_creation_time (array, code units), - And one of: - star_metallicity_fraction (array, code units). - --OR-- - star_metallicity_constant (float, code units). - """) - return None - - if star_metallicity_fraction is not None: - self.star_metal = star_metallicity_fraction - else: - self.star_metal = \ - self._ds.arr(np.ones_like(self.star_mass) * - star_metallicity_constant, 'Zsun') - else: - # Get the data we need. - if self.filter_provided: - ct = self._filter['creation_time'] - # mass_stars = self._data_source[self._filter, "particle_mass"] - if star_metallicity_constant is None: - self.star_metal = self._data_source[ - self._filter, "metallicity_fraction"].in_units('Zsun') - else: - self.star_metal = \ - self._ds.arr(np.ones_like( - self._data_source[self._filter, - "metallicity_fraction"]) * - star_metallicity_constant, "Zsun") - else: - ct = self._data_source["creation_time"] - if ct is None: - errmsg = 'data source must have particle_age!' - mylog.error(errmsg) - raise RuntimeError(errmsg) - mask = ct > 0 - if not any(mask): - errmsg = 'all particles have age < 0' - mylog.error(errmsg) - raise RuntimeError(errmsg) - # type = self._data_source['particle_type'] - self.star_creation_time = ct[mask] - self.star_mass = self._data_source[ - 'particle_mass'][mask].in_units('Msun') - if star_metallicity_constant is not None: - self.star_metal = self._ds.arr( - np.ones_like(self.star_mass) * - star_metallicity_constant, 'Zsun') - else: - self.star_metal = self._data_source[ - "metallicity_fraction"][mask].in_units('Zsun') - # Age of star in years. 
- dt = (self.time_now - self.star_creation_time).in_units('yr') - dt[dt < 0.0] = 0.0 - # Remove young stars - sub = dt >= self.min_age - if len(sub) == 0: - return - self.star_metal = self.star_metal[sub] - dt = dt[sub] - self.star_creation_time = self.star_creation_time[sub] - # Figure out which METALS bin the star goes into. - Mindex = np.digitize(self.star_metal.in_units('Zsun'), METALS) - # Replace the indices with strings. - Mname = MtoD[Mindex] - # Figure out which age bin this star goes into. - Aindex = np.digitize(dt, self.age) - # Ratios used for the interpolation. - ratio1 = (dt - self.age[Aindex - 1]) / \ - (self.age[Aindex] - self.age[Aindex - 1]) - ratio2 = (self.age[Aindex] - dt) / \ - (self.age[Aindex] - self.age[Aindex - 1]) - # Sort the stars by metallicity and then by age, which should reduce - # memory access time by a little bit in the loop. - indexes = np.arange(self.star_metal.size) - sort = np.asarray([indexes[i] - for i in np.lexsort([indexes, Aindex, Mname])]) - Mname = Mname[sort] - Aindex = Aindex[sort] - ratio1 = ratio1[sort] - ratio2 = ratio2[sort] - self.star_mass = self.star_mass[sort] - self.star_creation_time = self.star_creation_time[sort] - self.star_metal = self.star_metal[sort] - - # Interpolate the flux for each star, adding to the total by weight. - pbar = get_pbar("Calculating fluxes", len(self.star_mass)) - for i, star in enumerate(izip(Mname, Aindex, ratio1, ratio2, - self.star_mass)): - # Pick the right age bin for the right flux array. - flux = self.flux[star[0]][star[1], :] - # Get the one just before the one above. - flux_1 = self.flux[star[0]][star[1] - 1, :] - # interpolate in log(flux), linear in time. - int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux) - # Add this flux to the total, weighted by mass. - self.final_spec += np.power(10., int_flux) * star[4] - pbar.update(i) - pbar.finish() - - # Normalize. - self.total_mass = self.star_mass.sum() - self.avg_mass = self.star_mass.mean() - tot_metal = (self.star_metal * self.star_mass).sum() - if tot_metal > 0: - self.avg_metal = math.log10( - (tot_metal / self.total_mass).in_units('Zsun')) - else: - self.avg_metal = -99 - - def write_out(self, name="sum_flux.out"): - r"""Write out the summed flux to a file. - - The output file from this function has two columns: Wavelength - (Angstrom) and Flux (Luminosity per unit wavelength, L_sun Ang^-1, - L_sun = 3.826 * 10^33 ergs s^-1.). - - Parameters - ---------- - name : String - Name of file to write to. Default = "sum_flux.out" - - Examples - -------- - >>> import yt - >>> from yt.analysis_modules.star_analysis.api import SpectrumBuilder - >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") - >>> sp = ds.sphere([0.5, 0.5, 0.5], 0.1) - >>> spec = SpectrumBuilder(ds, "bc", model="salpeter") - >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6) - >>> spec.write_out("spec.out") - """ - fp = open(name, 'w') - for i, wave in enumerate(self.wavelength): - fp.write("%1.5e\t%1.5e\n" % (wave, self.final_spec[i])) - fp.close() - - def write_out_SED(self, name="sum_SED.out", flux_norm=5200.): - r"""Write out the summed SED to a file. The file has two columns: - 1) Wavelength (Angstrom) - 2) Relative flux normalized to the flux at *flux_norm*. - It also will attach to the SpectrumBuilder object - an array *f_nu* which is the normalized flux, - identical to the disk output. - - Parameters - ---------- - name : String - Name of file to write to. 
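The flux lookup above interpolates linearly in age but in log10 of the tabulated flux. A compact sketch of that step, where age is a 1D array of age-bin edges and flux is the 2D [age bin, wavelength] table (both hypothetical stand-ins for the Bruzual & Charlot data):

import numpy as np

def interp_log_flux(dt, age, flux):
    # Sketch only: same ratio1/ratio2 weighting as the loop above, for a single star of age dt.
    i = np.digitize([dt], age)[0]                     # age bin just above dt
    r1 = (dt - age[i - 1]) / (age[i] - age[i - 1])
    r2 = (age[i] - dt) / (age[i] - age[i - 1])
    return 10.0 ** (r2 * np.log10(flux[i - 1]) + r1 * np.log10(flux[i]))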
Default = "sum_SED.out" - flux_norm : Float - Wavelength of the flux to normalize the distribution against. - Default = 5200 Ang. - - Examples - -------- - >>> import yt - >>> from yt.analysis_modules.star_analysis.api import SpectrumBuilder - >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006") - >>> spec = SpectrumBuilder(ds, "bc", model="salpeter") - >>> sp = ds.sphere([0.5, 0.5, 0.5], 0.1) - >>> spec.calculate_spectrum(data_source=sp, min_age = 1.e6) - >>> spec.write_out_SED(name = "SED.out", flux_norm = 6000.) - """ - # find the f_nu closest to flux_norm - fn_wavelength = np.argmin(abs(self.wavelength - flux_norm)) - f_nu = self.final_spec * np.power(self.wavelength, 2.) \ - / speed_of_light_cgs - # Normalize f_nu - self.f_nu = f_nu / f_nu[fn_wavelength] - # Write out. - fp = open(name, 'w') - for i, wave in enumerate(self.wavelength): - fp.write("%1.5e\t%1.5e\n" % (wave, self.f_nu[i])) - fp.close() diff --git a/yt/analysis_modules/sunrise_export/api.py b/yt/analysis_modules/sunrise_export/api.py index 222e97851b3..2e69ffab1c3 100644 --- a/yt/analysis_modules/sunrise_export/api.py +++ b/yt/analysis_modules/sunrise_export/api.py @@ -1,24 +1,7 @@ -""" -API for Sunrise Export code +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "The sunrise_exporter module has been deprecated. This code has been " - "moved to the yt attic (https://github.com/yt-project/yt_attic) and " - "will be removed in a future release.") - -from .sunrise_exporter import \ - export_to_sunrise +raise YTModuleRemoved( + "sunrise_export", + "https://github.com/yt-project/yt_attic", + "https://yt-attic.readthedocs.io/") diff --git a/yt/analysis_modules/sunrise_export/sunrise_exporter.py b/yt/analysis_modules/sunrise_export/sunrise_exporter.py deleted file mode 100644 index b3f55528ebc..00000000000 --- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py +++ /dev/null @@ -1,659 +0,0 @@ -""" -Code to export from yt to Sunrise - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import os -import time -import numpy as np - -import yt.utilities.lib.api as amr_utils -from yt.utilities.on_demand_imports import _astropy as astropy - -from yt import add_field -from yt.funcs import get_pbar, mylog -from yt.utilities.physical_ratios import \ - kpc_per_cm, \ - sec_per_year - -def export_to_sunrise(ds, fn, star_particle_type, fc, fwidth, ncells_wide=None, - debug=False,dd=None,**kwargs): - r"""Convert the contents of a dataset to a FITS file format that Sunrise - understands. - - This function will accept a dataset, and from that dataset - construct a depth-first octree containing all of the data in the parameter - file. This octree will be written to a FITS file. It will probably be - quite big, so use this function with caution! 
Sunrise is a tool for - generating synthetic spectra, available at - https://bitbucket.org/lutorm/sunrise/ . - - Parameters - ---------- - ds : `Dataset` - The dataset to convert. - fn : string - The filename of the output FITS file. - fc : array - The center of the extraction region - fwidth : array - Ensure this radius around the center is enclosed - Array format is (nx,ny,nz) where each element is floating point - in unitary position units where 0 is leftmost edge and 1 - the rightmost. - - Notes - ----- - - Note that the process of generating simulated images from Sunrise will - require substantial user input; see the Sunrise wiki at - https://bitbucket.org/lutorm/sunrise/ for more information. - - """ - fc = np.array(fc) - fwidth = np.array(fwidth) - - #we must round the dle,dre to the nearest root grid cells - ile,ire,super_level,ncells_wide= \ - round_ncells_wide(ds.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide) - - assert np.all((ile-ire)==(ile-ire)[0]) - mylog.info("rounding specified region:") - mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth))) - mylog.info("to [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire))) - fle,fre = ile*1.0/ds.domain_dimensions, ire*1.0/ds.domain_dimensions - mylog.info("to [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre))) - - #Create a list of the star particle properties in PARTICLE_DATA - #Include ID, parent-ID, position, velocity, creation_mass, - #formation_time, mass, age_m, age_l, metallicity, L_bol - particle_data,nstars = prepare_star_particles(ds,star_particle_type,fle=fle,fre=fre, - dd=dd,**kwargs) - - #Create the refinement hilbert octree in GRIDSTRUCTURE - #For every leaf (not-refined) cell we have a column n GRIDDATA - #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR - #since the octree always starts with one cell, an our 0-level mesh - #may have many cells, we must create the octree region sitting - #ontop of the first mesh by providing a negative level - output, refinement,dd,nleaf = prepare_octree(ds,ile,start_level=super_level, - debug=debug,dd=dd,center=fc) - - create_fits_file(ds,fn, refinement,output,particle_data,fle,fre) - - return fle,fre,ile,ire,dd,nleaf,nstars - -def export_to_sunrise_from_halolist(ds,fni,star_particle_type, - halo_list,domains_list=None,**kwargs): - """ - Using the center of mass and the virial radius - for a halo, calculate the regions to extract for sunrise. - The regions are defined on the root grid, and so individual - octs may span a large range encompassing many halos - and subhalos. Instead of repeating the oct extraction for each - halo, arrange halos such that we only calculate what we need to. - - Parameters - ---------- - ds : `Dataset` - The dataset to convert. We use the root grid to specify the domain. - fni : string - The filename of the output FITS file, but depends on the domain. The - dle and dre are appended to the name. - particle_type : int - The particle index for stars - halo_list : list of halo objects - The halo list objects must have halo.CoM and halo.Rvir, - both of which are assumed to be in unitary length units. - frvir (optional) : float - Ensure that CoM +/- frvir*Rvir is contained within each domain - domains_list (optional): dict of halos - Organize halos into a dict of domains. 
Keys are DLE/DRE tuple - values are a list of halos - """ - dn = ds.domain_dimensions - if domains_list is None: - domains_list = domains_from_halos(ds,halo_list,**kwargs) - if fni.endswith('.fits'): - fni = fni.replace('.fits','') - - for (num_halos, domain, halos) in domains_list: - dle,dre = domain - print('exporting: ') - print("[%03i %03i %03i] -"%tuple(dle), end=' ') - print("[%03i %03i %03i] "%tuple(dre), end=' ') - print(" with %i halos"%num_halos) - dle,dre = domain - dle, dre = np.array(dle),np.array(dre) - fn = fni - fn += "%03i_%03i_%03i-"%tuple(dle) - fn += "%03i_%03i_%03i"%tuple(dre) - fnf = fn + '.fits' - fnt = fn + '.halos' - if os.path.exists(fnt): - os.remove(fnt) - fh = open(fnt,'w') - for halo in halos: - fh.write("%i "%halo.ID) - fh.write("%6.6e "%(halo.CoM[0]*ds['kpc'])) - fh.write("%6.6e "%(halo.CoM[1]*ds['kpc'])) - fh.write("%6.6e "%(halo.CoM[2]*ds['kpc'])) - fh.write("%6.6e "%(halo.Mvir)) - fh.write("%6.6e \n"%(halo.Rvir*ds['kpc'])) - fh.close() - export_to_sunrise(ds, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn) - -def domains_from_halos(ds,halo_list,frvir=0.15): - domains = {} - dn = ds.domain_dimensions - for halo in halo_list: - fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir - dle,dre = np.floor(fle*dn), np.ceil(fre*dn) - dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int')) - if (dle,dre) in domains.keys(): - domains[(dle,dre)] += halo, - else: - domains[(dle,dre)] = [halo,] - #for niceness, let's process the domains in order of - #the one with the most halos - domains_list = [(len(v),k,v) for k,v in domains.items()] - domains_list.sort() - domains_list.reverse() #we want the most populated domains first - return domains_list - -def prepare_octree(ds,ile,start_level=0,debug=True,dd=None,center=None): - if dd is None: - #we keep passing dd around to not regenerate the data all the time - dd = ds.all_data() - try: - dd['MetalMass'] - except KeyError: - add_fields() #add the metal mass field that sunrise wants - def _temp_times_mass(field, data): - return data["Temperature"]*data["CellMassMsun"] - add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass) - fields = ["CellMassMsun","TemperatureTimesCellMassMsun", - "MetalMass","CellVolumeCode"] - - #gather the field data from octs - pbar = get_pbar("Retrieving field data",len(fields)) - field_data = [] - for fi,f in enumerate(fields): - field_data += dd[f], - pbar.update(fi) - pbar.finish() - del field_data - - #first we cast every cell as an oct - #ngrids = np.max([g.id for g in ds._grids]) - grids = {} - levels_all = {} - levels_finest = {} - for l in range(100): - levels_finest[l]=0 - levels_all[l]=0 - pbar = get_pbar("Initializing octs ",len(ds.index.grids)) - for gi,g in enumerate(ds.index.grids): - ff = np.array([g[f] for f in fields]) - og = amr_utils.OctreeGrid( - g.child_index_mask.astype('int32'), - ff.astype("float64"), - g.LeftEdge.astype("float64"), - g.ActiveDimensions.astype("int32"), - np.ones(1,dtype="float64")*g.dds[0], - g.Level, - g.id) - grids[g.id] = og - #how many refinement cells will we have? - #measure the 'volume' of each mesh, but many - #cells do not exist. an overestimate - levels_all[g.Level] += g.ActiveDimensions.prod() - #how many leaves do we have? - #this overestimates. a child of -1 means no child, - #but that cell may still be expanded on a submesh because - #(at least in ART) the meshes are inefficient. 
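As an aside, the depth-first "refined" encoding that the recursion below writes out (a truthy entry means the oct has eight children following depth-first, a falsy entry is a leaf with a data row) can be walked back with a tiny recursive reader. A sketch, assuming a strictly eight-way encoding:

def count_leaves(refined, i=0):
    # Sketch only: returns (number of leaves, index just past this subtree).
    if not refined[i]:
        return 1, i + 1
    i += 1
    total = 0
    for _ in range(8):
        n, i = count_leaves(refined, i)
        total += n
    return total, i

# e.g. a root oct whose eight children are all leaves:
# count_leaves([1, 0, 0, 0, 0, 0, 0, 0, 0]) -> (8, 9)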
- g.clear_data() - pbar.update(gi) - pbar.finish() - - #create the octree grid list - #oct_list = amr_utils.OctreeGridList(grids) - - #initialize arrays to be passed to the recursion algo - o_length = np.sum(levels_all.values()) - r_length = np.sum(levels_all.values()) - output = np.zeros((o_length,len(fields)), dtype='float64') - refined = np.zeros(r_length, dtype='int32') - levels = np.zeros(r_length, dtype='int32') - ids = np.zeros(r_length, dtype='int32') - pos = position() - hs = hilbert_state() - start_time = time.time() - if debug: - printing = lambda x: print_oct(x) - else: - printing = None - pbar = get_pbar("Building Hilbert DFO octree",len(refined)) - RecurseOctreeDepthFirstHilbert( - ile, - pos, - grids[0], #we always start on the root grid - hs, - output,refined,levels, - grids, - start_level, - ids, - debug=printing, - tracker=pbar) - pbar.finish() - #by time we get it here the 'current' position is actually - #for the next spot, so we're off by 1 - print('took %1.2e seconds'%(time.time()-start_time)) - print('refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos)) - print('first few entries :',refined[:12]) - output = output[:pos.output_pos] - refined = refined[:pos.refined_pos] - levels = levels[:pos.refined_pos] - return output,refined,dd,pos.refined_pos - -def print_oct(data,nd=None,nc=None): - ci = data['cell_index'] - l = data['level'] - g = data['grid'] - o = g.offset - fle = g.left_edges+g.dx*ci - fre = g.left_edges+g.dx*(ci+1) - if nd is not None: - fle *= nd - fre *= nd - if nc is not None: - fle -= nc - fre -= nc - txt = '%+1i ' - txt += '%+1i ' - txt += '%+1.3f '*3+'- ' - txt += '%+1.3f '*3 - if l<2: - print(txt%((l,)+(o,)+tuple(fle)+tuple(fre))) - -def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index] - pos, #the output hydro data position and refinement position - grid, #grid that this oct lives on (not its children) - hilbert, #the hilbert state - output, #holds the hydro data - refined, #holds the refinement status of Octs, 0s and 1s - levels, #For a given Oct, what is the level - grids, #list of all patch grids available to us - level, #starting level of the oct (not the children) - ids, #record the oct ID - debug=None,tracker=True): - if tracker is not None: - if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos) - if debug is not None: - debug(vars()) - child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]] - #record the refinement state - levels[pos.refined_pos] = level - is_leaf = (child_grid_index==-1) and (level>0) - refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf - ids[pos.refined_pos] = child_grid_index #True is oct, False is leaf - pos.refined_pos+= 1 - if is_leaf: #never subdivide if we are on a superlevel - #then we have hit a leaf cell; write it out - for field_index in range(grid.fields.shape[0]): - output[pos.output_pos,field_index] = \ - grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]] - pos.output_pos+= 1 - else: - assert child_grid_index>-1 - #find the grid we descend into - #then find the eight cells we break up into - subgrid = grids[child_grid_index] - #calculate the floating point LE of the children - #then translate onto the subgrid integer index - parent_fle = grid.left_edges + cell_index*grid.dx - subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx) - for (vertex, hilbert_child) in hilbert: - #vertex is a combination of three 0s and 1s to - #denote each of the 8 octs - if level < 0: - 
subgrid = grid #we don't actually descend if we're a superlevel - #child_ile = cell_index + np.array(vertex)*2**(-level) - child_ile = cell_index + np.array(vertex)*2**(-(level+1)) - child_ile = child_ile.astype('int') - else: - child_ile = subgrid_ile+np.array(vertex) - child_ile = child_ile.astype('int') - - RecurseOctreeDepthFirstHilbert(child_ile,pos, - subgrid,hilbert_child,output,refined,levels,grids, - level+1,ids = ids, - debug=debug,tracker=tracker) - - - -def create_fits_file(ds,fn, refined,output,particle_data,fle,fre): - #first create the grid structure - pyfits = astropy.pyfits - structure = pyfits.Column("structure", format="B", array=refined.astype("bool")) - cols = pyfits.ColDefs([structure]) - st_table = pyfits.new_table(cols) - st_table.name = "GRIDSTRUCTURE" - st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid") - fdx = fre-fle - for i,a in enumerate('xyz'): - st_table.header.update("min%s" % a, fle[i] * ds['kpc']) - st_table.header.update("max%s" % a, fre[i] * ds['kpc']) - st_table.header.update("n%s" % a, fdx[i]) - st_table.header.update("subdiv%s" % a, 2) - st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision") - - #not the hydro grid data - fields = ["CellMassMsun","TemperatureTimesCellMassMsun", - "MetalMass","CellVolumeCode"] - fd = {} - for i,f in enumerate(fields): - fd[f]=output[:,i] - del output - col_list = [] - size = fd["CellMassMsun"].size - tm = fd["CellMassMsun"].sum() - col_list.append(pyfits.Column("mass_gas", format='D', - array=fd['CellMassMsun'], unit="Msun")) - col_list.append(pyfits.Column("mass_metals", format='D', - array=fd['MetalMass'], unit="Msun")) - # col_list.append(pyfits.Column("mass_stars", format='D', - # array=np.zeros(size,dtype='D'),unit="Msun")) - # col_list.append(pyfits.Column("mass_stellar_metals", format='D', - # array=np.zeros(size,dtype='D'),unit="Msun")) - # col_list.append(pyfits.Column("age_m", format='D', - # array=np.zeros(size,dtype='D'),unit="yr*Msun")) - # col_list.append(pyfits.Column("age_l", format='D', - # array=np.zeros(size,dtype='D'),unit="yr*Msun")) - # col_list.append(pyfits.Column("L_bol", format='D', - # array=np.zeros(size,dtype='D'))) - # col_list.append(pyfits.Column("L_lambda", format='D', - # array=np.zeros(size,dtype='D'))) - # The units for gas_temp are really K*Msun. For older Sunrise versions - # you must set the unit to just K - col_list.append(pyfits.Column("gas_temp_m", format='D', - array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun")) - col_list.append(pyfits.Column("gas_teff_m", format='D', - array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun")) - col_list.append(pyfits.Column("cell_volume", format='D', - array=fd['CellVolumeCode'].astype('float64')*ds['kpc']**3.0, - unit="kpc^3")) - col_list.append(pyfits.Column("SFR", format='D', - array=np.zeros(size, dtype='D'))) - cols = pyfits.ColDefs(col_list) - mg_table = pyfits.new_table(cols) - mg_table.header.update("M_g_tot", tm) - mg_table.header.update("timeunit", "yr") - mg_table.header.update("tempunit", "K") - mg_table.name = "GRIDDATA" - - # Add a dummy Primary; might be a better way to do this! 
- col_list = [pyfits.Column("dummy", format="F", array=np.zeros(1, dtype='float32'))] - cols = pyfits.ColDefs(col_list) - md_table = pyfits.new_table(cols) - md_table.header.update("snaptime", ds.current_time*ds['years']) - md_table.name = "YT" - - phdu = pyfits.PrimaryHDU() - phdu.header.update('nbodycod','yt') - hls = [phdu, st_table, mg_table,md_table] - hls.append(particle_data) - hdus = pyfits.HDUList(hls) - hdus.writeto(fn, overwrite=True) - -def nearest_power(x): - #round to the nearest power of 2 - x-=1 - x |= x >> 1 - x |= x >> 2 - x |= x >> 4 - x |= x >> 8 - x |= x >> 16 - x+=1 - return x - -def round_ncells_wide(dds,fle,fre,nwide=None): - fc = (fle+fre)/2.0 - assert np.all(fle < fc) - assert np.all(fre > fc) - ic = np.rint(fc*dds) #nearest vertex to the center - ile,ire = ic.astype('int'),ic.astype('int') - cfle,cfre = fc.copy(),fc.copy() - idx = np.array([0,0,0]) #just a random non-equal array - width = 0.0 - if nwide is None: - #expand until borders are included and - #we have an equally-sized, non-zero box - idxq,out=False,True - while not out or not idxq: - cfle,cfre = fc-width, fc+width - ile = np.rint(cfle*dds).astype('int') - ire = np.rint(cfre*dds).astype('int') - idx = ire-ile - width += 0.1/dds - #quit if idxq is true: - idxq = idx[0]>0 and np.all(idx==idx[0]) - out = np.all(fle>cfle) and np.all(fre0 - maxlevel = -np.rint(np.log2(nwide)).astype('int') - assert abs(np.log2(nwide)-np.rint(np.log2(nwide)))<1e-5 #nwide should be a power of 2 - return ile,ire,maxlevel,nwide - -def round_nearest_edge(ds,fle,fre): - dds = ds.domain_dimensions - ile = np.floor(fle*dds).astype('int') - ire = np.ceil(fre*dds).astype('int') - - #this is the number of cells the super octree needs to expand to - #must round to the nearest power of 2 - width = np.max(ire-ile) - width = nearest_power(width) - - maxlevel = -np.rint(np.log2(width)).astype('int') - return ile,ire,maxlevel - -def prepare_star_particles(ds,star_type,pos=None,vel=None, age=None, - creation_time=None,initial_mass=None, - current_mass=None,metallicity=None, - radius = None, - fle=[0.,0.,0.],fre=[1.,1.,1.], - dd=None): - pyfits = astropy.pyfits - if dd is None: - dd = ds.all_data() - idxst = dd["particle_type"] == star_type - - #make sure we select more than a single particle - assert np.sum(idxst)>0 - if pos is None: - pos = np.array([dd["particle_position_%s" % ax] - for ax in 'xyz']).transpose() - idx = idxst & np.all(pos>fle,axis=1) & np.all(pos0 - pos = pos[idx]*ds['kpc'] #unitary units -> kpc - if age is None: - age = dd["particle_age"][idx]*ds['years'] # seconds->years - if vel is None: - vel = np.array([dd["particle_velocity_%s" % ax][idx] - for ax in 'xyz']).transpose() - # Velocity is cm/s, we want it to be kpc/yr - #vel *= (ds["kpc"]/ds["cm"]) / (365*24*3600.) 
- vel *= kpc_per_cm * sec_per_year - if initial_mass is None: - #in solar masses - initial_mass = dd["particle_mass_initial"][idx]*ds['Msun'] - if current_mass is None: - #in solar masses - current_mass = dd["particle_mass"][idx]*ds['Msun'] - if metallicity is None: - #this should be in dimensionless units, metals mass / particle mass - metallicity = dd["particle_metallicity"][idx] - assert np.all(metallicity>0.0) - if radius is None: - radius = initial_mass*0.0+10.0/1000.0 #10pc radius - formation_time = ds.current_time*ds['years']-age - #create every column - col_list = [] - col_list.append(pyfits.Column("ID", format="J", array=np.arange(current_mass.size).astype('int32'))) - col_list.append(pyfits.Column("parent_ID", format="J", array=np.arange(current_mass.size).astype('int32'))) - col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc")) - col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr")) - col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun")) - col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr")) - col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc")) - col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun")) - col_list.append(pyfits.Column("age", format="D", array=age,unit='yr')) - #For particles, Sunrise takes - #the dimensionless metallicity, not the mass of the metals - col_list.append(pyfits.Column("metallicity", format="D", - array=metallicity,unit="Msun")) - - #make the table - cols = pyfits.ColDefs(col_list) - pd_table = pyfits.new_table(cols) - pd_table.name = "PARTICLEDATA" - - #make sure we have nonzero particle number - assert pd_table.data.shape[0]>0 - return pd_table,np.sum(idx) - - -def add_fields(): - """Add three Eulerian fields Sunrise uses""" - def _MetalMass(field, data): - return data["Metallicity"] * data["CellMassMsun"] - - def _convMetalMass(data): - return 1.0 - add_field("MetalMass", function=_MetalMass, - convert_function=_convMetalMass) - def _initial_mass_cen_ostriker(field, data): - # SFR in a cell. 
This assumes stars were created by the Cen & Ostriker algorithm - # Check Grid_AddToDiskProfile.C and star_maker7.src - star_mass_ejection_fraction = data.ds.get_parameter("StarMassEjectionFraction",float) - xv1 = ((data.ds["InitialTime"] - data["creation_time"]) - / data["dynamical_time"]) - denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*np.exp(-xv1))) - minitial = data["ParticleMassMsun"] / denom - return minitial - - add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker) - - -class position: - def __init__(self): - self.output_pos = 0 - self.refined_pos = 0 - -class hilbert_state(): - def __init__(self,dim=None,sgn=None,octant=None): - if dim is None: dim = [0,1,2] - if sgn is None: sgn = [1,1,1] - if octant is None: octant = 5 - self.dim = dim - self.sgn = sgn - self.octant = octant - def flip(self,i): - self.sgn[i]*=-1 - def swap(self,i,j): - temp = self.dim[i] - self.dim[i]=self.dim[j] - self.dim[j]=temp - axis = self.sgn[i] - self.sgn[i] = self.sgn[j] - self.sgn[j] = axis - def reorder(self,i,j,k): - ndim = [self.dim[i],self.dim[j],self.dim[k]] - nsgn = [self.sgn[i],self.sgn[j],self.sgn[k]] - self.dim = ndim - self.sgn = nsgn - def copy(self): - return hilbert_state([self.dim[0],self.dim[1],self.dim[2]], - [self.sgn[0],self.sgn[1],self.sgn[2]], - self.octant) - def descend(self,o): - child = self.copy() - child.octant = o - if o==0: - child.swap(0,2) - elif o==1: - child.swap(1,2) - elif o==2: - pass - elif o==3: - child.flip(0) - child.flip(2) - child.reorder(2,0,1) - elif o==4: - child.flip(0) - child.flip(1) - child.reorder(2,0,1) - elif o==5: - pass - elif o==6: - child.flip(1) - child.flip(2) - child.swap(1,2) - elif o==7: - child.flip(0) - child.flip(2) - child.swap(0,2) - return child - - def __iter__(self): - vertex = [0,0,0] - j=0 - for i in range(3): - vertex[self.dim[i]] = 0 if self.sgn[i]>0 else 1 - yield vertex, self.descend(j) - vertex[self.dim[0]] += self.sgn[0] - j+=1 - yield vertex, self.descend(j) - vertex[self.dim[1]] += self.sgn[1] - j+=1 - yield vertex, self.descend(j) - vertex[self.dim[0]] -= self.sgn[0] - j+=1 - yield vertex, self.descend(j) - vertex[self.dim[2]] += self.sgn[2] - j+=1 - yield vertex, self.descend(j) - vertex[self.dim[0]] += self.sgn[0] - j+=1 - yield vertex, self.descend(j) - vertex[self.dim[1]] -= self.sgn[1] - j+=1 - yield vertex, self.descend(j) - vertex[self.dim[0]] -= self.sgn[0] - j+=1 - yield vertex, self.descend(j) - diff --git a/yt/analysis_modules/sunyaev_zeldovich/__init__.py b/yt/analysis_modules/sunyaev_zeldovich/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/sunyaev_zeldovich/api.py b/yt/analysis_modules/sunyaev_zeldovich/api.py index 8461b1990f3..e32ec19a91b 100644 --- a/yt/analysis_modules/sunyaev_zeldovich/api.py +++ b/yt/analysis_modules/sunyaev_zeldovich/api.py @@ -1,21 +1,7 @@ -""" -API for sunyaev_zeldovich -""" -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- +from yt.utilities.exceptions import \ + YTModuleRemoved -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "Development of the SZProjection module has been moved to " - "the yt_astro_analysis package. 
This version is deprecated " - "and will be removed from yt in a future release. See " - "https://github.com/yt-project/yt_astro_analysis for further " - "information.") - -from .projection import SZProjection +raise YTModuleRemoved( + "sunyaev_zeldovich", + "https://github.com/yt-project/yt_astro_analysis", + "https://yt-astro-analysis.readthedocs.io/") diff --git a/yt/analysis_modules/sunyaev_zeldovich/projection.py b/yt/analysis_modules/sunyaev_zeldovich/projection.py deleted file mode 100644 index 9d901d25393..00000000000 --- a/yt/analysis_modules/sunyaev_zeldovich/projection.py +++ /dev/null @@ -1,510 +0,0 @@ -""" -Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least -version 1.1.1) to be downloaded and installed: - -http://www.jb.man.ac.uk/~jchluba/Science/SZpack/SZpack.html - -For details on the computations involved please refer to the following references: - -Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778 -Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206 -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.config import \ - ytcfg -from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb -from yt.funcs import fix_axis, get_pbar -from yt.visualization.volume_rendering.off_axis_projection import \ - off_axis_projection -from yt.utilities.parallel_tools.parallel_analysis_interface import \ - communication_system, parallel_root_only -from yt import units -from yt.utilities.on_demand_imports import _astropy - -import numpy as np - -I0 = (2*(kboltz*Tcmb)**3/((hcgs*clight)**2)/units.sr).in_units("MJy/steradian") - -try: - import SZpack -except ImportError: - pass - -vlist = "xyz" -def setup_sunyaev_zeldovich_fields(ds): - def _t_squared(field, data): - return data["gas","density"]*data["gas","kT"]*data["gas","kT"] - ds.add_field(("gas", "t_squared"), function = _t_squared, - units="g*keV**2/cm**3") - - def _beta_par_squared(field, data): - return data["gas","beta_par"]**2/data["gas","density"] - ds.add_field(("gas","beta_par_squared"), function = _beta_par_squared, - units="g/cm**3") - - def _beta_perp_squared(field, data): - return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"] - ds.add_field(("gas","beta_perp_squared"), function = _beta_perp_squared, - units="g/cm**3") - - def _t_beta_par(field, data): - return data["gas","kT"]*data["gas","beta_par"] - ds.add_field(("gas","t_beta_par"), function = _t_beta_par, - units="keV*g/cm**3") - - def _t_sz(field, data): - return data["gas","density"]*data["gas","kT"] - ds.add_field(("gas","t_sz"), function = _t_sz, - units="keV*g/cm**3") - -def generate_beta_par(L): - def _beta_par(field, data): - vpar = data["density"]*(data["velocity_x"]*L[0]+ - data["velocity_y"]*L[1]+ - data["velocity_z"]*L[2]) - return vpar/clight - return _beta_par - -class SZProjection(object): - r""" Initialize a SZProjection object. - - Parameters - ---------- - ds : ~yt.data_objects.static_output.Dataset - The dataset - freqs : array_like - The frequencies (in GHz) at which to compute the SZ spectral distortion. - mue : float, optional - Mean molecular weight for determining the electron number density. 
- high_order : boolean, optional - Should we calculate high-order moments of velocity and temperature? - - Examples - -------- - >>> freqs = [90., 180., 240.] - >>> szprj = SZProjection(ds, freqs, high_order=True) - """ - def __init__(self, ds, freqs, mue=1.143, high_order=False): - - self.ds = ds - self.num_freqs = len(freqs) - self.high_order = high_order - self.freqs = ds.arr(freqs, "GHz") - self.mueinv = 1./mue - self.xinit = hcgs*self.freqs.in_units("Hz")/(kboltz*Tcmb) - self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs] - self.data = {} - - self.display_names = {} - self.display_names["TeSZ"] = r"$\mathrm{T_e}$" - self.display_names["Tau"] = r"$\mathrm{\tau}$" - - for f, field in zip(self.freqs, self.freq_fields): - self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % int(f) - - def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None): - r""" Make an on-axis projection of the SZ signal. - - Parameters - ---------- - axis : integer or string - The axis of the simulation domain along which to make the SZprojection. - center : A sequence of floats, a string, or a tuple. - The coordinate of the center of the image. If set to 'c', 'center' or - left blank, the plot is centered on the middle of the domain. If set to - 'max' or 'm', the center will be located at the maximum of the - ('gas', 'density') field. Centering on the max or min of a specific - field is supported by providing a tuple such as ("min","temperature") or - ("max","dark_matter_density"). Units can be specified by passing in *center* - as a tuple containing a coordinate and string unit name or by passing - in a YTArray. If a list or unitless array is supplied, code units are - assumed. - width : tuple or a float. - Width can have four different formats to support windows with variable - x and y widths. They are: - - ================================== ======================= - format example - ================================== ======================= - (float, string) (10,'kpc') - ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) - float 0.2 - (float, float) (0.2, 0.3) - ================================== ======================= - - For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs - wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a - window that is 10 kiloparsecs wide along the x axis and 15 - kiloparsecs wide along the y axis. In the other two examples, code - units are assumed, for example (0.2, 0.3) requests a plot that has an - x width of 0.2 and a y width of 0.3 in code units. If units are - provided the resulting plot axis labels will use the supplied units. - nx : integer, optional - The dimensions on a side of the projection image. - source : yt.data_objects.data_containers.YTSelectionContainer, optional - If specified, this will be the data source used for selecting regions to project. 
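The frequency setup in __init__ above amounts to the dimensionless SZ frequency x = h*nu / (k_B * T_CMB). A standalone sketch with sample frequencies, writing out CGS constants explicitly (the class itself uses yt's physical_constants):

import numpy as np

# Sketch only: the three frequencies are arbitrary example values in GHz.
h_cgs = 6.626e-27        # erg s
k_B_cgs = 1.381e-16      # erg / K
T_cmb = 2.726            # K (approximate; the class uses yt's Tcmb constant)
freqs_GHz = np.array([90.0, 180.0, 240.0])
x = h_cgs * (freqs_GHz * 1.0e9) / (k_B_cgs * T_cmb)   # roughly 1.6, 3.2, 4.2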
- - Examples - -------- - >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere) - """ - axis = fix_axis(axis, self.ds) - ctr, dctr = self.ds.coordinates.sanitize_center(center, axis) - width = self.ds.coordinates.sanitize_width(axis, width, None) - - L = np.zeros(3) - L[axis] = 1.0 - - beta_par = generate_beta_par(L) - self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3") - setup_sunyaev_zeldovich_fields(self.ds) - proj = self.ds.proj("density", axis, center=ctr, data_source=source) - frb = proj.to_frb(width[0], nx, height=width[1]) - dens = frb["density"] - Te = frb["t_sz"]/dens - bpar = frb["beta_par"]/dens - omega1 = frb["t_squared"]/dens/(Te*Te) - 1. - bperp2 = np.zeros((nx,nx)) - sigma1 = np.zeros((nx,nx)) - kappa1 = np.zeros((nx,nx)) - if self.high_order: - bperp2 = frb["beta_perp_squared"]/dens - sigma1 = frb["t_beta_par"]/dens/Te - bpar - kappa1 = frb["beta_par_squared"]/dens - bpar*bpar - tau = sigma_thompson*dens*self.mueinv/mh - - nx,ny = frb.buff_size - self.bounds = frb.bounds - self.dx = (frb.bounds[1]-frb.bounds[0])/nx - self.dy = (frb.bounds[3]-frb.bounds[2])/ny - self.nx = nx - - self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar), - np.array(omega1), np.array(sigma1), - np.array(kappa1), np.array(bperp2)) - - self.ds.field_info.pop(("gas","beta_par")) - - def off_axis(self, L, center="c", width=(1.0, "unitary"), depth=(1.0,"unitary"), - nx=800, nz=800, north_vector=None, no_ghost=False, source=None): - r""" Make an off-axis projection of the SZ signal. - - Parameters - ---------- - L : array_like - The normal vector of the projection. - center : A sequence of floats, a string, or a tuple. - The coordinate of the center of the image. If set to 'c', 'center' or - left blank, the plot is centered on the middle of the domain. If set to - 'max' or 'm', the center will be located at the maximum of the - ('gas', 'density') field. Centering on the max or min of a specific - field is supported by providing a tuple such as ("min","temperature") or - ("max","dark_matter_density"). Units can be specified by passing in *center* - as a tuple containing a coordinate and string unit name or by passing - in a YTArray. If a list or unitless array is supplied, code units are - assumed. - width : tuple or a float. - Width can have four different formats to support windows with variable - x and y widths. They are: - - ================================== ======================= - format example - ================================== ======================= - (float, string) (10,'kpc') - ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) - float 0.2 - (float, float) (0.2, 0.3) - ================================== ======================= - - For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs - wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a - window that is 10 kiloparsecs wide along the x axis and 15 - kiloparsecs wide along the y axis. In the other two examples, code - units are assumed, for example (0.2, 0.3) requests a plot that has an - x width of 0.2 and a y width of 0.3 in code units. If units are - provided the resulting plot axis labels will use the supplied units. - depth : A tuple or a float - A tuple containing the depth to project through and the string - key of the unit: (width, 'unit'). If set to a float, code units - are assumed - nx : integer, optional - The dimensions on a side of the projection image. 
- nz : integer, optional - Deprecated, this is still in the function signature for API - compatibility - north_vector : a sequence of floats - A vector defining the 'up' direction in the plot. This - option sets the orientation of the slicing plane. If not - set, an arbitrary grid-aligned north-vector is chosen. - no_ghost: bool, optional - Optimization option for off-axis cases. If True, homogenized bricks will - extrapolate out from grid instead of interpolating from - ghost zones that have to first be calculated. This can - lead to large speed improvements, but at a loss of - accuracy/smoothness in resulting image. The effects are - less notable when the transfer function is smooth and - broad. Default: True - source : yt.data_objects.data_containers.YTSelectionContainer, optional - If specified, this will be the data source used for selecting regions - to project. - - Examples - -------- - >>> L = np.array([0.5, 1.0, 0.75]) - >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc")) - """ - wd = self.ds.coordinates.sanitize_width(L, width, depth) - w = tuple(el.in_units('code_length').v for el in wd) - ctr, dctr = self.ds.coordinates.sanitize_center(center, L) - res = (nx, nx) - - if source is None: - source = self.ds - - beta_par = generate_beta_par(L) - self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3") - setup_sunyaev_zeldovich_fields(self.ds) - - dens = off_axis_projection(source, ctr, L, w, res, "density", - north_vector=north_vector, no_ghost=no_ghost) - Te = off_axis_projection(source, ctr, L, w, res, "t_sz", - north_vector=north_vector, no_ghost=no_ghost)/dens - bpar = off_axis_projection(source, ctr, L, w, res, "beta_par", - north_vector=north_vector, no_ghost=no_ghost)/dens - omega1 = off_axis_projection(source, ctr, L, w, res, "t_squared", - north_vector=north_vector, no_ghost=no_ghost)/dens - omega1 = omega1/(Te*Te) - 1. 
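Note the pattern used by both on_axis and off_axis above: each quantity is projected pre-multiplied by density and then divided by the projected density, which yields mass-weighted line-of-sight averages. A plain NumPy sketch of the idea on a toy uniform grid (all arrays hypothetical):

import numpy as np

rho = np.random.rand(16, 16, 16)    # toy density cube
T = np.random.rand(16, 16, 16)      # toy temperature cube
dz = 1.0                            # uniform cell depth along the projection axis
T_mw = ((rho * T).sum(axis=2) * dz) / (rho.sum(axis=2) * dz)   # 16x16 mass-weighted map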
- if self.high_order: - bperp2 = off_axis_projection(source, ctr, L, w, res, "beta_perp_squared", - north_vector=north_vector, no_ghost=no_ghost)/dens - sigma1 = off_axis_projection(source, ctr, L, w, res, "t_beta_par", - north_vector=north_vector, no_ghost=no_ghost)/dens - sigma1 = sigma1/Te - bpar - kappa1 = off_axis_projection(source, ctr, L, w, res, "beta_par_squared", - north_vector=north_vector, no_ghost=no_ghost)/dens - kappa1 -= bpar - else: - bperp2 = np.zeros((nx,nx)) - sigma1 = np.zeros((nx,nx)) - kappa1 = np.zeros((nx,nx)) - tau = sigma_thompson*dens*self.mueinv/mh - - self.bounds = (-0.5*wd[0], 0.5*wd[0], -0.5*wd[1], 0.5*wd[1]) - self.dx = wd[0]/nx - self.dy = wd[1]/nx - self.nx = nx - - self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar), - np.array(omega1), np.array(sigma1), - np.array(kappa1), np.array(bperp2)) - - self.ds.field_info.pop(("gas","beta_par")) - - def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2): - - # Bad hack, but we get NaNs if we don't do something like this - small_beta = np.abs(bpar) < 1.0e-20 - bpar[small_beta] = 1.0e-20 - - comm = communication_system.communicators[-1] - - nx, ny = self.nx,self.nx - signal = np.zeros((self.num_freqs,nx,ny)) - xo = np.zeros(self.num_freqs) - - k = int(0) - - start_i = comm.rank*nx//comm.size - end_i = (comm.rank+1)*nx//comm.size - - pbar = get_pbar("Computing SZ signal.", nx*nx) - - for i in range(start_i, end_i): - for j in range(ny): - xo[:] = self.xinit[:] - SZpack.compute_combo_means(xo, tau[i,j], Te[i,j], - bpar[i,j], omega1[i,j], - sigma1[i,j], kappa1[i,j], bperp2[i,j]) - signal[:,i,j] = xo[:] - pbar.update(k) - k += 1 - - signal = comm.mpi_allreduce(signal) - - pbar.finish() - - for i, field in enumerate(self.freq_fields): - self.data[field] = I0*self.xinit[i]**3*signal[i,:,:] - self.data["Tau"] = self.ds.arr(tau, "dimensionless") - self.data["TeSZ"] = self.ds.arr(Te, "keV") - - @parallel_root_only - def write_fits(self, filename, sky_scale=None, sky_center=None, overwrite=True, - **kwargs): - r""" Export images to a FITS file. Writes the SZ distortion in all - specified frequencies as well as the mass-weighted temperature and the - optical depth. Distance units are in kpc, unless *sky_center* - and *scale* are specified. - - Parameters - ---------- - filename : string - The name of the FITS file to be written. - sky_scale : tuple - Conversion between an angle unit and a length unit, if sky - coordinates are desired, e.g. (1.0, "arcsec/kpc") - sky_center : tuple, optional - The (RA, Dec) coordinate in degrees of the central pixel. Must - be specified with *sky_scale*. - overwrite : boolean, optional - If the file already exists, do we overwrite? - **kwargs - Additional keyword arguments are passed to - :meth:`~astropy.io.fits.HDUList.writeto`. 
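A last annotation: _compute_intensity above parallelizes by assigning each MPI rank a contiguous block of image rows and summing the partial signals with an allreduce. The index arithmetic tiles the row range exactly; a sketch:

def row_range(rank, size, nx):
    # Sketch only: same start/end arithmetic as _compute_intensity above.
    return rank * nx // size, (rank + 1) * nx // size

# e.g. nx = 100 over 8 ranks -> (0, 12), (12, 25), ..., (87, 100): contiguous and exhaustive.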
- - Examples - -------- - >>> # This example just writes out a FITS file with kpc coords - >>> szprj.write_fits("SZbullet.fits", overwrite=False) - >>> # This example uses sky coords - >>> sky_scale = (1., "arcsec/kpc") # One arcsec per kpc - >>> sky_center = (30., 45., "deg") - >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale) - """ - from yt.visualization.fits_image import FITSImageData - - dx = self.dx.in_units("kpc") - dy = dx - - w = _astropy.pywcs.WCS(naxis=2) - w.wcs.crpix = [0.5*(self.nx+1)]*2 - w.wcs.cdelt = [dx.v,dy.v] - w.wcs.crval = [0.0,0.0] - w.wcs.cunit = ["kpc"]*2 - w.wcs.ctype = ["LINEAR"]*2 - - fib = FITSImageData(self.data, fields=self.data.keys(), wcs=w) - if sky_scale is not None and sky_center is not None: - fib.create_sky_wcs(sky_center, sky_scale) - fib.writeto(filename, overwrite=overwrite, **kwargs) - - @parallel_root_only - def write_png(self, filename_prefix, cmap_name=None, - axes_units="kpc", log_fields=None): - r""" Export images to PNG files. Writes the SZ distortion in all - specified frequencies as well as the mass-weighted temperature and the - optical depth. Distance units are in kpc. - - Parameters - ---------- - filename_prefix : string - The prefix of the image filenames. - - Examples - -------- - >>> szprj.write_png("SZsloshing") - """ - if cmap_name is None: - cmap_name = ytcfg.get("yt", "default_colormap") - - import matplotlib - matplotlib.use('Agg') - import matplotlib.pyplot as plt - if log_fields is None: log_fields = {} - ticks_font = matplotlib.font_manager.FontProperties(family='serif',size=16) - extent = tuple([bound.in_units(axes_units).value for bound in self.bounds]) - for field, image in self.items(): - data = image.copy() - vmin, vmax = image.min(), image.max() - negative = False - crossover = False - if vmin < 0 and vmax < 0: - data *= -1 - negative = True - if field in log_fields: - log_field = log_fields[field] - else: - log_field = True - if log_field: - formatter = matplotlib.ticker.LogFormatterMathtext() - norm = matplotlib.colors.LogNorm() - if vmin < 0 and vmax > 0: - crossover = True - linthresh = min(vmax, -vmin)/100. 
- norm=matplotlib.colors.SymLogNorm(linthresh, - vmin=vmin, vmax=vmax) - else: - norm = None - formatter = None - filename = filename_prefix+"_"+field+".png" - cbar_label = self.display_names[field] - units = self.data[field].units.latex_representation() - if units is not None and units != "": - cbar_label += r'$\ \ ('+units+r')$' - fig = plt.figure(figsize=(10.0,8.0)) - ax = fig.add_subplot(111) - cax = ax.imshow(data.d, norm=norm, extent=extent, cmap=cmap_name, origin="lower") - for label in ax.get_xticklabels(): - label.set_fontproperties(ticks_font) - for label in ax.get_yticklabels(): - label.set_fontproperties(ticks_font) - ax.set_xlabel(r"$\mathrm{x\ (%s)}$" % axes_units, fontsize=16) - ax.set_ylabel(r"$\mathrm{y\ (%s)}$" % axes_units, fontsize=16) - cbar = fig.colorbar(cax, format=formatter) - cbar.ax.set_ylabel(cbar_label, fontsize=16) - if negative: - cbar.ax.set_yticklabels(["-"+label.get_text() - for label in cbar.ax.get_yticklabels()]) - if crossover: - yticks = list(-10**np.arange(np.floor(np.log10(-vmin)), - np.rint(np.log10(linthresh))-1, -1)) + [0] + \ - list(10**np.arange(np.rint(np.log10(linthresh)), - np.ceil(np.log10(vmax))+1)) - cbar.set_ticks(yticks) - for label in cbar.ax.get_yticklabels(): - label.set_fontproperties(ticks_font) - fig.tight_layout() - plt.savefig(filename) - - @parallel_root_only - def write_hdf5(self, filename): - r"""Export the set of S-Z fields to a set of HDF5 datasets. - - Parameters - ---------- - filename : string - This file will be opened in "write" mode. - - Examples - -------- - >>> szprj.write_hdf5("SZsloshing.h5") - """ - for field, data in self.items(): - data.write_hdf5(filename, dataset_name=field) - - def keys(self): - return self.data.keys() - - def items(self): - return self.data.items() - - def values(self): - return self.data.values() - - def has_key(self, key): - return key in self.data.keys() - - def __getitem__(self, key): - return self.data[key] - - @property - def shape(self): - return (self.nx,self.nx) diff --git a/yt/analysis_modules/sunyaev_zeldovich/tests/__init__.py b/yt/analysis_modules/sunyaev_zeldovich/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py deleted file mode 100644 index 50fef56f5a3..00000000000 --- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Unit test the sunyaev_zeldovich analysis module. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.frontends.stream.api import load_uniform_grid -from yt.funcs import get_pbar -from yt.utilities.physical_ratios import \ - cm_per_kpc, \ - K_per_keV, \ - cm_per_km -from yt.utilities.physical_constants import \ - mh, \ - kboltz, \ - Tcmb, \ - hcgs, \ - clight, \ - sigma_thompson -from yt.testing import requires_module, assert_almost_equal -from yt.utilities.answer_testing.framework import requires_ds, \ - GenericArrayTest, data_dir_load, GenericImageTest -try: - from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0 -except ImportError: - pass -import numpy as np -try: - import SZpack -except ImportError: - pass - -mue = 1./0.88 -freqs = np.array([30., 90., 240.]) - -def setup(): - """Test specific setup.""" - from yt.config import ytcfg - ytcfg["yt", "__withintesting"] = "True" - -def full_szpack3d(ds, xo): - data = ds.index.grids[0] - dz = ds.index.get_smallest_dx().in_units("cm") - nx,ny,nz = data["density"].shape - dn = np.zeros((nx,ny,nz)) - Dtau = np.array(sigma_thompson*data["density"]/(mh*mue)*dz) - Te = data["kT"].ndarray_view() - betac = np.array(data["velocity_z"]/clight) - pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx) - for i in range(nx): - pbar.update(i) - for j in range(ny): - for k in range(nz): - dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k], - Te[i,j,k], betac[i,j,k], - 1.0, 0.0, 0.0, 1.0e-5) - pbar.finish() - return np.array(I0*xo**3*np.sum(dn, axis=2)) - -def setup_cluster(): - - R = 1000. - r_c = 100. - rho_c = 1.673e-26 - beta = 1. - T0 = 4. - nx,ny,nz = 16,16,16 - c = 0.17 - a_c = 30. - a = 200. - v0 = 300.*cm_per_km - ddims = (nx,ny,nz) - - x, y, z = np.mgrid[-R:R:nx*1j, - -R:R:ny*1j, - -R:R:nz*1j] - - r = np.sqrt(x**2+y**2+z**2) - - dens = np.zeros(ddims) - dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta) - temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c) - velz = v0*temp/(T0*K_per_keV) - - data = {} - data["density"] = (dens, "g/cm**3") - data["temperature"] = (temp, "K") - data["velocity_x"] = (np.zeros(ddims), "cm/s") - data["velocity_y"] = (np.zeros(ddims), "cm/s") - data["velocity_z"] = (velz, "cm/s") - - L = 2 * R * cm_per_kpc - bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L - - ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox) - ds.index - - return ds - -@requires_module("SZpack") -def test_projection(): - ds = setup_cluster() - nx,ny,nz = ds.domain_dimensions - xinit = np.array(1.0e9*hcgs*freqs/(kboltz*Tcmb)) - szprj = SZProjection(ds, freqs, mue=mue, high_order=True) - szprj.on_axis(2, nx=nx) - deltaI = np.zeros((3,nx,ny)) - for i in range(3): - deltaI[i,:,:] = full_szpack3d(ds, xinit[i]) - assert_almost_equal( - deltaI[i,:,:], np.array(szprj["%d_GHz" % int(freqs[i])]), 6) - -M7 = "DD0010/moving7_0010" -@requires_module("SZpack") -@requires_ds(M7) -def test_M7_onaxis(): - ds = data_dir_load(M7) - szprj = SZProjection(ds, freqs) - szprj.on_axis(2, nx=100) - def onaxis_array_func(): - return szprj.data - def onaxis_image_func(filename_prefix): - szprj.write_png(filename_prefix) - for test in [GenericArrayTest(ds, onaxis_array_func), - GenericImageTest(ds, onaxis_image_func, 12)]: - test_M7_onaxis.__name__ = test.description - yield test - -@requires_module("SZpack") -@requires_ds(M7) -def test_M7_offaxis(): - ds = data_dir_load(M7) - szprj = SZProjection(ds, freqs) - szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100) - def offaxis_array_func(): - return szprj.data - def 
offaxis_image_func(filename_prefix): - szprj.write_png(filename_prefix) - for test in [GenericArrayTest(ds, offaxis_array_func), - GenericImageTest(ds, offaxis_image_func, 12)]: - test_M7_offaxis.__name__ = test.description - yield test diff --git a/yt/analysis_modules/two_point_functions/api.py b/yt/analysis_modules/two_point_functions/api.py index 0bc1a96045d..6346eb057a6 100644 --- a/yt/analysis_modules/two_point_functions/api.py +++ b/yt/analysis_modules/two_point_functions/api.py @@ -1,26 +1,7 @@ -""" -API for two_point_functions +from yt.utilities.exceptions import \ + YTModuleRemoved - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - - -from yt.funcs import issue_deprecation_warning - -issue_deprecation_warning( - "The two_point_functions module has been deprecated. This code has been " - "moved to the yt attic (https://github.com/yt-project/yt_attic) and " - "will be removed in a future release.") - -from .two_point_functions import \ - TwoPointFunctions, \ - FcnSet +raise YTModuleRemoved( + "two_point_functions", + "https://github.com/yt-project/yt_attic", + "https://yt-attic.readthedocs.io/") diff --git a/yt/analysis_modules/two_point_functions/two_point_functions.py b/yt/analysis_modules/two_point_functions/two_point_functions.py deleted file mode 100644 index 530e73e768c..00000000000 --- a/yt/analysis_modules/two_point_functions/two_point_functions.py +++ /dev/null @@ -1,880 +0,0 @@ -""" -Two Point Functions Framework. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.utilities.on_demand_imports import _h5py as h5py -import numpy as np - -from yt.funcs import mylog -from yt.utilities.performance_counters import yt_counters -from yt.utilities.parallel_tools.parallel_analysis_interface import ParallelAnalysisInterface, parallel_blocking_call, parallel_root_only - -try: - from yt.utilities.kdtree.api import \ - fKD, free_tree, create_tree -except ImportError: - mylog.debug("The Fortran kD-Tree did not import correctly.") - -import math -import inspect -import time -from collections import defaultdict - -sep = 12 - -class TwoPointFunctions(ParallelAnalysisInterface): - r""" Initialize a two point functions object. - - Parameters - ---------- - total_values : Integer - How many total (global) pair calculations to run for each of the - functions specified. Default: 1000000. - comm_size : Integer - How entries are sent during communication. Default: 10000. - length_type : String - Controls the even spacing of the rulers lengths in - logarithmic or linear space, set by "log" or "lin", respectively. - Default: "lin". - length_number : Integer - Sets how many lengths to create, evenly spaced by the above - parameter. Default: 10. - length_range : Float - A min/max pair for the range of values to search the over - the simulated volume. Default: [sqrt(3)dx, 1/2*shortest box edge], - where dx is the smallest grid cell size. 
- vol_ratio : Integer - How to multiply-assign subvolumes to the parallel - tasks. This number must be an integer factor of the total number of tasks or - very bad things will happen. The default value of 1 will assign one task - to each subvolume, and there will be an equal number of subvolumes as tasks. - A value of 2 will assign two tasks to each subvolume and there will be - one-half as many subvolumes as tasks. - A value equal to the number of parallel tasks will result in each task - owning a complete copy of all the fields data, meaning each task will be - operating on the identical full volume. - Setting it to -1 will automatically adjust it such that each task - owns the entire volume. Default = 1. - salt : Integer - A number that will be added to the random number generator - seed. Use this if a different random series of numbers is desired when - keeping everything else constant from this set: (MPI task count, - number of ruler lengths, ruler min/max, number of functions, - number of point pairs per ruler length). Default = 0. - theta : Float - For random pairs of points, the second point is found by traversing - a distance along a ray set by the angle (phi, theta) from the first - point. To keep this angle constant, set ``theta`` to a value in the - range [0, pi]. Default = None, which will randomize theta for - every pair of points. - phi : Float - Similar to theta above, but the range of values is [0, 2*pi). - Default = None, which will randomize phi for every pair of points. - - Examples - -------- - >>> tpf = TwoPointFunctions(ds, ["velocity_x", "velocity_y", "velocity_z"], - ... total_values=1e5, comm_size=10000, - ... length_number=10, length_range=[1./128, .5], - ... length_type="log") - """ - def __init__(self, ds, fields, left_edge=None, right_edge=None, - total_values=1000000, comm_size=10000, length_type="lin", - length_number=10, length_range=None, vol_ratio = 1, - salt=0, theta=None, phi=None): - ParallelAnalysisInterface.__init__(self) - try: - fKD - except NameError: - raise ImportError("You need to install the Forthon kD-Tree") - self._fsets = [] - self.fields = fields - self.constant_theta = theta - self.constant_phi = phi - # MPI stuff. - self.size = self.comm.size - self.mine = self.comm.rank - self.vol_ratio = vol_ratio - if self.vol_ratio == -1: - self.vol_ratio = self.size - self.total_values = int(total_values / self.size) - # For communication. - self.recv_hooks = [] - self.send_hooks = [] - self.done_hooks = [] - self.comm_size = min(int(comm_size), self.total_values) - self.ds = ds - self.nlevels = ds.index.max_level - self.period = self.ds.domain_right_edge - self.ds.domain_left_edge - self.min_edge = min(self.period) - self.index = ds.index - self.center = (ds.domain_right_edge + ds.domain_left_edge)/2.0 - # Figure out the range of ruler lengths. - if length_range is None: - length_range = [math.sqrt(3) * self.ds.index.get_smallest_dx(), - self.min_edge/2.] - else: - if len(length_range) != 2: - raise ValueError("length_range must have two values.") - if length_range[1] <= length_range[0]: - raise ValueError("length_range[1] must be larger than length_range[0]") - if length_range[1] > self.min_edge/2.: - length_range[1] = self.min_edge/2. - mylog.info("Automatically adjusting length_range[1] to half the shortest box edge.") - if length_range[0] == -1 or length_range[0] == -1.: - mylog.info("Automatically adjusting length_range[0] to %1.5e." 
% \ - (math.sqrt(3) * self.ds.index.get_smallest_dx())) - length_range[0] = math.sqrt(3) * self.ds.index.get_smallest_dx() - # Make the list of ruler lengths. - if length_type == "lin": - self.lengths = np.linspace(length_range[0], length_range[1], - length_number) - elif length_type == "log": - self.lengths = np.logspace(math.log10(length_range[0]), - math.log10(length_range[1]), length_number) - else: - # Something went wrong. - raise SyntaxError("length_type is either \"lin\" or \"log\".") - # Subdivide the volume. - if not left_edge or not right_edge: - self.left_edge = self.ds.domain_left_edge - self.right_edge = self.ds.domain_right_edge - # This ds business below has to do with changes made for halo - # finding on subvolumes and serves no purpose here except - # compatibility. This is not the best policy, if I'm honest. - ds = ds.region([0.]*3, self.left_edge, self.right_edge) - padded, self.LE, self.RE, self.ds = \ - self.partition_index_3d(ds = ds, padding=0., - rank_ratio = self.vol_ratio) - else: - self.left_edge = left_edge - self.right_edge = right_edge - # We do this twice, first with no 'buffer' to get the unbuffered - # self.LE/RE, and then second to get a buffered self.ds. - padded, self.LE, self.RE, temp = \ - self.partition_region_3d(left_edge, right_edge, - rank_ratio=self.vol_ratio) - padded, temp, temp, self.ds = \ - self.partition_region_3d(left_edge - self.lengths[-1], \ - right_edge + self.lengths[-1], rank_ratio=self.vol_ratio) - mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds))) - self.width = self.ds.right_edge - self.ds.left_edge - self.mt = np.random.mtrand.RandomState(seed = 1234 * self.mine + salt) - - def add_function(self, function, out_labels, sqrt, corr_norm=None): - r"""Add a function to the list that will be evaluated at the - generated pairs of points. - - Parameters - ---------- - function : Function - The two point function of the form fcn(a, b, r1, r2, vec). - out_labels : List of strings - A list of strings labeling the outputs of the function. - sqrt : List of booleans - A list of booleans which when True will square-root the corresponding - element of the output in the text output (write_out_means()). - corr_norm : Float - Used when calculating two point correlations. If set, the output - of the function is divided by this number. Default = None. - - Examples - -------- - >>> f1 = tpf.add_function(function=rms_vel, out_labels=['RMSvdiff'], - ... sqrt=[True]) - """ - fargs = inspect.getargspec(function) - if len(fargs.args) != 5: - raise SyntaxError("The function %s needs five arguments." %\ - function.__name__) - out_labels = list(out_labels) - if len(out_labels) < 1: - raise SyntaxError("Please specify at least one out_labels for function %s." %\ - function.__name__) - sqrt = list(sqrt) - if len(sqrt) != len(out_labels): - raise SyntaxError("Please have the same number of elements in out_labels as in sqrt for function %s." %\ - function.__name__) - self._fsets.append(FcnSet(self, function, self.min_edge, - out_labels, sqrt,corr_norm)) - return self._fsets[-1] - - def __getitem__(self, key): - return self._fsets[key] - - def run_generator(self): - r"""After all the functions have been added, run the generator. - - Examples - -------- - >>> tpf.run_generator() - """ - yt_counters("run_generator") - # We need a function! - if len(self._fsets) == 0: - mylog.error("You need to add at least one function!") - return None - # Do all the startup tasks to get the grid points. 
- if self.nlevels == 0: - yt_counters("build_sort") - self._build_sort_array() - self.sort_done = False - yt_counters("build_sort") - else: - yt_counters("init_kd_tree") - self._init_kd_tree() - self.sort_done = True - yt_counters("init_kd_tree") - # Store the fields. - self.stored_fields = {} - yt_counters("getting data") - for field in self.fields: - self.stored_fields[field] = self.ds[field].copy() - self.ds.clear_data() - # If the arrays haven't been sorted yet and need to be, do that. - if not self.sort_done: - for field in self.fields: - self.stored_fields[field] = self.stored_fields[field][self.sort] - del self.sort - self.sort_done = True - yt_counters("getting data") - self._build_fields_vals() - yt_counters("big loop over lengths") - t_waiting = 0. - for bigloop, length in enumerate(self.lengths): - self._build_points_array() - if self.mine == 0: - mylog.info("Doing length %1.5e" % length) - # Things stop when this value below equals total_values. - self.generated_points = 0 - self.gen_array = np.zeros(self.size, dtype='int64') - self.comm_cycle_count = 0 - self.final_comm_cycle_count = 0 - self.sent_done = False - self._setup_done_hooks_on_root() - # While everyone else isn't done or I'm not done, we loop. - while self._should_cycle(): - self._setup_recv_arrays() - self._send_arrays() - t0 = time.time() - self.comm.mpi_Request_Waitall(self.send_hooks) - self.comm.mpi_Request_Waitall(self.recv_hooks) - t1 = time.time() - t_waiting += (t1-t0) - if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \ - #(np.abs(np.log10(np.abs(self.recv_points))) > 20).any(): - raise ValueError("self.recv_points is no good!") - self.points = self.recv_points.copy() - self.fields_vals = self.recv_fields_vals.copy() - self.gen_array = self.recv_gen_array.copy() - self._eval_points(length) - self.gen_array[self.mine] = self.generated_points - self.comm_cycle_count += 1 - if self.generated_points == self.total_values: - self._send_done_to_root() - if self.mine == 0: - mylog.info("Length (%d of %d) %1.5e took %d communication cycles to complete." % \ - (bigloop+1, len(self.lengths), length, self.comm_cycle_count)) - yt_counters("big loop over lengths") - if self.nlevels >= 1: - del fKD.pos, fKD.qv_many, fKD.nn_tags - free_tree(0) # Frees the kdtree object. - yt_counters("allsum") - self._allsum_bin_hits() - mylog.info("Spent %f seconds waiting for communication." % t_waiting) - yt_counters("allsum") - yt_counters("run_generator") - - def _init_kd_tree(self): - """ - Builds the kd tree of grid center points. - """ - # Grid cell centers. - mylog.info("Multigrid: Building kD-Tree.") - xp = self.ds["x"] - yp = self.ds["y"] - zp = self.ds["z"] - fKD.pos = np.asfortranarray(np.empty((3,xp.size), dtype='float64')) - # Normalize the grid points only within the kdtree. - fKD.pos[0, :] = xp[:] / self.period[0] - fKD.pos[1, :] = yp[:] / self.period[1] - fKD.pos[2, :] = zp[:] / self.period[2] - fKD.nn = 1 - fKD.sort = False - fKD.rearrange = True - create_tree(0) - - def _build_sort_array(self): - """ - When running on a unigrid simulation, the kD tree isn't necessary. - But we need to ensure that the points are sorted in the usual manner - allowing values to be found via array indices. 
- """ - mylog.info("Unigrid: finding cell centers.") - xp = self.ds["x"] - yp = self.ds["y"] - zp = self.ds["z"] - self.sizes = [np.unique(xp).size, np.unique(yp).size, np.unique(zp).size] - self.sort = np.lexsort([zp, yp, xp]) - del xp, yp, zp - self.ds.clear_data() - - def _build_fields_vals(self): - """ - Builds an array to store the field values array. - """ - self.fields_vals = np.empty((self.comm_size, len(self.fields)*2), \ - dtype='float64') - # At the same time build a dict to label the columns. - self.fields_columns = {} - for i,field in enumerate(self.fields): - self.fields_columns[field] = i - - def _build_points_array(self): - """ - Initializes the array that contains the random points as all negatives - to start with. - """ - self.points = np.ones((self.comm_size, 6), dtype='float64') * -1.0 - - def _setup_done_hooks_on_root(self): - """ - Opens non-blocking receives on root pointing to all the other tasks - """ - if self.mine != 0: - return - self.recv_done = {} - for task in range(self.size): - if task == self.mine: continue - self.recv_done[task] = np.zeros(1, dtype='int64') - self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \ - task, tag=15)) - - def _send_done_to_root(self): - """ - Tell the root process that I'm done. - """ - # If I've already done this, don't do it again. - if self.sent_done: return - if self.mine !=0: - # I send when I *think* things should finish. - self.send_done = np.ones(1, dtype='int64') * \ - (self.size / self.vol_ratio -1) + self.comm_cycle_count - self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \ - 0, tag=15)) - else: - # As root, I need to mark myself! - self.recv_done[0] = np.ones(1, dtype='int64') * \ - (self.size / self.vol_ratio -1) + self.comm_cycle_count - self.sent_done = True - - def _should_cycle(self): - """ - Determine if I should continue cycling the communication. - """ - if self.mine == 0: - # If other tasks aren't finished, this will return False. - status = self.comm.mpi_Request_Testall(self.done_hooks) - # Convolve this with with root's status. - status = status * (self.generated_points == self.total_values) - if status == 1: - # If they are all finished, meaning Testall returns True, - # and root has made its points, we find - # the biggest value in self.recv_done and stop there. - status = max(self.recv_done.values()) - else: - status = 0 - # Broadcast the status from root - we stop only if root thinks we should - # stop. - status = self.comm.mpi_bcast(status) - if status == 0: return True - if self.comm_cycle_count < status: - return True - # If we've come this far, we're done. - return False - - def _setup_recv_arrays(self): - """ - Creates the recv buffers and calls a non-blocking MPI receive pointing - to the left-hand neighbor. - """ - self.recv_points = np.ones((self.comm_size, 6), dtype='float64') * -1. - self.recv_fields_vals = np.zeros((self.comm_size, len(self.fields)*2), \ - dtype='float64') - self.recv_gen_array = np.zeros(self.size, dtype='int64') - self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \ - (self.mine-1)%self.size, tag=10)) - self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \ - (self.mine-1)%self.size, tag=20)) - self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_gen_array, \ - (self.mine-1)%self.size, tag=40)) - - def _send_arrays(self): - """ - Send the data arrays to the right-hand neighbor. 
- """ - self.send_hooks.append(self.comm.mpi_nonblocking_send(self.points,\ - (self.mine+1)%self.size, tag=10)) - self.send_hooks.append(self.comm.mpi_nonblocking_send(self.fields_vals,\ - (self.mine+1)%self.size, tag=20)) - self.send_hooks.append(self.comm.mpi_nonblocking_send(self.gen_array, \ - (self.mine+1)%self.size, tag=40)) - - def _allsum_bin_hits(self): - """ - Add up the hits to all the bins globally for all functions. - """ - for fset in self._fsets: - fset.too_low = self.comm.mpi_allreduce(fset.too_low, op='sum') - fset.too_high = self.comm.mpi_allreduce(fset.too_high, op='sum') - fset.binned = {} - if self.mine == 0: - mylog.info("Function %s had values out of range for these fields:" % \ - fset.function.__name__) - for i,field in enumerate(fset.out_labels): - mylog.info("Field %s had %d values too high and %d too low that were not binned." % \ - (field, fset.too_high[i], fset.too_low[i])) - for length in self.lengths: - fset.length_bin_hits[length] = \ - self.comm.mpi_allreduce(fset.length_bin_hits[length], op='sum') - # Find out how many were successfully binned. - fset.binned[length] = fset.length_bin_hits[length].sum() - # Normalize the counts. - fset.length_bin_hits[length] = \ - fset.length_bin_hits[length].astype('float64') / \ - fset.binned[length] - # Return it to its original shape. - fset.length_bin_hits[length] = \ - fset.length_bin_hits[length].reshape(fset.bin_number) - - def _pick_random_points(self, length, size): - """ - Picks out size random pairs separated by length *length*. - """ - # First make random points inside this subvolume. - r1 = np.empty((size,3), dtype='float64') - for dim in range(3): - r1[:,dim] = self.mt.uniform(low=self.ds.left_edge[dim], - high=self.ds.right_edge[dim], size=size) - # Next we find the second point, determined by a random - # theta, phi angle. See Eqns. 1 & 2 from - # http://mathworld.wolfram.com/SpherePointPicking.html, - # but phi and theta are switched to the Physics convention. - if self.constant_phi is None: - phi = self.mt.uniform(low=0, high=2.*math.pi, size=size) - else: phi = self.constant_phi * np.ones(size, dtype='float64') - if self.constant_theta is None: - v = self.mt.uniform(low=0., high=1, size=size) - theta = np.arccos(2 * v - 1) - else: theta = self.constant_theta * np.ones(size, dtype='float64') - r2 = np.empty((size,3), dtype='float64') - r2[:,0] = r1[:,0] + length * np.cos(phi) * np.sin(theta) - r2[:,1] = r1[:,1] + length * np.sin(phi) * np.sin(theta) - r2[:,2] = r1[:,2] + length * np.cos(theta) - # Reflect so it's inside the (full) volume. - r2 %= self.period - return (r1, r2) - - def _find_nearest_cell(self, points): - """ - Finds the closest grid cell for each point in a vectorized manner. - """ - if self.nlevels == 0: - pos = (points - self.ds.left_edge) / self.width - n = (self.sizes[2] * pos[:,2]).astype('int32') - n += self.sizes[2] * (self.sizes[1] * pos[:,1]).astype('int32') - n += self.sizes[2] * self.sizes[1] * (self.sizes[0] * pos[:,0]).astype('int32') - else: - # Normalize the points to a 1-period for use only within the kdtree. - points[:, 0] = points[:, 0] / self.period[0] - points[:, 1] = points[:, 1] / self.period[1] - points[:, 2] = points[:, 2] / self.period[2] - fKD.qv_many = points.T - fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64')) - fKD.find_many_nn_nearest_neighbors() - # The -1 is for fortran counting. 
- n = fKD.nn_tags[0,:] - 1 - return n - - def _get_fields_vals(self, points): - """ - Given points, return the values for the fields we need for those - points. - """ - # First find the grid data index field. - indices = self._find_nearest_cell(points) - results = np.empty((len(indices), len(self.fields)), dtype='float64') - # Put the field values into the columns of results. - for field in self.fields: - col = self.fields_columns[field] - results[:, col] = self.stored_fields[field][indices] - return results - - - def _eval_points(self, length): - # We need to loop over the points array at least once. Further - # iterations only happen if we have added new points to the array, - # but not as many as we want to, so we need to check again to see if - # we can put more points into the buffer. - added_points = True - while added_points: - # If we need to, add more points to the points array. - if self.generated_points < self.total_values: - # Look for 'empty' slots to put in new pairs. - select = (self.points[:,0] < 0) - ssum = select.sum() - # We'll generate only as many points as we need to/can. - size = min(ssum, self.total_values - self.generated_points) - (new_r1,new_r2) = self._pick_random_points(length, size) - self.generated_points += size - # If size != select.sum(), we need to pad the end of new_r1/r2 - # which is what is effectively happening below. - newpoints = np.ones((ssum, 6), dtype='float64') * -1. - newpoints[:size,:3] = new_r1 - newpoints[:size,3:] = new_r2 - # Now we insert them into self.points. - self.points[select] = newpoints - else: - added_points = False - - # If we have an empty buffer here, we can skip everything below. - if (self.points < 0).all(): - added_points = False # Not strictly required, but clearer. - break - - # Now we have a points array that is either full of unevaluated points, - # or I don't need to make any new points and I'm just processing the - # array. Start by finding the indices of the points I own. - self.points.shape = (self.comm_size*2, 3) # Doesn't make a copy - fast! - select = np.bitwise_or((self.points < self.ds.left_edge).any(axis=1), - (self.points >= self.ds.right_edge).any(axis=1)) - select = np.invert(select) - mypoints = self.points[select] - if mypoints.size > 0: - # Get the fields values. - results = self._get_fields_vals(mypoints) - # Put this into self.fields_vals. - self.fields_vals.shape = (self.comm_size*2, len(self.fields)) - self.fields_vals[select] = results - - # Put our arrays back into their original shapes cheaply! - if mypoints.size > 0: - self.fields_vals.shape = (self.comm_size, len(self.fields)*2) - self.points.shape = (self.comm_size, 6) - - # To run the functions, what is key is that the - # second point in the pair is ours. - second_points = self.points[:,3:] - select = np.bitwise_or((second_points < self.ds.left_edge).any(axis=1), - (second_points >= self.ds.right_edge).any(axis=1)) - select = np.invert(select) - if select.any(): - points_to_eval = self.points[select] - fields_to_eval = self.fields_vals[select] - - # Find the normal vector between our points. - vec = np.abs(points_to_eval[:,:3] - points_to_eval[:,3:]) - norm = np.sqrt(np.sum(np.multiply(vec,vec), axis=1)) - # I wish there was a better way to do this, but I can't find it. - for i, n in enumerate(norm): - vec[i] = np.divide(vec[i], n) - - # Now evaluate the functions. 
- for fcn_set in self._fsets: - fcn_results = fcn_set._eval_st_fcn(fields_to_eval,points_to_eval, - vec) - fcn_set._bin_results(length, fcn_results) - - # Now clear the buffers at the processed points. - self.points[select] = np.array([-1.]*6, dtype='float64') - - else: - # We didn't clear any points, so we should move on with our - # lives and pass this buffer along. - added_points = False - - @parallel_blocking_call - def write_out_means(self, fn = "%s.txt"): - r"""Writes out the weighted-average value for each function for - each dimension for each ruler length to a text file. The data is written - to files of the name 'function_name.txt' in the current working - directory. - - Examples - -------- - >>> tpf.write_out_means() - """ - for fset in self._fsets: - fp = self.comm.write_on_root(fn % fset.function.__name__) - fset._avg_bin_hits() - line = "# length".ljust(sep) - line += "count".ljust(sep) - for dim in fset.dims: - line += ("%s" % fset.out_labels[dim]).ljust(sep) - fp.write(line + "\n") - for length in self.lengths: - line = ("%1.5e" % length).ljust(sep) - line += ("%d" % fset.binned[length]).ljust(sep) - for dim in fset.dims: - if fset.sqrt[dim]: - line += ("%1.5e" % \ - math.sqrt(fset.length_avgs[length][dim])).ljust(sep) - else: - line += ("%1.5e" % \ - fset.length_avgs[length][dim]).ljust(sep) - line += "\n" - fp.write(line) - fp.close() - - @parallel_root_only - def write_out_arrays(self, fn = "%s.h5"): - r"""Writes out the raw probability bins and the bin edges to an HDF5 file - for each of the functions. The files are named - 'function_name.txt' and saved in the current working directory. - - Examples - -------- - >>> tpf.write_out_arrays() - """ - if self.mine == 0: - for fset in self._fsets: - f = h5py.File(fn % fset.function.__name__, mode="w") - bin_names = [] - prob_names = [] - bin_counts = [] - for dim in fset.dims: - f.create_dataset("/bin_edges_%02d_%s" % \ - (dim, fset.out_labels[dim]), \ - data=fset.bin_edges[dim]) - bin_names.append("/bin_edges_%02d_%s" % \ - (dim, fset.out_labels[dim])) - for i,length in enumerate(self.lengths): - f.create_dataset("/prob_bins_%05d" % i, \ - data=fset.length_bin_hits[length]) - prob_names.append("/prob_bins_%05d" % i) - bin_counts.append([fset.too_low.sum(), fset.binned[length], - fset.too_high.sum()]) - f.create_dataset("/bin_edges_names", data=bin_names) - #f.create_dataset("/prob_names", data=prob_names) - f.create_dataset("/lengths", data=self.lengths) - f.create_dataset("/counts", data=bin_counts) - f.close() - - @parallel_root_only - def write_out_correlation(self): - r"""A special output function for doing two point correlation functions. - Outputs the correlation function xi(r) in a text file - 'function_name_corr.txt' in the current working directory. - - Examples - -------- - >>> tpf.write_out_correlation() - """ - for fset in self._fsets: - # Only operate on correlation functions. - if fset.corr_norm is None: continue - fp = self.comm.write_on_root("%s_correlation.txt" % fset.function.__name__) - line = "# length".ljust(sep) - line += "\\xi".ljust(sep) - fp.write(line + "\n") - xi = fset._corr_sum_norm() - for length in self.lengths: - line = ("%1.5e" % length).ljust(sep) - line += ("%1.5e" % xi[length]).ljust(sep) - fp.write(line + "\n") - fp.close() - -class FcnSet(TwoPointFunctions): - def __init__(self,tpf, function, min_edge, out_labels, sqrt, corr_norm): - self.tpf = tpf # The overarching TPF class - self.function = function # Function to eval between the two points. 
- self.min_edge = min_edge # The length of the minimum edge of the box. - self.out_labels = out_labels # For output. - self.sqrt = sqrt # which columns to sqrt on output. - self.corr_norm = corr_norm # A number used to normalize a correlation function. - # These below are used to track how many times the function returns - # unbinned results. - self.too_low = np.zeros(len(self.out_labels), dtype='int32') - self.too_high = np.zeros(len(self.out_labels), dtype='int32') - - def set_pdf_params(self, bin_type="lin", bin_number=1000, bin_range=None): - r"""Set the parameters used to build the Probability Distribution Function - for each ruler length for this function. The values output by the - function are slotted into the bins described here. - - Parameters - ---------- - bin_type : String - Controls the edges of the bins spaced evenly in - logarithmic or linear space, set by "log" or "lin", respectively. - A single string, or list of strings for N-dim binning. - Default = "lin". - bin_number : Integer - Sets how many bins to create, evenly spaced by the above - parameter. A single integer, or a list of integers for N-dim - binning. Default = 1000. - bin_range : Float - A pair of values giving the range for the bins. - A pair of floats (a list), or a list of pairs for N-dim binning. - Default = None. - - Examples - -------- - >>> f1.set_pdf_params(bin_type='log', bin_range=[5e4, 5.5e13], - ... bin_number=1000) - """ - # This should be called after setSearchParams. - if not hasattr(self.tpf, "lengths"): - mylog.error("Please call setSearchParams() before calling setPDFParams().") - return None - # Make sure they're either all lists or only one is. - input = [bin_type, bin_number, bin_range] - lists = 0 - for thing in input: - if type(thing) == list: - lists += 1 - if lists > 1 and lists < 3: - mylog.error("Either all the inputs need to be lists, or only one.") - return None - # Make sure they're all the same length if they're lists. - if lists == 3: - first_len = 0 - for thing in input: - if first_len == 0: - first_len = len(thing) - if first_len == 0: - mylog.error("Input cannot be an empty list.") - return None - continue - if first_len != len(thing): - mylog.error("All the inputs need to have the same length.") - return None - # If they are not all lists, put the input into lists for convenience. - if lists == 1: - bin_type, bin_number = [bin_type], [bin_number] - bin_range = [bin_range] - self.bin_type = bin_type - self.bin_number = np.array(bin_number) - 1 - self.dims = range(len(bin_type)) - # Create the dict that stores the arrays to store the bin hits, and - # the arrays themselves. - self.length_bin_hits = {} - for length in self.tpf.lengths: - # It's easier to index flattened, but will be unflattened later. - self.length_bin_hits[length] = np.zeros(self.bin_number, - dtype='int64').flatten() - # Create the bin edges for each dimension. - # self.bins is indexed by dimension - self.bin_edges = {} - for dim in self.dims: - # Error check. - if len(bin_range[dim]) != 2: - raise ValueError("bin_range must have two values.") - if bin_range[dim][1] <= bin_range[dim][0]: - raise ValueError("bin_range[1] must be larger than bin_range[0]") - # Make the edges for this dimension. 
- if bin_type[dim] == "lin": - self.bin_edges[dim] = np.linspace(bin_range[dim][0], bin_range[dim][1], - bin_number[dim]) - elif bin_type[dim] == "log": - self.bin_edges[dim] = np.logspace(math.log10(bin_range[dim][0]), - math.log10(bin_range[dim][1]), bin_number[dim]) - else: - raise SyntaxError("bin_edges is either \"lin\" or \"log\".") - - def _eval_st_fcn(self, results, points, vec): - """ - Return the value of the function using the provided results. - """ - return self.function(results[:,:len(self.tpf.fields)], - results[:,len(self.tpf.fields):], points[:,:3], points[:,3:], vec) - """ - NOTE - A function looks like: - def stuff(a,b,r1,r2, vec): - return [(a[0] - b[0])/(a[1] + b[1])] - where a and b refer to different points in space and the indices - are for the different fields, which are given when the function is - added. The results need to be a list or array even if it's only one - item. - """ - - def _bin_results(self, length, results): - """ - Add hits to the bins corresponding to these results. length_hit_bins - is flattened, so we need to figure out the offset for this hit by - factoring the sizes of the other dimensions. - """ - hit_bin = np.zeros(results.shape[0], dtype='int64') - multi = 1 - good = np.ones(results.shape[0], dtype='bool') - for dim in range(len(self.out_labels)): - for d1 in range(dim): - multi *= self.bin_edges[d1].size - if dim == 0 and len(self.out_labels)==1: - try: - digi = np.digitize(results, self.bin_edges[dim]) - except ValueError: - # The user probably did something like - # return a * b rather than - # return a[0] * b[0], which will only happen - # for single field functions. - digi = np.digitize(results[0], self.bin_edges[dim]) - else: - digi = np.digitize(results[:,dim], self.bin_edges[dim]) - too_low = (digi == 0) - too_high = (digi == self.bin_edges[dim].size) - self.too_low[dim] += (too_low).sum() - self.too_high[dim] += (too_high).sum() - newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high)) - good = np.bitwise_and(good, newgood) - hit_bin += np.multiply((digi - 1), multi) - digi_bins = np.arange(self.length_bin_hits[length].size+1) - hist, digi_bins = np.histogram(hit_bin[good], digi_bins) - self.length_bin_hits[length] += hist - - def _dim_sum(self, a, dim): - """ - Given a multidimensional array a, this finds the sum over all the - elements leaving the dimension dim untouched. - """ - dims = np.arange(len(a.shape)) - dims = np.flipud(dims) - gt_dims = dims[dims > dim] - lt_dims = dims[dims < dim] - iter_dims = np.concatenate((gt_dims, lt_dims)) - for this_dim in iter_dims: - a = a.sum(axis=this_dim) - return a - - def _avg_bin_hits(self): - """ - For each dimension and length of bin_hits return the weighted average. - """ - self.length_avgs = defaultdict(dict) - for length in self.tpf.lengths: - for dim in self.dims: - self.length_avgs[length][dim] = \ - (self._dim_sum(self.length_bin_hits[length], dim) * \ - ((self.bin_edges[dim][:-1] + self.bin_edges[dim][1:]) / 2.)).sum() - - def _corr_sum_norm(self): - """ - Return the correlations xi for this function. We are tacitly assuming - that all correlation functions are one dimensional. 
- """ - xi = {} - for length in self.tpf.lengths: - xi[length] = -1 + np.sum(self.length_bin_hits[length] * \ - self.bin_edges[0][:-1]) / self.corr_norm - return xi diff --git a/yt/api.py b/yt/api.py index dfff4b49785..d9ec8215162 100644 --- a/yt/api.py +++ b/yt/api.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/arraytypes.py b/yt/arraytypes.py index 531928bca56..1cb0be494c2 100644 --- a/yt/arraytypes.py +++ b/yt/arraytypes.py @@ -1,22 +1,3 @@ -""" -We want to have flexible arrays, so we do it all in here, and then import from -this module. - -This is all probably overly-complicated, and should be removed at first -opportunity to ditch numarray. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import numpy.core.records as rec diff --git a/yt/config.py b/yt/config.py index 848465b86bd..82953a632cd 100644 --- a/yt/config.py +++ b/yt/config.py @@ -1,23 +1,6 @@ -""" -This module is very simple. It imports the configuration -we have written for yt. -Everything will be returned in a global config dictionary ``ytcfg`` - -""" - -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import warnings -from yt.extern.six.moves import configparser +import configparser ytcfg_defaults = dict( serialize = 'False', diff --git a/yt/convenience.py b/yt/convenience.py index 266952fb660..615d11969eb 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -1,22 +1,6 @@ -""" -Some convenience functions, objects, and iterators - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import os # Named imports -from yt.extern.six import string_types from yt.config import ytcfg from yt.funcs import mylog from yt.utilities.parameter_file_storage import \ @@ -51,7 +35,7 @@ def load(*args ,**kwargs): candidates = [] valid_file = [] for argno, arg in enumerate(args): - if isinstance(arg, string_types): + if isinstance(arg, str): if os.path.exists(arg): valid_file.append(True) elif arg.startswith("http"): @@ -97,7 +81,7 @@ def load(*args ,**kwargs): if len(candidates) == 0: if ytcfg.get("yt", "enzo_db") != '' \ and len(args) == 1 \ - and isinstance(args[0], string_types): + and isinstance(args[0], str): erdb = EnzoRunDatabase() fn = erdb.find_uuid(args[0]) n = "EnzoDataset" diff --git a/yt/data_objects/analyzer_objects.py b/yt/data_objects/analyzer_objects.py index 20d005a6746..c3db735d93d 100644 --- a/yt/data_objects/analyzer_objects.py +++ b/yt/data_objects/analyzer_objects.py @@ -1,22 +1,5 @@ -""" -Analyzer objects for time series datasets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import inspect -from yt.extern.six import add_metaclass - analysis_task_registry = {} class RegisteredTask(type): @@ -26,8 +9,7 @@ def __init__(cls, name, b, d): return analysis_task_registry[cls.__name__] = cls -@add_metaclass(RegisteredTask) -class AnalysisTask(object): +class AnalysisTask(metaclass = RegisteredTask): def __init__(self, *args, **kwargs): # This should only get called if the subclassed object diff --git a/yt/data_objects/api.py b/yt/data_objects/api.py index ef26bf66220..003137181b6 100644 --- a/yt/data_objects/api.py +++ b/yt/data_objects/api.py @@ -1,18 +1,3 @@ -""" -API for yt.data_objects - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .grid_patch import \ AMRGridPatch diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index c231306b7a4..e229ecea516 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -1,19 +1,3 @@ -""" -Data containers that require processing before they can be utilized. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from functools import wraps import fileinput @@ -58,12 +42,21 @@ from yt.units.unit_object import Unit from yt.units.yt_array import uconcatenate import yt.geometry.particle_deposit as particle_deposit +from yt.geometry.coordinates.cartesian_coordinates import all_data from yt.utilities.grid_data_format.writer import write_to_gdf from yt.fields.field_exceptions import \ NeedsOriginalGrid from yt.frontends.stream.api import load_uniform_grid +from yt.frontends.sph.data_structures import ParticleDataset from yt.units.yt_array import YTArray -import yt.extern.six as six +from yt.utilities.lib.pixelization_routines import \ + pixelize_sph_kernel_arbitrary_grid, \ + interpolate_sph_grid_gather, \ + interpolate_sph_positions_gather, \ + normalization_3d_utility, \ + normalization_1d_utility +from yt.extern.tqdm import tqdm +from yt.utilities.lib.cyoctree import CyOctree class YTStreamline(YTSelectionContainer1D): """ @@ -158,74 +151,15 @@ def _get_cut_mask(self, grid): self._ts[grid.id] = ts return mask - -class YTQuadTreeProj(YTSelectionContainer2D): - """ - This is a data object corresponding to a line integral through the - simulation domain. - - This object is typically accessed through the `proj` object that - hangs off of index objects. YTQuadTreeProj is a projection of a - `field` along an `axis`. The field can have an associated - `weight_field`, in which case the values are multiplied by a weight - before being summed, and then divided by the sum of that weight; the - two fundamental modes of operating are direct line integral (no - weighting) and average along a line of sight (weighting.) What makes - `proj` different from the standard projection mechanism is that it - utilizes a quadtree data structure, rather than the old mechanism for - projections. It will not run in parallel, but serial runs should be - substantially faster. Note also that lines of sight are integrated at - every projected finest-level cell. - - Parameters - ---------- - field : string - This is the field which will be "projected" along the axis. If - multiple are specified (in a list) they will all be projected in - the first pass. - axis : int - The axis along which to slice. Can be 0, 1, or 2 for x, y, z. - weight_field : string - If supplied, the field being projected will be multiplied by this - weight value before being integrated, and at the conclusion of the - projection the resultant values will be divided by the projected - `weight_field`. - center : array_like, optional - The 'center' supplied to fields that use it. Note that this does - not have to have `coord` as one value. Strictly optional. - data_source : `yt.data_objects.data_containers.YTSelectionContainer`, optional - If specified, this will be the data source used for selecting - regions to project. - method : string, optional - The method of projection to be performed. - "integrate" : integration along the axis - "mip" : maximum intensity projection - "sum" : same as "integrate", except that we don't multiply by the path length - WARNING: The "sum" option should only be used for uniform resolution grid - datasets, as other datasets may result in unphysical images. - style : string, optional - The same as the method keyword. Deprecated as of version 3.0.2. - Please use method keyword instead. - field_parameters : dict of items - Values to be passed as field parameters that can be - accessed by generated fields. 
- - Examples - -------- - - >>> ds = load("RedshiftOutput0005") - >>> prj = ds.proj("density", 0) - >>> print proj["density"] - """ +class YTProj(YTSelectionContainer2D): _key_fields = YTSelectionContainer2D._key_fields + ['weight_field'] - _type_name = "proj" _con_args = ('axis', 'field', 'weight_field') _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field') - def __init__(self, field, axis, weight_field = None, - center = None, ds = None, data_source = None, - style = None, method = "integrate", - field_parameters = None, max_level = None): - YTSelectionContainer2D.__init__(self, axis, ds, field_parameters) + + def __init__(self, field, axis, weight_field=None, center=None, ds=None, + data_source=None, style=None, method="integrate", + field_parameters=None, max_level=None): + super(YTProj, self).__init__(axis, ds, field_parameters) # Style is deprecated, but if it is set, then it trumps method # keyword. TODO: Remove this keyword and this check at some point in # the future. @@ -264,11 +198,8 @@ def __init__(self, field, axis, weight_field = None, for f in field: nodal_flag = self.ds._get_field_info(f).nodal_flag if any(nodal_flag): - raise RuntimeError("Nodal fields are currently not supported for projections.") - - if not self.deserialize(field): - self.get_data(field) - self.serialize() + raise RuntimeError("Nodal fields are currently not supported " + "for projections.") @property def blocks(self): @@ -276,56 +207,16 @@ def blocks(self): @property def field(self): - return [k for k in self.field_data.keys() if k not in self._container_fields] - - @property - def _mrep(self): - return MinimalProjectionData(self) - - def hub_upload(self): - self._mrep.upload() - - def deserialize(self, fields): - if not ytcfg.getboolean("yt", "serialize"): - return False - for field in fields: - self[field] = None - deserialized_successfully = False - store_file = self.ds.parameter_filename + '.yt' - if os.path.isfile(store_file): - deserialized_successfully = self._mrep.restore(store_file, self.ds) - - if deserialized_successfully: - mylog.info("Using previous projection data from %s" % store_file) - for field, field_data in self._mrep.field_data.items(): - self[field] = field_data - if not deserialized_successfully: - for field in fields: - del self[field] - return deserialized_successfully - - def serialize(self): - if not ytcfg.getboolean("yt", "serialize"): - return - self._mrep.store(self.ds.parameter_filename + '.yt') - - def _get_tree(self, nvals): - xax = self.ds.coordinates.x_axis[self.axis] - yax = self.ds.coordinates.y_axis[self.axis] - xd = self.ds.domain_dimensions[xax] - yd = self.ds.domain_dimensions[yax] - bounds = (self.ds.domain_left_edge[xax], - self.ds.domain_right_edge[xax], - self.ds.domain_left_edge[yax], - self.ds.domain_right_edge[yax]) - return QuadTree(np.array([xd,yd], dtype='int64'), nvals, - bounds, method = self.method) + return [k for k in self.field_data.keys() if k not in + self._container_fields] def get_data(self, fields = None): fields = fields or [] fields = self._determine_fields(ensure_list(fields)) # We need a new tree for every single set of fields we add if len(fields) == 0: return + if isinstance(self.ds, ParticleDataset): + return tree = self._get_tree(len(fields)) # This only needs to be done if we are in parallel; otherwise, we can # safely build the mesh as we go. 
@@ -336,8 +227,6 @@ def get_data(self, fields = None): with self.data_source._field_parameter_state(self.field_parameters): for chunk in parallel_objects(self.data_source.chunks( [], "io", local_only = True)): - mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", - chunk.ires.size, get_memory_usage()/1024.) if not _units_initialized: self._initialize_projected_units(fields, chunk) _units_initialized = True @@ -397,23 +286,47 @@ def get_data(self, fields = None): mylog.info("Projection completed") self.tree = tree - def _initialize_chunk(self, chunk, tree): - icoords = chunk.icoords - xax = self.ds.coordinates.x_axis[self.axis] - yax = self.ds.coordinates.y_axis[self.axis] - i1 = icoords[:,xax] - i2 = icoords[:,yax] - ilevel = chunk.ires * self.ds.ires_factor - tree.initialize_chunk(i1, i2, ilevel) + def to_pw(self, fields=None, center='c', width=None, origin='center-window'): + r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this + object. + + This is a bare-bones mechanism of creating a plot window from this + object, which can then be moved around, zoomed, and on and on. All + behavior of the plot window is relegated to that routine. + """ + pw = self._get_pw(fields, center, width, origin, 'Projection') + return pw + + def plot(self, fields=None): + if hasattr(self.data_source, "left_edge") and \ + hasattr(self.data_source, "right_edge"): + left_edge = self.data_source.left_edge + right_edge = self.data_source.right_edge + center = (left_edge + right_edge)/2.0 + width = right_edge - left_edge + xax = self.ds.coordinates.x_axis[self.axis] + yax = self.ds.coordinates.y_axis[self.axis] + lx, rx = left_edge[xax], right_edge[xax] + ly, ry = left_edge[yax], right_edge[yax] + width = (rx-lx), (ry-ly) + else: + width = self.ds.domain_width + center = self.ds.domain_center + pw = self._get_pw(fields, center, width, 'native', 'Projection') + pw.show() + return pw def _initialize_projected_units(self, fields, chunk): for field in self.data_source._determine_fields(fields): + if field in self._projected_units: + continue finfo = self.ds._get_field_info(*field) if finfo.units is None: # First time calling a units="auto" field, infer units and cache # for future field accesses. finfo.units = str(chunk[field].units) - field_unit = Unit(finfo.units, registry=self.ds.unit_registry) + field_unit = Unit(finfo.output_units, + registry=self.ds.unit_registry) if self.method == "mip" or self._sum_only: path_length_unit = Unit(registry=self.ds.unit_registry) else: @@ -432,11 +345,151 @@ def _initialize_projected_units(self, fields, chunk): else: self._projected_units[field] = field_unit +class YTParticleProj(YTProj): + """ + A projection operation optimized for SPH particle data. + """ + _type_name = "particle_proj" + def __init__(self, field, axis, weight_field=None, center=None, ds=None, + data_source=None, style=None, method="integrate", + field_parameters=None, max_level=None): + super(YTParticleProj, self).__init__( + field, axis, weight_field, center, ds, data_source, style, method, + field_parameters, max_level) + + def _handle_chunk(self, chunk, fields, tree): + raise NotImplementedError("Particle projections have not yet been " + "implemented") + + +class YTQuadTreeProj(YTProj): + """ + This is a data object corresponding to a line integral through the + simulation domain. + + This object is typically accessed through the `proj` object that + hangs off of index objects. YTQuadTreeProj is a projection of a + `field` along an `axis`. 
The field can have an associated + `weight_field`, in which case the values are multiplied by a weight + before being summed, and then divided by the sum of that weight; the + two fundamental modes of operating are direct line integral (no + weighting) and average along a line of sight (weighting.) What makes + `proj` different from the standard projection mechanism is that it + utilizes a quadtree data structure, rather than the old mechanism for + projections. It will not run in parallel, but serial runs should be + substantially faster. Note also that lines of sight are integrated at + every projected finest-level cell. + + Parameters + ---------- + field : string + This is the field which will be "projected" along the axis. If + multiple are specified (in a list) they will all be projected in + the first pass. + axis : int + The axis along which to slice. Can be 0, 1, or 2 for x, y, z. + weight_field : string + If supplied, the field being projected will be multiplied by this + weight value before being integrated, and at the conclusion of the + projection the resultant values will be divided by the projected + `weight_field`. + center : array_like, optional + The 'center' supplied to fields that use it. Note that this does + not have to have `coord` as one value. Strictly optional. + data_source : `yt.data_objects.data_containers.YTSelectionContainer`, optional + If specified, this will be the data source used for selecting + regions to project. + method : string, optional + The method of projection to be performed. + "integrate" : integration along the axis + "mip" : maximum intensity projection + "sum" : same as "integrate", except that we don't multiply by the path length + WARNING: The "sum" option should only be used for uniform resolution grid + datasets, as other datasets may result in unphysical images. + style : string, optional + The same as the method keyword. Deprecated as of version 3.0.2. + Please use method keyword instead. + field_parameters : dict of items + Values to be passed as field parameters that can be + accessed by generated fields. 
+ + Examples + -------- + + >>> ds = load("RedshiftOutput0005") + >>> prj = ds.proj("density", 0) + >>> print(proj["density"]) + """ + _type_name = "quad_proj" + def __init__(self, field, axis, weight_field=None, center=None, ds=None, + data_source=None, style=None, method="integrate", + field_parameters=None, max_level=None): + super(YTQuadTreeProj, self).__init__( + field, axis, weight_field, center, ds, data_source, style, method, + field_parameters, max_level) + + if not self.deserialize(field): + self.get_data(field) + self.serialize() + + @property + def _mrep(self): + return MinimalProjectionData(self) + + def hub_upload(self): + self._mrep.upload() + + def deserialize(self, fields): + if not ytcfg.getboolean("yt", "serialize"): + return False + for field in fields: + self[field] = None + deserialized_successfully = False + store_file = self.ds.parameter_filename + '.yt' + if os.path.isfile(store_file): + deserialized_successfully = self._mrep.restore(store_file, self.ds) + + if deserialized_successfully: + mylog.info("Using previous projection data from %s" % store_file) + for field, field_data in self._mrep.field_data.items(): + self[field] = field_data + if not deserialized_successfully: + for field in fields: + del self[field] + return deserialized_successfully + + def serialize(self): + if not ytcfg.getboolean("yt", "serialize"): + return + self._mrep.store(self.ds.parameter_filename + '.yt') + + def _get_tree(self, nvals): + xax = self.ds.coordinates.x_axis[self.axis] + yax = self.ds.coordinates.y_axis[self.axis] + xd = self.ds.domain_dimensions[xax] + yd = self.ds.domain_dimensions[yax] + bounds = (self.ds.domain_left_edge[xax], + self.ds.domain_right_edge[xax], + self.ds.domain_left_edge[yax], + self.ds.domain_right_edge[yax]) + return QuadTree(np.array([xd,yd], dtype='int64'), nvals, + bounds, method = self.method) + + def _initialize_chunk(self, chunk, tree): + icoords = chunk.icoords + xax = self.ds.coordinates.x_axis[self.axis] + yax = self.ds.coordinates.y_axis[self.axis] + i1 = icoords[:,xax] + i2 = icoords[:,yax] + ilevel = chunk.ires * self.ds.ires_factor + tree.initialize_chunk(i1, i2, ilevel) + def _handle_chunk(self, chunk, fields, tree): + mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", + chunk.ires.size, get_memory_usage()/1024.) if self.method == "mip" or self._sum_only: dl = self.ds.quan(1.0, "") else: - # This gets explicitly converted to cm ax_name = self.ds.coordinates.axis_name[self.axis] dl = chunk["index", "path_element_%s" % (ax_name)] # This is done for cases where our path element does not have a CGS @@ -463,36 +516,6 @@ def _handle_chunk(self, chunk, fields, tree): ilevel = chunk.ires * self.ds.ires_factor tree.add_chunk_to_tree(i1, i2, ilevel, v, w) - def to_pw(self, fields=None, center='c', width=None, origin='center-window'): - r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this - object. - - This is a bare-bones mechanism of creating a plot window from this - object, which can then be moved around, zoomed, and on and on. All - behavior of the plot window is relegated to that routine. 
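# A minimal end-to-end sketch of the machinery above, assuming a sample dataset:
# a weighted quadtree projection, the to_pw() hand-off, and the optional ".yt"
# sidecar caching controlled by the "serialize" config option that
# deserialize()/serialize() check. Dataset path and fields are assumptions.
import yt
from yt.config import ytcfg

ytcfg.set("yt", "serialize", "True")  # opt in to on-disk projection caching
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

# Density-weighted average temperature along x.
prj = ds.proj(("gas", "temperature"), 0, weight_field=("gas", "density"))
pw = prj.to_pw()   # hand off to a plot window, as defined in to_pw() above
pw.save()

# A second, identical projection can be restored from "<parameter_filename>.yt"
# by deserialize() (when the restore succeeds) instead of being recomputed.
prj_again = ds.proj(("gas", "temperature"), 0, weight_field=("gas", "density"))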
- """ - pw = self._get_pw(fields, center, width, origin, 'Projection') - return pw - - def plot(self, fields=None): - if hasattr(self.data_source, "left_edge") and \ - hasattr(self.data_source, "right_edge"): - left_edge = self.data_source.left_edge - right_edge = self.data_source.right_edge - center = (left_edge + right_edge)/2.0 - width = right_edge - left_edge - xax = self.ds.coordinates.x_axis[self.axis] - yax = self.ds.coordinates.y_axis[self.axis] - lx, rx = left_edge[xax], right_edge[xax] - ly, ry = left_edge[yax], right_edge[yax] - width = (rx-lx), (ry-ly) - else: - width = self.ds.domain_width - center = self.ds.domain_center - pw = self._get_pw(fields, center, width, 'native', 'Projection') - pw.show() - return pw - class YTCoveringGrid(YTSelectionContainer3D): """A 3D region with all data extracted to a single, specified resolution. Left edge should align with a cell boundary, but @@ -651,7 +674,7 @@ def _sanitize_edge(self, edge): edge_units.registry = self.ds.unit_registry else: edge_units = 'code_length' - return self.ds.arr(edge, edge_units) + return self.ds.arr(edge, edge_units, dtype='float64') def _reshape_vals(self, arr): if len(arr.shape) == 3: return arr @@ -687,20 +710,36 @@ def get_data(self, fields = None): "with nonzero num_ghost_zones." % self._num_ghost_zones) else: raise - if len(part) > 0: self._fill_particles(part) + + # checking if we have a sph particles + if len(part) == 0: + is_sph_field = False + else: + is_sph_field = self.ds.field_info[part[0]].is_sph_field + + if len(part) > 0 and len(alias) == 0: + if(is_sph_field): + self._fill_sph_particles(fields) + for field in fields: + if field in gen: + gen.remove(field) + else: + self._fill_particles(part) + if len(fill) > 0: self._fill_fields(fill) for a, f in sorted(alias.items()): - if f.particle_type: + if f.sampling_type == 'particle' and not is_sph_field: self[a] = self._data_source[f] else: self[a] = f(self) self.field_data[a].convert_to_units(f.output_units) + if len(gen) > 0: part_gen = [] cell_gen = [] for field in gen: finfo = self.ds.field_info[field] - if finfo.particle_type: + if finfo.sampling_type == 'particle': part_gen.append(field) else: cell_gen.append(field) @@ -723,7 +762,7 @@ def _split_fields(self, fields_to_get): fill.append(field) for field in fill: finfo = self.ds._get_field_info(*field) - if finfo.particle_type: + if finfo.sampling_type == "particle": particles.append(field) gen = [f for f in gen if f not in fill and f not in alias] fill = [f for f in fill if f not in particles] @@ -733,6 +772,72 @@ def _fill_particles(self, part): for p in part: self[p] = self._data_source[p] + def _fill_sph_particles(self, fields): + # checks that we have the field and gets information + fields = [f for f in fields if f not in self.field_data] + if len(fields) == 0: return + + smoothing_style = getattr(self.ds, 'sph_smoothing_style', 'scatter') + normalize = getattr(self.ds, 'use_sph_normalization', True) + + bounds, size = self._get_grid_bounds_size() + + if smoothing_style == "scatter": + for field in fields: + fi = self.ds._get_field_info(field) + ptype = fi.name[0] + if ptype not in self.ds._sph_ptypes: + raise KeyError("%s is not a SPH particle type!" 
% ptype) + buff = np.zeros(size, dtype="float64") + if normalize: + buff_den = np.zeros(size, dtype="float64") + + pbar = tqdm(desc="Interpolating SPH field {}".format(field)) + for chunk in self._data_source.chunks([field],"io"): + px = chunk[(ptype,'particle_position_x')].in_base("code").d + py = chunk[(ptype,'particle_position_y')].in_base("code").d + pz = chunk[(ptype,'particle_position_z')].in_base("code").d + hsml = chunk[(ptype,'smoothing_length')].in_base("code").d + mass = chunk[(ptype,'particle_mass')].in_base("code").d + dens = chunk[(ptype,'density')].in_base("code").d + field_quantity = chunk[field].d + + pixelize_sph_kernel_arbitrary_grid(buff, px, py, pz, hsml, + mass, dens, field_quantity, bounds, + pbar=pbar) + if normalize: + pixelize_sph_kernel_arbitrary_grid(buff_den, px, py, pz, + hsml, mass, dens, np.ones(dens.shape[0]), + bounds, pbar=pbar) + + if normalize: + normalization_3d_utility(buff, buff_den) + + self[field] = self.ds.arr(buff, fi.units) + pbar.close() + + if(smoothing_style == "gather"): + num_neighbors = getattr(self.ds, 'num_neighbors', 32) + for field in fields: + buff = np.zeros(size, dtype="float64") + + fields_to_get = ['particle_position', 'density', 'particle_mass', + 'smoothing_length', field[1]] + all_fields = all_data(self.ds, field[0], fields_to_get, kdtree=True) + + fi = self.ds._get_field_info(field) + interpolate_sph_grid_gather(buff, all_fields['particle_position'], + bounds, + all_fields['smoothing_length'], + all_fields['particle_mass'], + all_fields['density'], + all_fields[field[1]].in_units(fi.units), + self.ds.index.kdtree, + use_normalization=normalize, + num_neigh=num_neighbors) + + self[field] = self.ds.arr(buff, fi.units) + def _fill_fields(self, fields): fields = [f for f in fields if f not in self.field_data] if len(fields) == 0: return @@ -857,6 +962,43 @@ def write_to_gdf(self, gdf_path, fields, nprocs=1, field_units=None, sim_time=self.ds.current_time.v) write_to_gdf(ds, gdf_path, **kwargs) + def _get_grid_bounds_size(self): + dd = self.ds.domain_width / 2**self.level + bounds = np.zeros(6, dtype=float) + + bounds[0] = self.left_edge[0].in_base("code") + bounds[1] = bounds[0] + dd[0].d * self.ActiveDimensions[0] + bounds[2] = self.left_edge[1].in_base("code") + bounds[3] = bounds[2] + dd[1].d * self.ActiveDimensions[1] + bounds[4] = self.left_edge[2].in_base("code") + bounds[5] = bounds[4] + dd[2].d * self.ActiveDimensions[2] + size = np.ones(3, dtype=int) * 2**self.level + + return bounds, size + + def to_fits_data(self, fields, length_unit=None): + r"""Export a set of gridded fields to a FITS file. + + This will export a set of FITS images of either the fields specified + or all the fields already in the object. + + Parameters + ---------- + fields : list of strings + These fields will be pixelized and output. If "None", the keys of the + FRB will be used. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. + """ + from yt.visualization.fits_image import FITSImageData + if length_unit is None: + length_unit = self.ds.length_unit + fields = ensure_list(fields) + fid = FITSImageData(self, fields, length_unit=length_unit) + return fid + + class YTArbitraryGrid(YTCoveringGrid): """A 3D region with arbitrary bounds and dimensions. 
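# A minimal usage sketch for the SPH deposition above: _fill_sph_particles()
# honors two optional dataset attributes, sph_smoothing_style ("scatter" by
# default, or "gather") and use_sph_normalization, and to_fits_data() exports
# the gridded result. The snapshot name is an assumption; any SPH dataset works.
import yt
ds = yt.load("snapshot_033/snap_033.0.hdf5")
ds.sph_smoothing_style = "gather"     # default is "scatter"
ds.use_sph_normalization = False      # skip the normalization pass

ag = ds.arbitrary_grid(ds.domain_left_edge, ds.domain_right_edge,
                       dims=[64, 64, 64])
rho = ag["gas", "density"]            # SPH field interpolated onto the 64^3 grid

fid = ag.to_fits_data([("gas", "density")])   # FITSImageData wrapper, see above
fid.writeto("sph_grid.fits", overwrite=True)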
@@ -919,6 +1061,17 @@ def _fill_fields(self, fields): fi = self.ds._get_field_info(field) self[field] = self.ds.arr(dest, fi.units) + def _get_grid_bounds_size(self): + bounds = np.empty(6, dtype=float) + bounds[0] = self.left_edge[0].in_base("code") + bounds[2] = self.left_edge[1].in_base("code") + bounds[4] = self.left_edge[2].in_base("code") + bounds[1] = self.right_edge[0].in_base("code") + bounds[3] = self.right_edge[1].in_base("code") + bounds[5] = self.right_edge[2].in_base("code") + size = self.ActiveDimensions + + return bounds, size class LevelState(object): current_dx = None @@ -1170,8 +1323,8 @@ class YTSurface(YTSelectionContainer3D): >>> from yt.units import kpc >>> sp = ds.sphere("max", (10, "kpc") >>> surf = ds.surface(sp, "density", 5e-27) - >>> print surf["temperature"] - >>> print surf.vertices + >>> print(surf["temperature"]) + >>> print(surf.vertices) >>> bounds = [(sp.center[i] - 5.0*kpc, ... sp.center[i] + 5.0*kpc) for i in range(3)] >>> surf.export_ply("my_galaxy.ply", bounds = bounds) @@ -1805,8 +1958,8 @@ def export_ply(self, filename, bounds = None, color_field = None, >>> from yt.units import kpc >>> sp = ds.sphere("max", (10, "kpc") >>> surf = ds.surface(sp, "density", 5e-27) - >>> print surf["temperature"] - >>> print surf.vertices + >>> print(surf["temperature"]) + >>> print(surf.vertices) >>> bounds = [(sp.center[i] - 5.0*kpc, ... sp.center[i] + 5.0*kpc) for i in range(3)] >>> surf.export_ply("my_galaxy.ply", bounds = bounds) @@ -1861,7 +2014,7 @@ def _export_ply(self, filename, bounds = None, color_field = None, f.write(b"ply\n") f.write(b"format binary_little_endian 1.0\n") line = "element vertex %i\n" % (nv) - f.write(six.b(line)) + f.write(line.encode("latin-1")) f.write(b"property float x\n") f.write(b"property float y\n") f.write(b"property float z\n") @@ -1875,7 +2028,7 @@ def _export_ply(self, filename, bounds = None, color_field = None, else: v = np.empty(self.vertices.shape[1], dtype=vs[:3]) line = "element face %i\n" % (nv / 3) - f.write(six.b(line)) + f.write(line.encode("latin-1")) f.write(b"property list uchar int vertex_indices\n") if color_field is not None and sample_type == "face": f.write(b"property uchar red\n") @@ -2032,3 +2185,251 @@ def _upload_to_sketchfab(self, data, files): mylog.error("Problem uploading.") return model_uid + +class YTOctree(YTSelectionContainer3D): + """A 3D region with all the data filled into an octree. This container + will mean deposit particle fields onto octs using a kernel and SPH + smoothing. + + Parameters + ---------- + right_edge : array_like + The right edge of the region to be extracted. Specify units by supplying + a YTArray, otherwise code length units are assumed. + left_edge : array_like + The left edge of the region to be extracted. Specify units by supplying + a YTArray, otherwise code length units are assumed. + n_ref: int + This is the maximum number of particles per leaf in the resulting + octree. + over_refine_factor: int + Each leaf has a number of cells, the number of cells is equal to + 2**(3*over_refine_factor) + density_factor: int + This tells the tree that each node must divide into + 2**(3*density_factor) children if it contains more particles than n_ref + ptypes: list + This is the type of particles to include when building the tree. 
This + will default to all particles + + Examples + -------- + + octree = ds.octree(n_ref=64, over_refine_factor=2) + x_positions_of_cells = octree[('index', 'x')] + y_positions_of_cells = octree[('index', 'y')] + z_positions_of_cells = octree[('index', 'z')] + density_of_gas_in_cells = octree[('gas', 'density')] + + """ + _spatial = True + _type_name = "octree" + _con_args = ('left_edge', 'right_edge', 'n_ref') + _container_fields = (("index", "dx"), + ("index", "dy"), + ("index", "dz"), + ("index", "x"), + ("index", "y"), + ("index", "z")) + def __init__(self, left_edge=None, right_edge=None, n_ref=32, over_refine_factor=1, + density_factor=1, ptypes = None, force_build=False, ds = None, + field_parameters = None): + if field_parameters is None: + center = None + else: + center = field_parameters.get("center", None) + YTSelectionContainer3D.__init__(self, + center, ds, field_parameters) + + self.left_edge = self._sanitize_edge(left_edge, ds.domain_left_edge) + self.right_edge = self._sanitize_edge(right_edge, ds.domain_right_edge) + self.n_ref = n_ref + self.density_factor = density_factor + self.over_refine_factor = over_refine_factor + self.ptypes = self._sanitize_ptypes(ptypes) + + self._setup_data_source() + self.tree + + def __eq__(self, other): + return self.tree == other.tree + + def _generate_tree(self, fname = None): + positions = [] + for ptype in self.ptypes: + positions.append(self._data_source[(ptype, + 'particle_position')].in_units("code_length").d) + positions = np.concatenate(positions) + + if positions == []: + self._octree = None + return + + mylog.info('Allocating Octree for %s particles' % positions.shape[0]) + self.loaded = False + self._octree = CyOctree( + positions.astype('float64', copy=False), + left_edge=self.ds.domain_left_edge.in_units("code_length"), + right_edge=self.ds.domain_right_edge.in_units("code_length"), + n_ref=self.n_ref, + over_refine_factor=self.over_refine_factor, + density_factor=self.density_factor, + data_version=self.ds._file_hash + ) + + if fname is not None: + mylog.info('Saving octree to file %s' % os.path.basename(fname)) + self._octree.save(fname) + + @property + def tree(self): + self.ds.index + + # Chose _octree as _tree seems to be used + if hasattr(self, '_octree'): + return self._octree + + ds = self.ds + if getattr(ds, 'tree_filename', None) is None: + if os.path.exists(ds.parameter_filename): + fname = ds.parameter_filename + ".octree" + else: + # we don't want to write to disk for in-memory data + fname = None + else: + fname = ds.tree_filename + + if fname is None: + self._generate_tree(fname) + elif not os.path.exists(fname): + self._generate_tree(fname) + else: + self.loaded = True + mylog.info('Loading octree from %s' % os.path.basename(fname)) + self._octree = CyOctree() + self._octree.load(fname) + if self._octree.data_version != self.ds._file_hash: + mylog.info('Detected hash mismatch, regenerating Octree') + self._generate_tree(fname) + + pos = ds.arr(self._octree.cell_positions, "code_length") + self[('index', 'coordinates')] = pos + self[('index', 'x')] = pos[:, 0] + self[('index', 'y')] = pos[:, 1] + self[('index', 'z')] = pos[:, 2] + + return self._octree + + def _sanitize_ptypes(self, ptypes): + if ptypes is None: + return ['all'] + + if not isinstance(ptypes, list): + ptypes = [ptypes] + + self.ds.index + for ptype in ptypes: + if ptype not in self.ds.particle_types: + mess = "{} not found. Particle type must ".format(ptype) + mess += "be in the dataset!" 
+ raise TypeError(mess) + + return ptypes + + def _setup_data_source(self): + self._data_source = self.ds.region( + self.center, self.left_edge, self.right_edge) + + def _sanitize_edge(self, edge, default): + if edge is None: + return default.copy() + if not iterable(edge): + edge = [edge]*len(self.ds.domain_left_edge) + if len(edge) != len(self.ds.domain_left_edge): + raise RuntimeError( + "Length of edges must match the dimensionality of the " + "dataset") + if hasattr(edge, 'units'): + edge_units = edge.units + else: + edge_units = 'code_length' + return self.ds.arr(edge, edge_units) + + def get_data(self, fields = None): + if fields is None: return + + # not sure on the best way to do this + if isinstance(fields, list) and len(fields) > 1: + for field in fields: self.get_data(field) + return + elif isinstance(fields, list): + fields = fields[0] + + sph_ptypes = getattr(self.ds, '_sph_ptypes', 'None') + if fields[0] in sph_ptypes: + smoothing_style = getattr(self.ds, 'sph_smoothing_style', 'scatter') + normalize = getattr(self.ds, 'use_sph_normalization', True) + + units = self.ds._get_field_info(fields).units + if smoothing_style == "scatter": + self.scatter_smooth(fields, units, normalize) + else: + self.gather_smooth(fields, units, normalize) + elif fields[0] == 'index': + return self[fields] + else: + raise NotImplementedError + + def gather_smooth(self, fields, units, normalize): + buff = np.zeros(self[('index', 'x')].shape[0], dtype="float64") + + num_neighbors = getattr(self.ds, 'num_neighbors', 32) + + # for the gather approach we load up all of the data, this like other + # gather approaches is not memory conservative and with spatial chunking + # this can be fixed + fields_to_get = ['particle_position', 'density', 'particle_mass', + 'smoothing_length', fields[1]] + all_fields = all_data(self.ds, fields[0], fields_to_get, kdtree=True) + + interpolate_sph_positions_gather(buff, all_fields['particle_position'], + self._octree.cell_positions, + all_fields['smoothing_length'], + all_fields['particle_mass'], + all_fields['density'], + all_fields[fields[1]].in_units(units), + self.ds.index.kdtree, + use_normalization=normalize, + num_neigh=num_neighbors) + + self[fields] = self.ds.arr(buff, units) + + def scatter_smooth(self, fields, units, normalize): + buff = np.zeros(self[('index', 'x')].shape[0], dtype="float64") + + if normalize: + buff_den = np.zeros(buff.shape[0], dtype="float64") + else: + buff_den = np.empty(0) + + ptype = fields[0] + pbar = tqdm(desc="Interpolating (scatter) SPH field {}".format(fields[0])) + for chunk in self._data_source.chunks([fields], "io"): + px = chunk[(ptype,'particle_position_x')].in_base("code").d + py = chunk[(ptype,'particle_position_y')].in_base("code").d + pz = chunk[(ptype,'particle_position_z')].in_base("code").d + hsml = chunk[(ptype,'smoothing_length')].in_base("code").d + pmass = chunk[(ptype,'particle_mass')].in_base("code").d + pdens = chunk[(ptype,'density')].in_base("code").d + field_quantity = chunk[fields].in_base("code").d + + self.tree.interpolate_sph_cells(buff, buff_den, px, py, pz, pmass, + pdens, hsml, field_quantity, + use_normalization=normalize) + pbar.update(1) + pbar.close() + + if normalize: + normalization_1d_utility(buff, buff_den) + + self[fields] = self.ds.arr(buff, units) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 7fedbe9afc0..8ebfe3988fd 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -1,18 +1,3 @@ -""" -Various non-grid 
data containers. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import itertools import uuid @@ -33,14 +18,16 @@ mylog, \ ensure_list, \ fix_axis, \ - iterable, validate_width_tuple -from yt.units.unit_object import UnitParseError + iterable, \ + validate_width_tuple from yt.units.yt_array import \ YTArray, \ YTQuantity import yt.units.dimensions as ytdims +from unyt.exceptions import \ + UnitConversionError, \ + UnitParseError from yt.utilities.exceptions import \ - YTUnitConversionError, \ YTFieldUnitError, \ YTFieldUnitParseError, \ YTSpatialFieldUnitError, \ @@ -50,6 +37,7 @@ YTFieldTypeNotFound, \ YTDataSelectorNotImplemented, \ YTDimensionalityError, \ + YTNonIndexedDataContainer, \ YTBooleanObjectError, \ YTBooleanObjectsWrongDataset, YTException from yt.utilities.lib.marching_cubes import \ @@ -66,7 +54,7 @@ import yt.geometry.selection_routines from yt.geometry.selection_routines import \ compose_selector -from yt.extern.six import add_metaclass, string_types +from yt.units.yt_array import uconcatenate from yt.data_objects.field_data import YTFieldData from yt.data_objects.profiles import create_profile @@ -75,8 +63,12 @@ def sanitize_weight_field(ds, field, weight): field_object = ds._get_field_info(field) if weight is None: - if field_object.particle_type: - weight_field = (field_object.name[0], 'particle_ones') + if field_object.sampling_type == "particle": + if field_object.name[0] == "gas": + ptype = ds._sph_ptypes[0] + else: + ptype = field_object.name[0] + weight_field = (ptype, 'particle_ones') else: weight_field = ('index', 'ones') else: @@ -87,10 +79,10 @@ class RegisteredDataContainer(type): def __init__(cls, name, b, d): type.__init__(cls, name, b, d) if hasattr(cls, "_type_name") and not cls._skip_add: - data_object_registry[cls._type_name] = cls + name = getattr(cls, "_override_selector_name", cls._type_name) + data_object_registry[name] = cls -@add_metaclass(RegisteredDataContainer) -class YTDataContainer(object): +class YTDataContainer(metaclass = RegisteredDataContainer): """ Generic YTDataContainer container. 
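# A minimal sketch of the unit-system-aware default introduced above: the
# 'bulk_magnetic_field' field parameter now defaults to Tesla for MKS-like unit
# systems (unit_system.has_current_mks) and Gauss otherwise. The dataset path
# is an assumption; IsolatedGalaxy uses a Gaussian-style unit system.
import yt
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
ad = ds.all_data()
bmag = ad.get_field_parameter("bulk_magnetic_field")
# zero vector carrying units "G" here; it would carry "T" for an MKS-based dataset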
By itself, will attempt to generate field, read fields (method defined by derived classes) @@ -127,10 +119,14 @@ def __init__(self, ds, field_parameters): self.ds.objects.append(weakref.proxy(self)) mylog.debug("Appending object to %s (type: %s)", self.ds, type(self)) self.field_data = YTFieldData() + if self.ds.unit_system.has_current_mks: + mag_unit = "T" + else: + mag_unit = "G" self._default_field_parameters = { 'center': self.ds.arr(np.zeros(3, dtype='float64'), 'cm'), 'bulk_velocity': self.ds.arr(np.zeros(3, dtype='float64'), 'cm/s'), - 'bulk_magnetic_field': self.ds.arr(np.zeros(3, dtype='float64'), 'G'), + 'bulk_magnetic_field': self.ds.arr(np.zeros(3, dtype='float64'), mag_unit), 'normal': self.ds.arr([0.0, 0.0, 1.0], ''), } if field_parameters is None: field_parameters = {} @@ -170,22 +166,26 @@ def _is_default_field_parameter(self, parameter): self.field_parameters[parameter] def apply_units(self, arr, units): - return self.ds.arr(arr, input_units = units) + try: + arr.units.registry = self.ds.unit_registry + return arr.to(units) + except AttributeError: + return self.ds.arr(arr, units=units) def _set_center(self, center): if center is None: self.center = None return elif isinstance(center, YTArray): - self.center = self.ds.arr(center.copy()) + self.center = self.ds.arr(center.astype('float64')) self.center.convert_to_units('code_length') elif isinstance(center, (list, tuple, np.ndarray)): if isinstance(center[0], YTQuantity): - self.center = self.ds.arr([c.copy() for c in center]) + self.center = self.ds.arr([c.copy() for c in center], dtype='float64') self.center.convert_to_units('code_length') else: - self.center = self.ds.arr(center, 'code_length') - elif isinstance(center, string_types): + self.center = self.ds.arr(center, 'code_length', dtype='float64') + elif isinstance(center, str): if center.lower() in ("c", "center"): self.center = self.ds.domain_center # is this dangerous for race conditions? @@ -287,7 +287,7 @@ def _generate_field(self, field): with self._field_type_state(ftype, finfo): if fname in self._container_fields: tr = self._generate_container_field(field) - if finfo.particle_type: # This is a property now + if finfo.sampling_type == "particle": tr = self._generate_particle_field(field) else: tr = self._generate_fluid_field(field) @@ -318,7 +318,14 @@ def _generate_spatial_fluid(self, field, ngz): if finfo.units is None: raise YTSpatialFieldUnitError(field) units = finfo.units - rv = self.ds.arr(np.empty(self.ires.size, dtype="float64"), units) + try: + rv = self.ds.arr(np.zeros(self.ires.size, dtype="float64"), units) + accumulate = False + except YTNonIndexedDataContainer: + # In this case, we'll generate many tiny arrays of unknown size and + # then concatenate them. + outputs = [] + accumulate = True ind = 0 if ngz == 0: deps = self._identify_dependencies([field], spatial = True) @@ -327,6 +334,11 @@ def _generate_spatial_fluid(self, field, ngz): for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0, preload_fields = deps)): o = self._current_chunk.objs[0] + if accumulate: + rv = self.ds.arr(np.empty(o.ires.size, dtype="float64"), + units) + outputs.append(rv) + ind = 0 # Does this work with mesh? 
with o._activate_cache(): ind += o.select(self.selector, self[field], rv, ind) else: @@ -336,10 +348,19 @@ def _generate_spatial_fluid(self, field, ngz): gz = self._current_chunk.objs[0] gz.field_parameters = self.field_parameters wogz = gz._base_grid - ind += wogz.select( - self.selector, - gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz], - rv, ind) + if accumulate: + rv = self.ds.arr(np.empty(wogz.ires.size, + dtype="float64"), units) + outputs.append(rv) + if gz._type_name == 'octree_subset': + raise NotImplementedError + else: + ind += wogz.select( + self.selector, + gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz], + rv, ind) + if accumulate: + rv = uconcatenate(outputs) return rv def _generate_particle_field(self, field): @@ -401,6 +422,7 @@ def _parameter_iterate(self, seq): obj.field_parameters = old_fp _key_fields = None + def write_out(self, filename, fields=None, format="%0.16e"): """Write out the YTDataContainer object in a text file. @@ -609,7 +631,7 @@ def save_as_dataset(self, filename=None, fields=None): if field in self._container_fields: ftypes[field] = "grid" need_grid_positions = True - elif self.ds.field_info[field].particle_type: + elif self.ds.field_info[field].sampling_type == "particle": if field[0] not in ptypes: ptypes.append(field[0]) ftypes[field] = field[0] @@ -618,7 +640,7 @@ def save_as_dataset(self, filename=None, fields=None): ftypes[field] = "grid" need_grid_positions = True # projections and slices use px and py, so don't need positions - if self._type_name in ["cutting", "proj", "slice"]: + if self._type_name in ["cutting", "proj", "slice", "quad_proj"]: need_grid_positions = False if need_particle_positions: @@ -644,7 +666,7 @@ def save_as_dataset(self, filename=None, fields=None): extra_attrs = dict([(arg, getattr(self, arg, None)) for arg in self._con_args + self._tds_attrs]) - extra_attrs["con_args"] = self._con_args + extra_attrs["con_args"] = repr(self._con_args) extra_attrs["data_type"] = "yt_data_container" extra_attrs["container_type"] = self._type_name extra_attrs["dimensionality"] = self._dimensionality @@ -815,7 +837,7 @@ def create_firefly_object( ## you must have velocities (and they must be named "Velocities") tracked_arrays = [ - self[ptype,'relative_particle_velocity'].convert_to_units(velocity_units)] + self[ptype,'relative_particle_velocity'].in_units(velocity_units)] tracked_names = ['Velocities'] ## explicitly go after the fields we want @@ -834,7 +856,7 @@ def create_firefly_object( ## perform the unit conversion and take the log if ## necessary. 
- this_field_array.convert_to_units(units) + this_field_array.in_units(units) if log_flag: this_field_array = np.log10(this_field_array) @@ -851,7 +873,7 @@ def create_firefly_object( ## create a firefly ParticleGroup for this particle type pg = ParticleGroup( UIname = ptype, - coordinates=self[ptype,'relative_particle_position'].convert_to_units(coordinate_units), + coordinates=self[ptype,'relative_particle_position'].in_units(coordinate_units), tracked_arrays=tracked_arrays, tracked_names=tracked_names, tracked_filter_flags=tracked_filter_flags, @@ -899,7 +921,7 @@ def argmax(self, field, axis=None): if axis is None: mv, pos0, pos1, pos2 = self.quantities.max_location(field) return pos0, pos1, pos2 - if isinstance(axis, string_types): + if isinstance(axis, str): axis = [axis] rv = self.quantities.sample_at_max_field_values(field, axis) if len(rv) == 2: @@ -941,7 +963,7 @@ def argmin(self, field, axis=None): if axis is None: mv, pos0, pos1, pos2 = self.quantities.min_location(field) return pos0, pos1, pos2 - if isinstance(axis, string_types): + if isinstance(axis, str): axis = [axis] rv = self.quantities.sample_at_min_field_values(field, axis) if len(rv) == 2: @@ -1301,9 +1323,9 @@ def clone(self): >>> sp = ds.sphere("c", 0.1) >>> sp_clone = sp.clone() >>> sp["density"] - >>> print sp.field_data.keys() + >>> print(sp.field_data.keys()) [("gas", "density")] - >>> print sp_clone.field_data.keys() + >>> print(sp_clone.field_data.keys()) [] """ args = self.__reduce__() @@ -1337,7 +1359,8 @@ def _field_type_state(self, ftype, finfo, obj = None): if obj is None: obj = self old_particle_type = obj._current_particle_type old_fluid_type = obj._current_fluid_type - if finfo.particle_type: + fluid_types = self.ds.fluid_types + if finfo.sampling_type == "particle" and ftype not in fluid_types: obj._current_particle_type = ftype else: obj._current_fluid_type = ftype @@ -1354,8 +1377,8 @@ def _determine_fields(self, fields): continue if isinstance(field, tuple): if len(field) != 2 or \ - not isinstance(field[0], string_types) or \ - not isinstance(field[1], string_types): + not isinstance(field[0], str) or \ + not isinstance(field[1], str): raise YTFieldNotParseable(field) ftype, fname = field finfo = self.ds._get_field_info(ftype, fname) @@ -1365,8 +1388,14 @@ def _determine_fields(self, fields): else: fname = field finfo = self.ds._get_field_info("unknown", fname) - if finfo.particle_type: + if finfo.sampling_type == "particle": ftype = self._current_particle_type + if hasattr(self.ds, '_sph_ptypes'): + ptypes = self.ds._sph_ptypes + if finfo.name[0] in ptypes: + ftype = finfo.name[0] + elif finfo.alias_field and finfo.alias_name[0] in ptypes: + ftype = self._current_fluid_type else: ftype = self._current_fluid_type if (ftype, fname) not in self.ds.field_info: @@ -1384,9 +1413,13 @@ def _determine_fields(self, fields): # these tests are really insufficient as a field type may be valid, and the # field name may be valid, but not the combination (field type, field name) - if finfo.particle_type and ftype not in self.ds.particle_types: + particle_field = finfo.sampling_type == "particle" + local_field = finfo.local_sampling + if local_field: + pass + elif particle_field and ftype not in self.ds.particle_types: raise YTFieldTypeNotFound(ftype, ds=self.ds) - elif not finfo.particle_type and ftype not in self.ds.fluid_types: + elif not particle_field and ftype not in self.ds.fluid_types: raise YTFieldTypeNotFound(ftype, ds=self.ds) explicit_fields.append((ftype, fname)) return explicit_fields @@ -1435,7 
+1468,7 @@ def __init__(self, ds, field_parameters, data_source=None): if data_source is not None: if data_source.ds != self.ds: raise RuntimeError("Attempted to construct a DataContainer with a data_source " - "from a different DataSet", ds, data_source.ds) + "from a different Dataset", ds, data_source.ds) if data_source._dimensionality < self._dimensionality: raise RuntimeError("Attempted to construct a DataContainer with a data_source " "of lower dimensionality (%u vs %u)" % @@ -1455,7 +1488,8 @@ def selector(self): raise YTDataSelectorNotImplemented(self._type_name) if self._data_source is not None: - self._selector = compose_selector(self, self._data_source.selector, sclass(self)) + self._selector = compose_selector( + self, self._data_source.selector, sclass(self)) else: self._selector = sclass(self) return self._selector @@ -1561,7 +1595,7 @@ def get_data(self, fields=None): for ftype, fname in fields_to_get: finfo = self.ds._get_field_info(ftype, fname) finfos[ftype, fname] = finfo - if finfo.particle_type: + if finfo.sampling_type == "particle": particles.append((ftype, fname)) elif (ftype, fname) not in fluids: fluids.append((ftype, fname)) @@ -1571,13 +1605,14 @@ def get_data(self, fields=None): read_fluids, gen_fluids = self.index._read_fluid_fields( fluids, self, self._current_chunk) for f, v in read_fluids.items(): - self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units) + self.field_data[f] = self.ds.arr(v, units = finfos[f].units) self.field_data[f].convert_to_units(finfos[f].output_units) read_particles, gen_particles = self.index._read_particle_fields( particles, self, self._current_chunk) + for f, v in read_particles.items(): - self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units) + self.field_data[f] = self.ds.arr(v, units = finfos[f].units) self.field_data[f].convert_to_units(finfos[f].output_units) fields_to_generate += gen_fluids + gen_particles @@ -1602,6 +1637,8 @@ def _generate_fields(self, fields_to_generate): fi = self.ds._get_field_info(*field) try: fd = self._generate_field(field) + if hasattr(fd, 'units'): + fd.units.registry = self.ds.unit_registry if fd is None: raise RuntimeError if fi.units is None: @@ -1631,7 +1668,7 @@ def _generate_fields(self, fields_to_generate): fd = self.ds.arr(fd, '') if fi.units != '': raise YTFieldUnitError(fi, fd.units) - except YTUnitConversionError: + except UnitConversionError: raise YTFieldUnitError(fi, fd.units) except UnitParseError: raise YTFieldUnitParseError(fi) @@ -1944,14 +1981,14 @@ def to_frb(self, width, resolution, center=None, height=None, if isinstance(w, tuple) and isinstance(u, tuple): height = u w, u = w - width = self.ds.quan(w, input_units = u) + width = self.ds.quan(w, units = u) elif not isinstance(width, YTArray): width = self.ds.quan(width, 'code_length') if height is None: height = width elif iterable(height): h, u = height - height = self.ds.quan(h, input_units = u) + height = self.ds.quan(h, units = u) elif not isinstance(height, YTArray): height = self.ds.quan(height, 'code_length') if not iterable(resolution): @@ -2484,8 +2521,12 @@ def extract_isocontours(self, field, value, filename = None, >>> verts = dd.extract_isocontours("Density", rho, ... 
"triangles.obj", True) """ + from yt.data_objects.static_output import ParticleDataset + from yt.frontends.stream.data_structures import StreamParticlesDataset verts = [] samples = [] + if isinstance(self.ds, (ParticleDataset, StreamParticlesDataset)): + raise NotImplementedError for block, mask in self.blocks: my_verts = self._extract_isocontours_from_grid( block, mask, field, value, sample_values) @@ -2659,7 +2700,24 @@ def extract_connected_sets(self, field, num_levels, min_val, max_val, {'contour_slices_%s' % contour_key: cids}) return cons, contours + def _get_bbox(self): + """ + Return the bounding box for this data container. + This generic version will return the bounds of the entire domain. + """ + return self.ds.domain_left_edge, self.ds.domain_right_edge + def get_bbox(self): + """ + Return the bounding box for this data container. + """ + if self.ds.geometry != "cartesian": + raise NotImplementedError("get_bbox is currently only implemented " + "for cartesian geometries!") + le, re = self._get_bbox() + le.convert_to_units("code_length") + re.convert_to_units("code_length") + return le, re def volume(self): """ @@ -2715,6 +2773,15 @@ def __init__(self, op, dobj1, dobj2, ds = None, field_parameters = None, sel_cls = getattr(yt.geometry.selection_routines, name) self._selector = sel_cls(self) + def _get_bbox(self): + le1, re1 = self.dobj1._get_bbox() + if self.op == "NOT": + return le1, re1 + else: + le2, re2 = self.dobj2._get_bbox() + return np.minimum(le1, le2), np.maximum(re1, re2) + + # Many of these items are set up specifically to ensure that # we are not breaking old pickle files. This means we must only call the # _reconstruct_object and that we cannot mandate any additional arguments to diff --git a/yt/data_objects/derived_quantities.py b/yt/data_objects/derived_quantities.py index 7089b833dfe..371b3ad665d 100644 --- a/yt/data_objects/derived_quantities.py +++ b/yt/data_objects/derived_quantities.py @@ -1,20 +1,3 @@ -""" -Quantities that can be derived from Enzo data that may also required additional -arguments. (Standard arguments -- such as the center of a distribution of -points -- are excluded here, and left to the EnzoDerivedFields.) - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import \ @@ -26,7 +9,6 @@ from yt.utilities.physical_constants import \ gravitational_constant_cgs from yt.utilities.physical_ratios import HUGE -from yt.extern.six import add_metaclass from yt.utilities.exceptions import \ YTParticleTypeNotFound @@ -34,8 +16,14 @@ def get_position_fields(field, data): axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]] - if field[0] in data.ds.particle_types: - position_fields = [(field[0], 'particle_position_%s' % d) + field = data._determine_fields(field)[0] + finfo = data.ds.field_info[field] + if finfo.sampling_type == 'particle': + if finfo.alias_field: + ftype = finfo.alias_name[0] + else: + ftype = finfo.name[0] + position_fields = [(ftype, 'particle_position_%s' % d) for d in axis_names] else: position_fields = axis_names @@ -48,8 +36,7 @@ def __init__(cls, name, b, d): if name != "DerivedQuantity": derived_quantity_registry[name] = cls -@add_metaclass(RegisteredDerivedQuantity) -class DerivedQuantity(ParallelAnalysisInterface): +class DerivedQuantity(ParallelAnalysisInterface, metaclass = RegisteredDerivedQuantity): num_vals = -1 def __init__(self, data_source): @@ -127,9 +114,9 @@ class WeightedAverageQuantity(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.weighted_average_quantity([("gas", "density"), + >>> print(ad.quantities.weighted_average_quantity([("gas", "density"), ... ("gas", "temperature")], - ... ("gas", "cell_mass")) + ... ("gas", "cell_mass"))) """ def count_values(self, fields, weight): @@ -166,7 +153,7 @@ class TotalQuantity(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.total_quantity([("gas", "cell_mass")]) + >>> print(ad.quantities.total_quantity([("gas", "cell_mass")])) """ def count_values(self, fields): @@ -198,18 +185,18 @@ class TotalMass(TotalQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.total_mass() + >>> print(ad.quantities.total_mass()) """ def __call__(self): self.data_source.ds.index fi = self.data_source.ds.field_info - if ("gas", "cell_mass") in fi: - gas = super(TotalMass, self).__call__([('gas', 'cell_mass')]) + if ("gas", "mass") in fi: + gas = super(TotalMass, self).__call__([('gas', 'mass')]) else: gas = self.data_source.ds.arr([0], 'g') - if ("all", "particle_mass") in fi: - part = super(TotalMass, self).__call__([('all', 'particle_mass')]) + if ("nbody", "particle_mass") in fi: + part = super(TotalMass, self).__call__([('nbody', 'particle_mass')]) else: part = self.data_source.ds.arr([0], 'g') return self.data_source.ds.arr([gas, part]) @@ -241,34 +228,31 @@ class CenterOfMass(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.center_of_mass() + >>> print(ad.quantities.center_of_mass()) """ - def count_values(self, use_gas = True, use_particles = False, particle_type="all"): - if use_particles and particle_type not in self.data_source.ds.particle_types: - raise YTParticleTypeNotFound(particle_type,self.data_source.ds) - use_gas &= \ - (("gas", "cell_mass") in self.data_source.ds.field_info) - use_particles &= \ - ((particle_type, "particle_mass") in self.data_source.ds.field_info) + def count_values(self, use_gas = True, use_particles = False, particle_type="nbody"): + finfo = 
self.data_source.ds.field_info + includes_gas = ("gas", "mass") in finfo + includes_particles = (particle_type, "particle_mass") in finfo + + self.use_gas = use_gas & includes_gas + self.use_particles = use_particles & includes_particles + self.num_vals = 0 - if use_gas: + if self.use_gas: self.num_vals += 4 - if use_particles: + if self.use_particles: self.num_vals += 4 - def process_chunk(self, data, use_gas = True, use_particles = False, particle_type="all"): - use_gas &= \ - (("gas", "cell_mass") in self.data_source.ds.field_info) - use_particles &= \ - ((particle_type, "particle_mass") in self.data_source.ds.field_info) + def process_chunk(self, data, use_gas = True, use_particles = False, particle_type="nbody"): vals = [] - if use_gas: + if self.use_gas: vals += [(data["gas", ax] * - data["gas", "cell_mass"]).sum(dtype=np.float64) + data["gas", "mass"]).sum(dtype=np.float64) for ax in 'xyz'] - vals.append(data["gas", "cell_mass"].sum(dtype=np.float64)) - if use_particles: + vals.append(data["gas", "mass"].sum(dtype=np.float64)) + if self.use_particles: vals += [(data[particle_type, "particle_position_%s" % ax] * data[particle_type, "particle_mass"]).sum(dtype=np.float64) for ax in 'xyz'] @@ -318,27 +302,27 @@ class BulkVelocity(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.bulk_velocity() + >>> print(ad.quantities.bulk_velocity()) """ - def count_values(self, use_gas = True, use_particles = False, particle_type= "all"): + def count_values(self, use_gas=True, use_particles=False, particle_type="nbody"): if use_particles and particle_type not in self.data_source.ds.particle_types: raise YTParticleTypeNotFound(particle_type,self.data_source.ds) # This is a list now self.num_vals = 0 if use_gas: self.num_vals += 4 - if use_particles: + if use_particles and 'nbody' in self.data_source.ds.particle_types: self.num_vals += 4 - def process_chunk(self, data, use_gas = True, use_particles = False, particle_type= "all"): + def process_chunk(self, data, use_gas=True, use_particles=False, particle_type="nbody"): vals = [] if use_gas: vals += [(data["gas", "velocity_%s" % ax] * - data["gas", "cell_mass"]).sum(dtype=np.float64) + data["gas", "mass"]).sum(dtype=np.float64) for ax in 'xyz'] - vals.append(data["gas", "cell_mass"].sum(dtype=np.float64)) - if use_particles: + vals.append(data["gas", "mass"].sum(dtype=np.float64)) + if use_particles and 'nbody' in data.ds.particle_types: vals += [(data[particle_type, "particle_velocity_%s" % ax] * data[particle_type, "particle_mass"]).sum(dtype=np.float64) for ax in 'xyz'] @@ -386,9 +370,9 @@ class WeightedVariance(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.weighted_variance([("gas", "density"), + >>> print(ad.quantities.weighted_variance([("gas", "density"), ... ("gas", "temperature")], - ... ("gas", "cell_mass")) + ... 
("gas", "cell_mass"))) """ def count_values(self, fields, weight): @@ -397,18 +381,21 @@ def count_values(self, fields, weight): def __call__(self, fields, weight): fields = ensure_list(fields) + units = [self.data_source.ds._get_field_info(field).units + for field in fields] rv = super(WeightedVariance, self).__call__(fields, weight) + rv = [self.data_source.ds.arr(v, u) for v, u in zip(rv, units)] if len(rv) == 1: rv = rv[0] return rv def process_chunk(self, data, fields, weight): - my_weight = data[weight].sum(dtype=np.float64) + my_weight = data[weight].d.sum(dtype=np.float64) if my_weight == 0: return [0.0 for field in fields] + \ - [0.0 for field in fields] + [0.0] - my_means = [(data[field] * data[weight]).sum(dtype=np.float64) / my_weight + [0.0 for field in fields] + [0.0] + my_means = [(data[field].d * data[weight].d).sum(dtype=np.float64) / my_weight for field in fields] - my_var2s = [(data[weight] * (data[field] - + my_var2s = [(data[weight].d * (data[field].d - my_mean)**2).sum(dtype=np.float64) / my_weight for field, my_mean in zip(fields, my_means)] return my_means + my_var2s + [my_weight] @@ -421,10 +408,10 @@ def reduce_intermediate(self, values): my_mean = values[i] my_var2 = values[i + int(len(values) / 2)] all_mean = (my_weight * my_mean).sum(dtype=np.float64) / all_weight - rvals.append(self.data_source.ds.arr([(np.sqrt((my_weight * - (my_var2 + (my_mean - - all_mean)**2)).sum(dtype=np.float64) - / all_weight)), all_mean])) + ret = [(np.sqrt( + (my_weight * (my_var2 + (my_mean - all_mean)**2)).sum(dtype=np.float64) / + all_weight)), all_mean] + rvals.append(np.array(ret)) return rvals class AngularMomentumVector(DerivedQuantity): @@ -456,16 +443,16 @@ class AngularMomentumVector(DerivedQuantity): # Find angular momentum vector of galaxy in grid-based isolated galaxy dataset >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.angular_momentum_vector() + >>> print(ad.quantities.angular_momentum_vector()) # Find angular momentum vector of gas disk in particle-based dataset >>> ds = load("FIRE_M12i_ref11/snapshot_600.hdf5") >>> _, c = ds.find_max(('gas', 'density')) >>> sp = ds.sphere(c, (10, 'kpc')) - >>> print sp.quantities.angular_momentum_vector(use_gas=False, use_particles=True, particle_type='PartType0') + >>> print(sp.quantities.angular_momentum_vector(use_gas=False, use_particles=True, particle_type='PartType0')) """ - def count_values(self, use_gas=True, use_particles=True, particle_type = "all"): + def count_values(self, use_gas=True, use_particles=True, particle_type='all'): if use_particles and particle_type not in self.data_source.ds.particle_types: raise YTParticleTypeNotFound(particle_type,self.data_source.ds) num_vals = 0 @@ -473,7 +460,7 @@ def count_values(self, use_gas=True, use_particles=True, particle_type = "all"): self.data_source.ds.index self.particle_type = particle_type self.use_gas = use_gas & \ - (("gas", "cell_mass") in self.data_source.ds.field_info) + (("gas", "mass") in self.data_source.ds.field_info) self.use_particles = use_particles & \ ((self.particle_type, "particle_mass") in self.data_source.ds.field_info) if self.use_gas: @@ -486,9 +473,9 @@ def process_chunk(self, data, use_gas = True, use_particles = False, particle_ty rvals = [] if self.use_gas: rvals.extend([(data["gas", "specific_angular_momentum_%s" % axis] * - data["gas", "cell_mass"]).sum(dtype=np.float64) \ + data["gas", "mass"]).sum(dtype=np.float64) \ for axis in "xyz"]) - rvals.append(data["gas", 
"cell_mass"].sum(dtype=np.float64)) + rvals.append(data["gas", "mass"].sum(dtype=np.float64)) if self.use_particles: rvals.extend([(data[self.particle_type, "particle_specific_angular_momentum_%s" % axis] * data[self.particle_type, "particle_mass"]).sum(dtype=np.float64) \ @@ -530,8 +517,8 @@ class Extrema(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.extrema([("gas", "density"), - ... ("gas", "temperature")]) + >>> print(ad.quantities.extrema([("gas", "density"), + ... ("gas", "temperature")])) """ def count_values(self, fields, non_zero): @@ -579,8 +566,8 @@ class SampleAtMaxFieldValues(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.sample_at_max_field_values(("gas", "density"), - ... ["temperature", "velocity_magnitude"]) + >>> print(ad.quantities.sample_at_max_field_values(("gas", "density"), + ... ["temperature", "velocity_magnitude"])) """ def count_values(self, field, sample_fields): @@ -625,7 +612,7 @@ class MaxLocation(SampleAtMaxFieldValues): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.max_location(("gas", "density")) + >>> print(ad.quantities.max_location(("gas", "density"))) """ def __call__(self, field): @@ -633,7 +620,8 @@ def __call__(self, field): self.data_source.index sample_fields = get_position_fields(field, self.data_source) rv = super(MaxLocation, self).__call__(field, sample_fields) - if len(rv) == 1: rv = rv[0] + if len(rv) == 1: + rv = rv[0] return rv class SampleAtMinFieldValues(SampleAtMaxFieldValues): @@ -654,8 +642,8 @@ class SampleAtMinFieldValues(SampleAtMaxFieldValues): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.sample_at_min_field_values(("gas", "density"), - ... ["temperature", "velocity_magnitude"]) + >>> print(ad.quantities.sample_at_min_field_values(("gas", "density"), + ... 
["temperature", "velocity_magnitude"])) """ def _func(self, arr): @@ -676,7 +664,7 @@ class MinLocation(SampleAtMinFieldValues): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.min_location(("gas", "density")) + >>> print(ad.quantities.min_location(("gas", "density"))) """ def __call__(self, field): @@ -684,7 +672,8 @@ def __call__(self, field): self.data_source.index sample_fields = get_position_fields(field, self.data_source) rv = super(MinLocation, self).__call__(field, sample_fields) - if len(rv) == 1: rv = rv[0] + if len(rv) == 1: + rv = rv[0] return rv class SpinParameter(DerivedQuantity): @@ -721,17 +710,17 @@ class SpinParameter(DerivedQuantity): >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030") >>> ad = ds.all_data() - >>> print ad.quantities.spin_parameter() + >>> print(ad.quantities.spin_parameter()) """ def count_values(self, **kwargs): self.num_vals = 3 - def process_chunk(self, data, use_gas=True, use_particles=True, particle_type= "all"): + def process_chunk(self, data, use_gas=True, use_particles=True, particle_type="nbody"): if use_particles and particle_type not in self.data_source.ds.particle_types: raise YTParticleTypeNotFound(particle_type,self.data_source.ds) use_gas &= \ - (("gas", "cell_mass") in self.data_source.ds.field_info) + (("gas", "mass") in self.data_source.ds.field_info) use_particles &= \ ((particle_type, "particle_mass") in self.data_source.ds.field_info) e = data.ds.quan(0., "erg") @@ -739,9 +728,9 @@ def process_chunk(self, data, use_gas=True, use_particles=True, particle_type= " m = data.ds.quan(0., "g") if use_gas: e += (data["gas", "kinetic_energy"] * - data["gas", "cell_volume"]).sum(dtype=np.float64) + data["gas", "volume"]).sum(dtype=np.float64) j += data["gas", "angular_momentum_magnitude"].sum(dtype=np.float64) - m += data["gas", "cell_mass"].sum(dtype=np.float64) + m += data["gas", "mass"].sum(dtype=np.float64) if use_particles: e += (data[particle_type, "particle_velocity_magnitude"]**2 * data[particle_type, "particle_mass"]).sum(dtype=np.float64) diff --git a/yt/data_objects/field_data.py b/yt/data_objects/field_data.py index e350b9f0016..5aeb4914890 100644 --- a/yt/data_objects/field_data.py +++ b/yt/data_objects/field_data.py @@ -1,18 +1,3 @@ -""" -The YTFieldData object. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - class YTFieldData(dict): """ A Container object for field data, instead of just having it be a dict. diff --git a/yt/data_objects/grid_patch.py b/yt/data_objects/grid_patch.py index ddb09b66f5d..7dd1e7d6719 100644 --- a/yt/data_objects/grid_patch.py +++ b/yt/data_objects/grid_patch.py @@ -1,22 +1,6 @@ -""" -Python-based grid handler, not to be confused with the SWIG-handler - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import warnings import weakref import numpy as np -from six import string_types from yt.config import ytcfg from yt.data_objects.data_containers import \ @@ -93,7 +77,7 @@ def __getitem__(self, key): except YTFieldTypeNotFound: return tr finfo = self.ds._get_field_info(*fields[0]) - if not finfo.particle_type: + if not finfo.sampling_type == "particle": num_nodes = 2**sum(finfo.nodal_flag) new_shape = list(self.ActiveDimensions) if num_nodes > 1: @@ -278,7 +262,7 @@ def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, return cube def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False): - _old_api = isinstance(fields, (string_types, tuple)) + _old_api = isinstance(fields, (str, tuple)) if _old_api: message = ( 'get_vertex_centered_data() requires list of fields, rather than ' diff --git a/yt/data_objects/image_array.py b/yt/data_objects/image_array.py index 6886ff09675..02c33ecbf3f 100644 --- a/yt/data_objects/image_array.py +++ b/yt/data_objects/image_array.py @@ -1,16 +1,3 @@ -""" -ImageArray Class - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import warnings import numpy as np from yt.config import \ @@ -75,8 +62,14 @@ class ImageArray(YTArray): Numpy ndarray documentation appended: """ - def __new__(cls, input_array, input_units=None, registry=None, info=None): - obj = super(ImageArray, cls).__new__(cls, input_array, input_units, registry) + def __new__(cls, input_array, units=None, registry=None, info=None, + bypass_validation=False, input_units=None): + if input_units is not None: + warnings.warn("'input_units' is deprecated. Please use 'units'.") + units = input_units + obj = super(ImageArray, cls).__new__( + cls, input_array, units, registry, + bypass_validation=bypass_validation) if info is None: info = {} obj.info = info diff --git a/yt/data_objects/level_sets/api.py b/yt/data_objects/level_sets/api.py index 0ca73afa4e7..c14ab973caf 100644 --- a/yt/data_objects/level_sets/api.py +++ b/yt/data_objects/level_sets/api.py @@ -1,18 +1,3 @@ -""" -API for level_sets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .contour_finder import \ identify_contours diff --git a/yt/data_objects/level_sets/clump_handling.py b/yt/data_objects/level_sets/clump_handling.py index dff1f2c342b..920ee1c5d62 100644 --- a/yt/data_objects/level_sets/clump_handling.py +++ b/yt/data_objects/level_sets/clump_handling.py @@ -1,18 +1,3 @@ -""" -Clump finding helper classes - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np import uuid @@ -235,17 +220,17 @@ def save_as_dataset(self, filename=None, fields=None): >>> new_ds = yt.load(fn) >>> print (ds.tree["clump", "cell_mass"]) 1296926163.91 Msun - >>> print ds.tree["grid", "density"] + >>> print(ds.tree["grid", "density"]) [ 2.54398434e-26 2.46620353e-26 2.25120154e-26 ..., 1.12879234e-25 1.59561490e-25 1.09824903e-24] g/cm**3 - >>> print ds.tree["all", "particle_mass"] + >>> print(ds.tree["all", "particle_mass"]) [ 4.25472446e+38 4.25472446e+38 4.25472446e+38 ..., 2.04238266e+38 2.04523901e+38 2.04770938e+38] g - >>> print ds.tree.children[0]["clump", "cell_mass"] + >>> print(ds.tree.children[0]["clump", "cell_mass"]) 909636495.312 Msun - >>> print ds.leaves[0]["clump", "cell_mass"] + >>> print(ds.leaves[0]["clump", "cell_mass"]) 3756566.99809 Msun - >>> print ds.leaves[0]["grid", "density"] + >>> print(ds.leaves[0]["grid", "density"]) [ 6.97820274e-24 6.58117370e-24 7.32046082e-24 6.76202430e-24 7.41184837e-24 6.76981480e-24 6.94287213e-24 6.56149658e-24 6.76584569e-24 6.94073710e-24 7.06713082e-24 7.22556526e-24 @@ -299,7 +284,7 @@ def save_as_dataset(self, filename=None, fields=None): field_data = {} need_grid_positions = False for f in self.base.data._determine_fields(fields) + contour_fields: - if ds.field_info[f].particle_type: + if ds.field_info[f].sampling_type == "particle": if f[0] not in ptypes: ptypes.append(f[0]) ftypes[f] = f[0] diff --git a/yt/data_objects/level_sets/clump_info_items.py b/yt/data_objects/level_sets/clump_info_items.py index 91bd788c111..1843430b192 100644 --- a/yt/data_objects/level_sets/clump_info_items.py +++ b/yt/data_objects/level_sets/clump_info_items.py @@ -1,18 +1,3 @@ -""" -ClumpInfoCallback and callbacks. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.operator_registry import \ diff --git a/yt/data_objects/level_sets/clump_tools.py b/yt/data_objects/level_sets/clump_tools.py index e7f28c7416c..e037d35f1c9 100644 --- a/yt/data_objects/level_sets/clump_tools.py +++ b/yt/data_objects/level_sets/clump_tools.py @@ -1,19 +1,3 @@ -""" -Clump tools for use with the yt Clump object - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np nar = np.array diff --git a/yt/data_objects/level_sets/clump_validators.py b/yt/data_objects/level_sets/clump_validators.py index e440c976b55..268306955e4 100644 --- a/yt/data_objects/level_sets/clump_validators.py +++ b/yt/data_objects/level_sets/clump_validators.py @@ -1,18 +1,3 @@ -""" -ClumpValidators and callbacks. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.lib.misc_utilities import \ diff --git a/yt/data_objects/level_sets/contour_finder.py b/yt/data_objects/level_sets/contour_finder.py index 5f9f41dec5b..b05bc12f8a1 100644 --- a/yt/data_objects/level_sets/contour_finder.py +++ b/yt/data_objects/level_sets/contour_finder.py @@ -1,18 +1,3 @@ -""" -This module contains a routine to search for topologically connected sets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from collections import defaultdict diff --git a/yt/data_objects/level_sets/tests/test_clump_finding.py b/yt/data_objects/level_sets/tests/test_clump_finding.py index 2cc563dfdf5..b0a77b2e0b1 100644 --- a/yt/data_objects/level_sets/tests/test_clump_finding.py +++ b/yt/data_objects/level_sets/tests/test_clump_finding.py @@ -1,19 +1,3 @@ -""" -Clump finder tests - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os import shutil diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 75a13af133b..dbe297e5d0c 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -1,26 +1,8 @@ -""" -Subsets of octrees - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from contextlib import contextmanager import numpy as np from yt.data_objects.data_containers import \ YTSelectionContainer -from yt.data_objects.field_data import \ - YTFieldData import yt.geometry.particle_deposit as particle_deposit import yt.geometry.particle_smooth as particle_smooth @@ -76,7 +58,7 @@ def __getitem__(self, key): except YTFieldTypeNotFound: return tr finfo = self.ds._get_field_info(*fields[0]) - if not finfo.particle_type: + if not finfo.sampling_type == "particle": # We may need to reshape the field, if it is being queried from # field_data. If it's already cached, it just passes through. 
if len(tr.shape) < 4: @@ -181,13 +163,17 @@ def deposit(self, positions, fields = None, method = None, raise YTParticleDepositionNotImplemented(method) nz = self.nz nvals = (nz, nz, nz, (self.domain_ind >= 0).sum()) + if np.max(self.domain_ind) >= nvals[-1]: + raise RuntimeError( + "domain_ind out of range: nocts = {}, expected size = {}, max(domain_ind) = {}".format( + self.oct_handler.nocts, nvals[-1], np.max(self.domain_ind))) # We allocate number of zones, not number of octs op = cls(nvals, kernel_name) op.initialize() mylog.debug("Depositing %s (%s^3) particles into %s Octs", positions.shape[0], positions.shape[0]**0.3333333, nvals[-1]) - pos = np.asarray(positions.convert_to_units("code_length"), - dtype="float64") + positions.convert_to_units("code_length") + pos = positions.d # We should not need the following if we know in advance all our fields # need no casting. fields = [np.ascontiguousarray(f, dtype="float64") for f in fields] @@ -310,7 +296,7 @@ def smooth(self, positions, fields = None, index_fields = None, # This should ensure we get everything within one neighbor of home. particle_octree.n_ref = nneighbors * 2 particle_octree.add(morton) - particle_octree.finalize() + particle_octree.finalize(self.domain_id) pdom_ind = particle_octree.domain_ind(self.selector) else: particle_octree = self.oct_handler @@ -340,7 +326,6 @@ def smooth(self, positions, fields = None, index_fields = None, # error. with np.errstate(invalid='ignore'): vals = op.finalize() - if vals is None: return if isinstance(vals, list): vals = [np.asfortranarray(v) for v in vals] else: @@ -461,35 +446,6 @@ def select_particles(self, selector, x, y, z): mask = selector.select_points(x,y,z, 0.0) return mask -class ParticleOctreeSubset(OctreeSubset): - # Subclassing OctreeSubset is somewhat dubious. - # This is some subset of an octree. Note that the sum of subsets of an - # octree may multiply include data files. While we can attempt to mitigate - # this, it's unavoidable for many types of data storage on disk. - _type_name = 'indexed_octree_subset' - _con_args = ('data_files', 'ds', 'min_ind', 'max_ind') - domain_id = -1 - def __init__(self, base_region, data_files, ds, min_ind = 0, max_ind = 0, - over_refine_factor = 1): - # The first attempt at this will not work in parallel.
- self._num_zones = 1 << (over_refine_factor) - self._oref = over_refine_factor - self.data_files = data_files - self.field_data = YTFieldData() - self.field_parameters = {} - self.ds = ds - self._index = self.ds.index - self.oct_handler = ds.index.oct_handler - self.min_ind = min_ind - if max_ind == 0: max_ind = (1 << 63) - self.max_ind = max_ind - self._last_mask = None - self._last_selector_id = None - self._current_particle_type = 'all' - self._current_fluid_type = self.ds.default_fluid_type - self.base_region = base_region - self.base_selector = base_region.selector - class OctreeSubsetBlockSlicePosition(object): def __init__(self, ind, block_slice): self.ind = ind diff --git a/yt/data_objects/particle_container.py b/yt/data_objects/particle_container.py new file mode 100644 index 00000000000..e4c06662ef2 --- /dev/null +++ b/yt/data_objects/particle_container.py @@ -0,0 +1,85 @@ +import contextlib + +from yt.data_objects.data_containers import \ + YTFieldData, \ + YTSelectionContainer +from yt.funcs import \ + ensure_list +from yt.utilities.exceptions import \ + YTNonIndexedDataContainer, \ + YTDataSelectorNotImplemented + +def _non_indexed(name): + def _func_non_indexed(self, *args, **kwargs): + raise YTNonIndexedDataContainer(self) + return _func_non_indexed + +class ParticleContainer(YTSelectionContainer): + _spatial = False + _type_name = 'particle_container' + _skip_add = True + _con_args = ('base_region', 'data_files', 'overlap_files') + + def __init__(self, base_region, data_files, overlap_files = [], + domain_id = -1): + self.field_data = YTFieldData() + self.field_parameters = {} + self.data_files = ensure_list(data_files) + self.overlap_files = ensure_list(overlap_files) + self.ds = self.data_files[0].ds + self._last_mask = None + self._last_selector_id = None + self._current_particle_type = 'all' + # self._current_fluid_type = self.ds.default_fluid_type + if hasattr(base_region, "base_selector"): + self.base_selector = base_region.base_selector + self.base_region = base_region.base_region + else: + self.base_region = base_region + self.base_selector = base_region.selector + self._octree = None + self._temp_spatial = False + if isinstance(base_region, ParticleContainer): + self._temp_spatial = base_region._temp_spatial + self._octree = base_region._octree + # To ensure there are not domains if global octree not used + self.domain_id = -1 + + @property + def selector(self): + raise YTDataSelectorNotImplemented(self.oc_type_name) + + def select_particles(self, selector, x, y, z): + mask = selector.select_points(x,y,z) + return mask + + @contextlib.contextmanager + def _expand_data_files(self): + old_data_files = self.data_files + old_overlap_files = self.overlap_files + self.data_files = list(set(self.data_files + self.overlap_files)) + self.data_files.sort() + self.overlap_files = [] + yield self + self.data_files = old_data_files + self.overlap_files = old_overlap_files + + def retrieve_ghost_zones(self, ngz, coarse_ghosts = False): + gz_oct = self.octree.retrieve_ghost_zones(ngz, coarse_ghosts = coarse_ghosts) + gz = ParticleContainer(gz_oct.base_region, gz_oct.data_files, + overlap_files = gz_oct.overlap_files, + selector_mask = gz_oct.selector_mask, + domain_id = gz_oct.domain_id) + gz._octree = gz_oct + return gz + + select_blocks = _non_indexed('select_blocks') + deposit = _non_indexed('deposit') + smooth = _non_indexed('smooth') + select_icoords = _non_indexed('select_icoords') + select_fcoords = _non_indexed('select_fcoords') + select_fwidth = 
_non_indexed('select_fwidth') + select_ires = _non_indexed('select_ires') + select = _non_indexed('select') + count = _non_indexed('count') + count_particles = _non_indexed('count_particles') diff --git a/yt/data_objects/particle_filters.py b/yt/data_objects/particle_filters.py index f7064f4b73b..aa81c9e2da6 100644 --- a/yt/data_objects/particle_filters.py +++ b/yt/data_objects/particle_filters.py @@ -1,19 +1,3 @@ -""" -This is a library for defining and using particle filters. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import copy from contextlib import contextmanager @@ -27,6 +11,8 @@ class DummyFieldInfo(object): particle_type = True + sampling_type = 'particle' + dfi = DummyFieldInfo() class ParticleFilter(object): diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 82910aacad8..4b75e02ced0 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -1,16 +1,3 @@ -""" -Particle trajectories -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.data_objects.field_data import YTFieldData from yt.utilities.lib.particle_mesh_operations import CICSample_3 from yt.utilities.parallel_tools.parallel_analysis_interface import \ @@ -59,7 +46,7 @@ class ParticleTrajectories(object): >>> ts = DatasetSeries(my_fns) >>> trajs = ts.particle_trajectories(indices, fields=fields) >>> for t in trajs : - >>> print t["particle_velocity_x"].max(), t["particle_velocity_x"].min() + >>> print(t["particle_velocity_x"].max(), t["particle_velocity_x"].min()) """ def __init__(self, outputs, indices, fields=None, suppress_logging=False, ptype=None): diff --git a/yt/data_objects/particle_unions.py b/yt/data_objects/particle_unions.py index ec06b03ba14..1d1e0ca9964 100644 --- a/yt/data_objects/particle_unions.py +++ b/yt/data_objects/particle_unions.py @@ -1,22 +1,6 @@ -""" -These are particle union objects. These essentially alias one particle to -another, where the other can be one or several particle types. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .unions import Union class ParticleUnion(Union): + _union_type = "particle" def __init__(self, name, sub_types): super(ParticleUnion, self).__init__(name, sub_types) diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index ad21233c28f..c1cee45b361 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -1,18 +1,3 @@ -""" -Profile classes, to deal with generating and obtaining profiles - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.fields.derived_field import DerivedField @@ -76,7 +61,6 @@ def save_state(*args, **kwargs): return tr return save_state - class ProfileFieldAccumulator(object): def __init__(self, n_fields, size): shape = size + (n_fields,) @@ -1151,12 +1135,12 @@ def create_profile(data_source, bin_fields, fields, n_bins=64, """ bin_fields = data_source._determine_fields(bin_fields) fields = ensure_list(fields) - is_pfield = [data_source.ds._get_field_info(f).particle_type + is_pfield = [data_source.ds._get_field_info(f).sampling_type == "particle" for f in bin_fields + fields] wf = None if weight_field is not None: wf = data_source.ds._get_field_info(weight_field) - is_pfield.append(wf.particle_type) + is_pfield.append(wf.sampling_type == "particle") wf = wf.name if len(bin_fields) > 1 and isinstance(accumulation, bool): @@ -1170,9 +1154,20 @@ def create_profile(data_source, bin_fields, fields, n_bins=64, override_bins = sanitize_field_tuple_keys(override_bins, data_source) if any(is_pfield) and not all(is_pfield): - raise YTIllDefinedProfile( - bin_fields, data_source._determine_fields(fields), wf, is_pfield) - elif len(bin_fields) == 1: + if hasattr(data_source.ds, '_sph_ptypes'): + is_local = [data_source.ds.field_info[f].sampling_type == "local" + for f in bin_fields + fields] + is_local_or_pfield = [pf or lf for (pf, lf) in + zip(is_pfield, is_local)] + if not all(is_local_or_pfield): + raise YTIllDefinedProfile( + bin_fields, data_source._determine_fields(fields), wf, + is_pfield) + else: + raise YTIllDefinedProfile( + bin_fields, data_source._determine_fields(fields), wf, + is_pfield) + if len(bin_fields) == 1: cls = Profile1D elif len(bin_fields) == 2 and all(is_pfield): if deposition == 'cic': @@ -1199,7 +1194,8 @@ def create_profile(data_source, bin_fields, fields, n_bins=64, raise NotImplementedError if weight_field is not None and cls == ParticleProfile: weight_field, = data_source._determine_fields([weight_field]) - if not data_source.ds._get_field_info(weight_field).particle_type: + wf = data_source.ds._get_field_info(weight_field) + if not wf.sampling_type == "particle": weight_field = None if not iterable(n_bins): n_bins = [n_bins] * len(bin_fields) diff --git a/yt/data_objects/region_expression.py b/yt/data_objects/region_expression.py index 4c210fe747a..c315601bbe4 100644 --- a/yt/data_objects/region_expression.py +++ b/yt/data_objects/region_expression.py @@ -1,19 +1,5 @@ -""" -An object that can live on the dataset to facilitate data access. - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. 
-# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import weakref -from yt.extern.six import string_types from yt.funcs import obj_length from yt.units.yt_array import YTQuantity from yt.utilities.exceptions import YTDimensionalityError @@ -34,11 +20,11 @@ def __getitem__(self, item): # At first, we will only implement this as accepting a slice that is # (optionally) unitful corresponding to a specific set of coordinates # that result in a rectangular prism or a slice. - if isinstance(item, string_types): + if isinstance(item, str): # This is some field; we will instead pass this back to the # all_data object. return self.all_data[item] - if isinstance(item, tuple) and isinstance(item[1], string_types): + if isinstance(item, tuple) and isinstance(item[1], str): return self.all_data[item] if isinstance(item, slice): if obj_length(item.start) == 3 and obj_length(item.stop) == 3: diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index f26e6bcf328..8a5c743feed 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -1,42 +1,42 @@ -""" -Data containers based on geometric selection - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.data_objects.data_containers import \ YTSelectionContainer0D, YTSelectionContainer1D, \ YTSelectionContainer2D, YTSelectionContainer3D, YTSelectionContainer from yt.data_objects.static_output import Dataset -from yt.extern.six import string_types -from yt.funcs import ensure_list, iterable, validate_width_tuple, \ - fix_length, fix_axis, validate_3d_array, validate_float, \ - validate_iterable, validate_object, validate_axis, validate_center +from yt.frontends.sph.data_structures import \ + SPHDataset +from yt.funcs import \ + ensure_list, \ + iterable, \ + validate_width_tuple, \ + fix_length, \ + fix_axis, \ + mylog, \ + validate_3d_array, \ + validate_float, \ + validate_iterable, \ + validate_object, \ + validate_axis, \ + validate_center from yt.units.yt_array import \ + udot, \ + unorm, \ YTArray, \ YTQuantity from yt.utilities.exceptions import \ YTSphereTooSmall, \ YTIllDefinedCutRegion, \ - YTEllipsoidOrdering + YTEllipsoidOrdering, \ + YTException +from yt.utilities.lib.pixelization_routines import \ + SPHKernelInterpolationTable from yt.utilities.minimal_representation import \ MinimalSliceData from yt.utilities.math_utils import get_rotation_matrix from yt.utilities.orientation import Orientation from yt.geometry.selection_routines import points_in_cells -from yt.utilities.on_demand_imports import _scipy +from yt.utilities.on_demand_imports import _scipy, _miniball class YTPoint(YTSelectionContainer0D): @@ -115,7 +115,7 @@ class YTOrthoRay(YTSelectionContainer1D): >>> import yt >>> ds = yt.load("RedshiftOutput0005") >>> oray = ds.ortho_ray(0, (0.2, 0.74)) - >>> print oray["Density"] + >>> print(oray["Density"]) Note: The low-level data representation for rays are not guaranteed to be spatially ordered. 
In particular, with AMR datasets, higher resolution @@ -197,7 +197,7 @@ class YTRay(YTSelectionContainer1D): >>> import yt >>> ds = yt.load("RedshiftOutput0005") >>> ray = ds.ray((0.2, 0.74, 0.11), (0.4, 0.91, 0.31)) - >>> print ray["Density"], ray["t"], ray["dts"] + >>> print(ray["Density"], ray["t"], ray["dts"]) Note: The low-level data representation for rays are not guaranteed to be spatially ordered. In particular, with AMR datasets, higher resolution @@ -209,8 +209,7 @@ class YTRay(YTSelectionContainer1D): >>> my_ray = ds.ray(...) >>> ray_sort = np.argsort(my_ray["t"]) >>> density = my_ray["density"][ray_sort] - -""" + """ _type_name = "ray" _con_args = ('start_point', 'end_point') _container_fields = ("t", "dts") @@ -236,12 +235,24 @@ def __init__(self, start_point, end_point, ds=None, self.end_point = \ self.ds.arr(end_point, 'code_length', dtype='float64') + if ((self.start_point < self.ds.domain_left_edge).any() or + (self.end_point > self.ds.domain_right_edge).any()): + mylog.warn( + 'Ray start or end is outside the domain. ' + + 'Returned data will only be for the ray section inside the domain.') self.vec = self.end_point - self.start_point self._set_center(self.start_point) self.set_field_parameter('center', self.start_point) self._dts, self._ts = None, None def _generate_container_field(self, field): + # What should we do with `ParticleDataset`? + if isinstance(self.ds, SPHDataset): + return self._generate_container_field_sph(field) + else: + return self._generate_container_field_grid(field) + + def _generate_container_field_grid(self, field): if self._current_chunk is None: self.index._identify_base_chunk(self) if field == "dts": @@ -251,6 +262,30 @@ def _generate_container_field(self, field): else: raise KeyError(field) + def _generate_container_field_sph(self, field): + if field not in ["dts", "t"]: + raise KeyError(field) + + length = unorm(self.vec) + pos = self[self.ds._sph_ptypes[0], "particle_position"] + r = pos - self.start_point + l = udot(r, self.vec/length) + + if field == "t": + return l / length + + hsml = self[self.ds._sph_ptypes[0], "smoothing_length"] + mass = self[self.ds._sph_ptypes[0], "particle_mass"] + dens = self[self.ds._sph_ptypes[0], "density"] + # impact parameter from particle to ray + b = np.sqrt(np.sum(r**2, axis=1) - l**2) + + # Use an interpolation table to evaluate the integrated 2D + # kernel from the dimensionless impact parameter b/hsml. + itab = SPHKernelInterpolationTable(self.ds.kernel_name) + dl = itab.interpolate_array(b / hsml) * mass / dens / hsml**2 + return dl / length + class YTSlice(YTSelectionContainer2D): """ This is a data object corresponding to a slice through the simulation @@ -288,7 +323,7 @@ class YTSlice(YTSelectionContainer2D): >>> import yt >>> ds = yt.load("RedshiftOutput0005") >>> slice = ds.slice(0, 0.25) - >>> print slice["Density"] + >>> print(slice["Density"]) """ _top_node = "/Slices" _type_name = "slice" @@ -395,7 +430,7 @@ class YTCuttingPlane(YTSelectionContainer2D): fields. 
data_source: optional Draw the selection from the provided data source rather than - all data associated with the data_set + all data associated with the dataset Notes ----- @@ -411,7 +446,7 @@ class YTCuttingPlane(YTSelectionContainer2D): >>> import yt >>> ds = yt.load("RedshiftOutput0005") >>> cp = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6]) - >>> print cp["Density"] + >>> print(cp["Density"]) """ _plane = None _top_node = "/CuttingPlanes" @@ -634,6 +669,18 @@ def __init__(self, center, normal, radius, height, fields=None, self.radius = fix_length(radius, self.ds) self._d = -1.0 * np.dot(self._norm_vec, self.center) + def _get_bbox(self): + """ + Return the minimum bounding box for the disk. + """ + # http://www.iquilezles.org/www/articles/diskbbox/diskbbox.htm + pa = self.center + self._norm_vec*self.height + pb = self.center - self._norm_vec*self.height + a = pa - pb + db = self.radius*np.sqrt(1.0-a.d*a.d/np.dot(a,a)) + return np.minimum(pa-db, pb-db), np.maximum(pa+db, pb+db) + + class YTRegion(YTSelectionContainer3D): """A 3D region of data with an arbitrary center. @@ -667,15 +714,22 @@ def __init__(self, center, left_edge, right_edge, fields=None, YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) if not isinstance(left_edge, YTArray): - self.left_edge = self.ds.arr(left_edge, 'code_length') + self.left_edge = self.ds.arr(left_edge, 'code_length', dtype='float64') else: # need to assign this dataset's unit registry to the YTArray - self.left_edge = self.ds.arr(left_edge.copy()) + self.left_edge = self.ds.arr(left_edge.copy(), dtype='float64') if not isinstance(right_edge, YTArray): - self.right_edge = self.ds.arr(right_edge, 'code_length') + self.right_edge = self.ds.arr(right_edge, 'code_length', dtype='float64') else: # need to assign this dataset's unit registry to the YTArray - self.right_edge = self.ds.arr(right_edge.copy()) + self.right_edge = self.ds.arr(right_edge.copy(), dtype='float64') + + def _get_bbox(self): + """ + Return the minimum bounding box for the region. + """ + return self.left_edge, self.right_edge + class YTDataCollection(YTSelectionContainer3D): """ @@ -698,6 +752,7 @@ def __init__(self, obj_list, ds=None, field_parameters=None, dtype="int64") self._obj_list = obj_list + class YTSphere(YTSelectionContainer3D): """ A sphere of points defined by a *center* and a *radius*. @@ -740,6 +795,54 @@ def __init__(self, center, radius, ds=None, self.set_field_parameter("center", self.center) self.radius = radius + def _get_bbox(self): + """ + Return the minimum bounding box for the sphere. + """ + return -self.radius + self.center, self.radius + self.center + +class YTMinimalSphere(YTSelectionContainer3D): + """ + Build the smallest sphere that encompasses a set of points. + + Parameters + ---------- + points : YTArray + The points that the sphere will contain. + + Examples + -------- + + >>> import yt + >>> ds = yt.load("output_00080/info_00080.txt") + >>> points = ds.r['particle_position'] + >>> sphere = ds.minimal_sphere(points) + """ + _type_name = "sphere" + _override_selector_name = "minimal_sphere" + _con_args = ('center', 'radius') + + def __init__(self, points, ds=None, field_parameters=None, data_source=None): + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + validate_object(points, YTArray) + + points = fix_length(points, ds) + if len(points) < 2: + raise YTException("Not enough points. 
Expected at least 2, got %s" % len(points)) + mylog.debug('Building minimal sphere around points.') + mb = _miniball.Miniball(points) + if not mb.is_valid(): + raise YTException("Could not build valid sphere around points.") + + center = ds.arr(mb.center(), points.units) + radius = ds.quan(np.sqrt(mb.squared_radius()), points.units) + super(YTMinimalSphere, self).__init__(center, ds, field_parameters, data_source) + self.set_field_parameter('radius', radius) + self.set_field_parameter("center", self.center) + self.radius = radius + class YTEllipsoid(YTSelectionContainer3D): """ By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we @@ -830,6 +933,15 @@ def __init__(self, center, A, B, C, e0, tilt, fields=None, self.set_field_parameter('e1', e1) self.set_field_parameter('e2', e2) + def _get_bbox(self): + """ + Get the bounding box for the ellipsoid. NOTE that in this case + it is not the *minimum* bounding box. + """ + radius = self.ds.arr(np.max([self._A, self._B, self._C]), "code_length") + return -radius + self.center, radius + self.center + + class YTCutRegion(YTSelectionContainer3D): """ This is a data object designed to allow individuals to apply logical @@ -861,7 +973,7 @@ def __init__(self, data_source, conditionals, ds=None, validate_object(data_source, YTSelectionContainer) validate_iterable(conditionals) for condition in conditionals: - validate_object(condition, string_types) + validate_object(condition, str) validate_object(ds, Dataset) validate_object(field_parameters, dict) validate_object(base_object, YTSelectionContainer) @@ -1031,6 +1143,14 @@ def ires(self): def fwidth(self): return self.base_object.fwidth[self._cond_ind,:] + def _get_bbox(self): + """ + Get the bounding box for the cut region. Here we just use + the bounding box for the source region. + """ + return self.base_object._get_bbox() + + class YTIntersectionContainer3D(YTSelectionContainer3D): """ This is a more efficient method of selecting the intersection of multiple diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index efdfa78fcfa..48d7fdb414e 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1,19 +1,3 @@ -""" -Dataset and related data structures. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import functools import itertools import numpy as np @@ -23,8 +7,7 @@ import warnings from collections import defaultdict -from yt.extern.six import add_metaclass, string_types -from six.moves import cPickle +import pickle from yt.config import ytcfg from yt.fields.derived_field import \ @@ -34,8 +17,10 @@ from yt.funcs import \ mylog, \ set_intersection, \ - setdefaultattr, \ - ensure_list + ensure_list, \ + issue_deprecation_warning, \ + iterable, \ + setdefaultattr from yt.utilities.cosmology import \ Cosmology from yt.utilities.exceptions import \ @@ -49,16 +34,17 @@ ParameterFileStore, \ NoParameterShelf, \ output_type_registry +from yt.units import UnitContainer from yt.units.dimensions import current_mks -from yt.units.unit_object import Unit, unit_system_registry, \ - _define_unit +from yt.units.unit_object import Unit, define_unit +from yt.units.unit_systems import unit_system_registry from yt.units.unit_registry import UnitRegistry from yt.fields.derived_field import \ ValidateSpatial from yt.fields.fluid_fields import \ setup_gradient_fields from yt.fields.particle_fields import \ - add_volume_weighted_smoothed_field + DEP_MSG_SMOOTH_FIELD from yt.data_objects.particle_filters import \ filter_registry from yt.data_objects.particle_unions import \ @@ -69,11 +55,10 @@ MinimalDataset from yt.units.yt_array import \ YTArray, \ - YTQuantity, \ + YTQuantity +from yt.units import \ _wrap_display_ytarray -from yt.units.unit_systems import \ - create_code_unit_system, \ - _make_unit_system_copy +from yt.units.unit_systems import create_code_unit_system from yt.data_objects.region_expression import \ RegionExpression from yt.geometry.coordinates.api import \ @@ -163,8 +148,7 @@ def ireq(self, value): return ireq -@add_metaclass(RegisteredDataset) -class Dataset(object): +class Dataset(metaclass = RegisteredDataset): default_fluid_type = "gas" default_field = ("gas", "density") @@ -182,10 +166,11 @@ class Dataset(object): fields = requires_index("fields") _instantiated = False _particle_type_counts = None + _proj_type = 'quad_proj' _ionization_label_format = 'roman_numeral' def __new__(cls, filename=None, *args, **kwargs): - if not isinstance(filename, string_types): + if not isinstance(filename, str): obj = object.__new__(cls) # The Stream frontend uses a StreamHandler object to pass metadata # to __init__. 
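Editorial note: the hunk above keeps dataset instances in a cache keyed on the file path plus the pickled constructor arguments, so repeated loads with identical arguments return the same object; the change here only swaps ``cPickle`` for the standard ``pickle`` module. A minimal standalone sketch of that caching pattern (the helper name is hypothetical; only the cache-key construction mirrors the code above):

import os
import pickle

_cached_datasets = {}

def _cached_instance(cls, filename, *args, **kwargs):
    # Build a hashable key from the absolute path and the pickled arguments,
    # as in Dataset.__new__ above; an unseen key constructs a new instance.
    apath = os.path.abspath(filename)
    cache_key = (apath, pickle.dumps(args), pickle.dumps(kwargs))
    if cache_key not in _cached_datasets:
        _cached_datasets[cache_key] = cls(filename, *args, **kwargs)
    return _cached_datasets[cache_key]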
@@ -195,7 +180,7 @@ def __new__(cls, filename=None, *args, **kwargs): obj.__init__(filename, *args, **kwargs) return obj apath = os.path.abspath(filename) - cache_key = (apath, cPickle.dumps(args), cPickle.dumps(kwargs)) + cache_key = (apath, pickle.dumps(args), pickle.dumps(kwargs)) if ytcfg.getboolean("yt","skip_dataset_cache"): obj = object.__new__(cls) elif cache_key not in _cached_datasets: @@ -244,7 +229,15 @@ def __init__(self, filename, dataset_type=None, file_style=None, self.no_cgs_equiv_length = False - self._create_unit_registry() + if unit_system == 'code': + # create a fake MKS unit system which we will override later to + # avoid chicken/egg issue of the unit registry needing a unit system + # but code units need a unit registry to define the code units on + used_unit_system = 'mks' + else: + used_unit_system = unit_system + + self._create_unit_registry(used_unit_system) self._parse_parameter_file() self.set_units() @@ -275,7 +268,7 @@ def _set_derived_attrs(self): for attr in ("center", "width", "left_edge", "right_edge"): n = "domain_%s" % attr v = getattr(self, n) - if not isinstance(v, YTArray): + if not isinstance(v, YTArray) and v is not None: # Note that we don't add on _ipython_display_ here because # everything is stored inside a MutableAttribute. v = self.arr(v, "code_length") @@ -506,6 +499,23 @@ def create_field_info(self): nfields = self.add_particle_union(pu) if nfields == 0: mylog.debug("zero common fields: skipping particle union 'all'") + if "nbody" not in self.particle_types: + mylog.debug("Creating Particle Union 'nbody'") + ptypes = list(self.particle_types_raw) + if hasattr(self, '_sph_ptypes'): + for sph_ptype in self._sph_ptypes: + if sph_ptype in ptypes: + ptypes.remove(sph_ptype) + if ptypes: + nbody_ptypes = [] + for ptype in ptypes: + if (ptype, 'particle_mass') in self.field_info: + nbody_ptypes.append(ptype) + pu = ParticleUnion("nbody", nbody_ptypes) + nfields = self.add_particle_union(pu) + if nfields == 0: + mylog.debug( + "zero common fields, skipping particle union 'nbody'") self.field_info.setup_extra_union_fields() mylog.debug("Loading field plugins.") self.field_info.load_all_plugins(self.default_fluid_type) @@ -635,7 +645,7 @@ def add_particle_filter(self, filter): # concatenation fields. n = getattr(filter, "name", filter) self.known_filters[n] = None - if isinstance(filter, string_types): + if isinstance(filter, str): used = False f = filter_registry.get(filter, None) if f is None: @@ -681,6 +691,13 @@ def _setup_filtered_type(self, filter): self.particle_types += (filter.name,) if filter.name not in self.filtered_particle_types: self.filtered_particle_types.append(filter.name) + if hasattr(self, '_sph_ptypes'): + if filter.filtered_type == self._sph_ptypes[0]: + mylog.warning("It appears that you are filtering on an SPH field " + "type. It is recommended to use 'gas' as the " + "filtered particle type in this case instead.") + if filter.filtered_type in (self._sph_ptypes + ("gas",)): + self._sph_ptypes = self._sph_ptypes + (filter.name,) new_fields = self._setup_particle_types([filter.name]) deps, _ = self.field_info.check_derived_fields(new_fields) self.field_dependencies.update(deps) @@ -719,18 +736,22 @@ def _get_field_info(self, ftype, fname = None): # the type of field it is. So we look at the field type and # determine if we need to change the type. 
fi = self._last_finfo = self.field_info[fname] - if fi.particle_type and self._last_freq[0] not in self.particle_types: - field = "all", field[1] - elif not fi.particle_type and self._last_freq[0] not in self.fluid_types: - field = self.default_fluid_type, field[1] + if fi.sampling_type == "particle" and self._last_freq[0] \ + not in self.particle_types: + field = "all", field[1] + elif not fi.sampling_type == "particle" and self._last_freq[0] \ + not in self.fluid_types: + field = self.default_fluid_type, field[1] self._last_freq = field return self._last_finfo # We also should check "all" for particles, which can show up if you're # mixing deposition/gas fields with particle fields. if guessing_type: - to_guess = ["all", self.default_fluid_type] \ - + list(self.fluid_types) \ - + list(self.particle_types) + if hasattr(self, '_sph_ptype'): + to_guess = [self.default_fluid_type, 'all'] + else: + to_guess = ['all', self.default_fluid_type] + to_guess += list(self.fluid_types) + list(self.particle_types) for ftype in to_guess: if (ftype, fname) in self.field_info: self._last_freq = (ftype, fname) @@ -752,6 +773,12 @@ def _setup_classes(self): self.object_types.sort() def _add_object_class(self, name, base): + # skip projection data objects that don't make sense + # for this type of data + if 'proj' in name and name != self._proj_type: + return + elif 'proj' in name: + name = 'proj' self.object_types.append(name) obj = functools.partial(base, ds=weakref.proxy(self)) obj.__doc__ = base.__doc__ @@ -765,6 +792,14 @@ def find_max(self, field): source = self.all_data() max_val, mx, my, mz = \ source.quantities.max_location(field) + # This is a hack to fix the fact that some non-cartesian datasets have + # dimensionless quantities, and we can't yet handle that. + if mx.units.is_dimensionless: + mx = self.quan(mx.v, "code_length") + if my.units.is_dimensionless: + my = self.quan(my.v, "code_length") + if mz.units.is_dimensionless: + mz = self.quan(mz.v, "code_length") center = self.arr([mx, my, mz], dtype="float64").to('code_length') mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", max_val, center[0], center[1], center[2]) @@ -832,6 +867,7 @@ def all_data(self, find_max=False, **kwargs): all_data is a wrapper to the Region object for creating a region which covers the entire simulation domain. """ + self.index if find_max: c = self.find_max("density")[1] else: c = (self.domain_right_edge + self.domain_left_edge)/2.0 return self.region(c, @@ -842,6 +878,9 @@ def box(self, left_edge, right_edge, **kwargs): box is a wrapper to the Region object for creating a region without having to specify a *center* value. It assumes the center is the midpoint between the left_edge and right_edge. + + Keyword arguments are passed to the initializer of the YTRegion object + (e.g. ds.region). 
""" # we handle units in the region data object # but need to check if left_edge or right_edge is a @@ -901,44 +940,55 @@ def relative_refinement(self, l0, l1): return self.refine_by**(l1-l0) def _assign_unit_system(self, unit_system): - current_mks_unit = None + if unit_system == "cgs": + current_mks_unit = None + else: + current_mks_unit = 'A' magnetic_unit = getattr(self, 'magnetic_unit', None) if magnetic_unit is not None: - # if the magnetic unit is in T, we need to create the code unit - # system as an MKS-like system - if current_mks in self.magnetic_unit.units.dimensions.free_symbols: - if unit_system == "code": - current_mks_unit = 'A' - elif unit_system == 'mks': - pass - else: - self.magnetic_unit = \ - self.magnetic_unit.to_equivalent('gauss', 'CGS') - self.unit_registry.modify("code_magnetic", self.magnetic_unit) - create_code_unit_system(self.unit_registry, - current_mks_unit=current_mks_unit) - if unit_system == "code": - unit_system = unit_system_registry[self.unit_registry.unit_system_id] - else: - sys_name = str(unit_system).lower() - unit_system = _make_unit_system_copy(self.unit_registry, sys_name) - self.unit_system = unit_system - - def _create_unit_registry(self): - self.unit_registry = UnitRegistry() + if unit_system == "mks": + if current_mks not in self.magnetic_unit.units.dimensions.free_symbols: + self.magnetic_unit = self.magnetic_unit.to('gauss').to('T') + self.unit_registry.modify("code_magnetic", self.magnetic_unit.value) + else: + # if the magnetic unit is in T, we need to create the code unit + # system as an MKS-like system + if current_mks in self.magnetic_unit.units.dimensions.free_symbols: + self.magnetic_unit = self.magnetic_unit.to('T').to('gauss') + # The following modification ensures that we get the conversion to + # cgs correct + self.unit_registry.modify("code_magnetic", + self.magnetic_unit.value*0.1**0.5) + + us = create_code_unit_system( + self.unit_registry, current_mks_unit=current_mks_unit) + if unit_system != "code": + us = unit_system_registry[str(unit_system).lower()] + self.unit_system = us + self.unit_registry.unit_system = self.unit_system + + def _create_unit_registry(self, unit_system): import yt.units.dimensions as dimensions - self.unit_registry.add("code_length", 1.0, dimensions.length) - self.unit_registry.add("code_mass", 1.0, dimensions.mass) - self.unit_registry.add("code_density", 1.0, dimensions.density) + # yt assumes a CGS unit system by default (for back compat reasons). + # Since unyt is MKS by default we specify the MKS values of the base + # units in the CGS system. So, for length, 1 cm = .01 m. And so on. 
+ self.unit_registry = UnitRegistry(unit_system=unit_system) + self.unit_registry.add("code_length", .01, dimensions.length) + self.unit_registry.add("code_mass", .001, dimensions.mass) + self.unit_registry.add("code_density", 1000., dimensions.density) self.unit_registry.add("code_specific_energy", 1.0, dimensions.energy / dimensions.mass) self.unit_registry.add("code_time", 1.0, dimensions.time) - self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field) + if unit_system == "mks": + self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field) + else: + self.unit_registry.add("code_magnetic", 0.1**0.5, dimensions.magnetic_field_cgs) self.unit_registry.add("code_temperature", 1.0, dimensions.temperature) - self.unit_registry.add("code_pressure", 1.0, dimensions.pressure) - self.unit_registry.add("code_velocity", 1.0, dimensions.velocity) + self.unit_registry.add("code_pressure", 0.1, dimensions.pressure) + self.unit_registry.add("code_velocity", .01, dimensions.velocity) self.unit_registry.add("code_metallicity", 1.0, dimensions.dimensionless) + self.unit_registry.add("h", 1.0, dimensions.dimensionless, r"h") self.unit_registry.add("a", 1.0, dimensions.dimensionless) def set_units(self): @@ -953,9 +1003,10 @@ def set_units(self): # Comoving lengths for my_unit in ["m", "pc", "AU", "au"]: new_unit = "%scm" % my_unit - self.unit_registry.add(new_unit, self.unit_registry.lut[my_unit][0] / - (1 + self.current_redshift), - length, "\\rm{%s}/(1+z)" % my_unit) + my_u = Unit(my_unit, registry=self.unit_registry) + self.unit_registry.add( + new_unit, my_u.base_value / (1 + self.current_redshift), + length, "\\rm{%s}/(1+z)" % my_unit, prefixable=True) self.unit_registry.modify('a', 1/(1+self.current_redshift)) self.set_code_units() @@ -1044,22 +1095,34 @@ def _override_code_units(self): mylog.info("Overriding %s_unit: %g %s.", unit, val[0], val[1]) setattr(self, "%s_unit" % unit, self.quan(val[0], val[1])) + _units = None + _unit_system_id = None + @property + def units(self): + current_uid = self.unit_registry.unit_system_id + if self._units is not None and self._unit_system_id == current_uid: + return self._units + self._unit_system_id = current_uid + self._units = UnitContainer(self.unit_registry) + return self._units + _arr = None @property def arr(self): """Converts an array into a :class:`yt.units.yt_array.YTArray` The returned YTArray will be dimensionless by default, but can be - cast to arbitrary units using the ``input_units`` keyword argument. + cast to arbitrary units using the ``units`` keyword argument. Parameters ---------- input_array : Iterable A tuple, list, or array to attach units to - input_units : String unit specification, unit symbol or astropy object + units: String unit specification, unit symbol or astropy object The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). + input_units : Deprecated in favor of 'units' dtype : string or NumPy dtype object The dtype of the returned array data @@ -1096,16 +1159,17 @@ def quan(self): """Converts an scalar into a :class:`yt.units.yt_array.YTQuantity` The returned YTQuantity will be dimensionless by default, but can be - cast to arbitrary units using the ``input_units`` keyword argument. + cast to arbitrary units using the ``units`` keyword argument. 
Parameters ---------- input_scalar : an integer or floating point scalar The scalar to attach units to - input_units : String unit specification, unit symbol or astropy object + units: String unit specification, unit symbol or astropy object The units of the quantity. Powers must be specified using python syntax (cm**3, not cm^3). + input_units : Deprecated in favor of 'units' dtype : string or NumPy dtype object The dtype of the array data. @@ -1182,9 +1246,10 @@ def add_field(self, name, function=None, sampling_type=None, **kwargs): "force_override=True.", name) if kwargs.setdefault('particle_type', False): if sampling_type is not None and sampling_type != "particle": - raise RuntimeError("Clashing definition of 'sampling_type' and " - "'particle_type'. Note that 'particle_type' is " - "deprecated. Please just use 'sampling_type'.") + raise RuntimeError( + "Clashing definition of 'sampling_type' and " + "'particle_type'. Note that 'particle_type' is " + "deprecated. Please just use 'sampling_type'.") else: sampling_type = "particle" if sampling_type is None: @@ -1277,7 +1342,7 @@ def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic else: raise RuntimeError - units = self.field_info[ptype, deposit_field].units + units = self.field_info[ptype, deposit_field].output_units take_log = self.field_info[ptype, deposit_field].take_log name_map = {"sum": "sum", "std": "std", "cic": "cic", "weighted_mean": "avg", "nearest": "nn", "simple_smooth": "ss", "count": "count"} @@ -1304,7 +1369,7 @@ def _deposit_field(field, data): fields = [np.ascontiguousarray(f) for f in fields] d = data.deposit(pos, fields, method=method, kernel_name=kernel_name) - d = data.ds.arr(d, input_units=units) + d = data.ds.arr(d, units=units) if method == 'weighted_mean': d[np.isnan(d)] = 0.0 return d @@ -1323,6 +1388,8 @@ def add_smoothed_particle_field(self, smooth_field, kernel_name="cubic"): """Add a new smoothed particle field + WARNING: This method is deprecated since yt-4.0. + Creates a new smoothed field based on the particle *smooth_field*. Parameters @@ -1347,32 +1414,9 @@ def add_smoothed_particle_field(self, smooth_field, The field name tuple for the newly created field. """ - # The magical step - self.index - - # Parse arguments - if isinstance(smooth_field, tuple): - ptype, smooth_field = smooth_field[0], smooth_field[1] - else: - raise RuntimeError("smooth_field must be a tuple, received %s" % - smooth_field) - if method != "volume_weighted": - raise NotImplementedError("method must be 'volume_weighted'") - - # Prepare field names and registry to be used later - coord_name = "particle_position" - mass_name = "particle_mass" - smoothing_length_name = "smoothing_length" - if (ptype, smoothing_length_name) not in self.derived_field_list: - raise ValueError("%s not in derived_field_list" % - ((ptype, smoothing_length_name),)) - density_name = "density" - registry = self.field_info - - # Do the actual work - return add_volume_weighted_smoothed_field(ptype, coord_name, mass_name, - smoothing_length_name, density_name, smooth_field, registry, - nneighbors=nneighbors, kernel_name=kernel_name)[0] + issue_deprecation_warning( + "This method is deprecated. " + DEP_MSG_SMOOTH_FIELD + ) def add_gradient_fields(self, input_field): """Add gradient fields. 
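Editorial note: the docstring updates in this file track the rename of the ``input_units`` keyword to ``units`` on ``ds.arr`` and ``ds.quan``. A short usage sketch, assuming ``ds`` is any already-loaded dataset:

import numpy as np

# assuming `ds` is an already-loaded dataset, e.g. ds = yt.load(...)
velocity = ds.arr(np.ones(3), units="km/s")  # preferred keyword
age = ds.quan(1.0e4, "yr")                   # units may also be positional
# The old `input_units` keyword is deprecated in favor of `units`,
# per the docstrings above.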
@@ -1469,21 +1513,30 @@ def define_unit(self, symbol, value, tex_repr=None, offset=None, prefixable=Fals >>> two_weeks = YTQuantity(14.0, "days") >>> ds.define_unit("fortnight", two_weeks) """ - _define_unit(self.unit_registry, symbol, value, tex_repr=tex_repr, - offset=offset, prefixable=prefixable) + define_unit(symbol, value, tex_repr=tex_repr, offset=offset, + prefixable=prefixable, registry=self.unit_registry) def _reconstruct_ds(*args, **kwargs): datasets = ParameterFileStore() ds = datasets.get_ds_hash(*args) return ds +@functools.total_ordering class ParticleFile(object): - def __init__(self, ds, io, filename, file_id): + def __init__(self, ds, io, filename, file_id, range = None): self.ds = ds self.io = weakref.proxy(io) self.filename = filename self.file_id = file_id + if range is None: + range = (None, None) + self.start, self.end = range self.total_particles = self.io._count_particles(self) + # Now we adjust our start/end, in case there are fewer particles than + # we realized + if self.start is None: + self.start = 0 + self.end = max(self.total_particles.values()) + self.start def select(self, selector): pass @@ -1491,22 +1544,47 @@ def select(self, selector): def count(self, selector): pass - def _calculate_offsets(self, fields): + def _calculate_offsets(self, fields, pcounts): pass def __lt__(self, other): - return self.filename < other.filename + if self.filename != other.filename: + return self.filename < other.filename + return self.start < other.start + def __eq__(self, other): + if self.filename != other.filename: + return False + return self.start == other.start + + def __hash__(self): + return hash((self.filename, self.file_id, self.start, self.end)) + class ParticleDataset(Dataset): _unit_base = None filter_bbox = False + _proj_type = 'particle_proj' def __init__(self, filename, dataset_type=None, file_style=None, units_override=None, unit_system="cgs", - n_ref=64, over_refine_factor=1): - self.n_ref = n_ref - self.over_refine_factor = over_refine_factor + index_order=None, index_filename=None): + self.index_order = validate_index_order(index_order) + self.index_filename = index_filename super(ParticleDataset, self).__init__( filename, dataset_type=dataset_type, file_style=file_style, units_override=units_override, unit_system=unit_system) + +def validate_index_order(index_order): + if index_order is None: + index_order = (7, 5) + elif not iterable(index_order): + index_order = (int(index_order), 1) + else: + if len(index_order) != 2: + raise RuntimeError( + 'Tried to load a dataset with index_order={}, but ' + 'index_order\nmust be an integer or a two-element tuple of ' + 'integers.'.format(index_order)) + index_order = tuple([int(o) for o in index_order]) + return index_order diff --git a/yt/data_objects/tests/test_bbox.py b/yt/data_objects/tests/test_bbox.py new file mode 100644 index 00000000000..005e12f98c5 --- /dev/null +++ b/yt/data_objects/tests/test_bbox.py @@ -0,0 +1,51 @@ +# Some tests for finding bounding boxes + +import numpy as np + +from yt.testing import \ + fake_amr_ds, \ + assert_equal, \ + assert_allclose_units + + +def test_object_bbox(): + ds = fake_amr_ds() + reg = ds.box(ds.domain_left_edge+0.5*ds.domain_width, + ds.domain_right_edge-0.5*ds.domain_width) + le, re = reg.get_bbox() + assert_equal(le, ds.domain_left_edge+0.5*ds.domain_width) + assert_equal(re, ds.domain_right_edge-0.5*ds.domain_width) + sp = ds.sphere("c", (0.1, "unitary")) + le, re = sp.get_bbox() + assert_equal(le, -sp.radius+sp.center) + assert_equal(re, sp.radius+sp.center) + 
dk = ds.disk("c", [1,1,0], (0.25, "unitary"), (0.25, "unitary")) + le, re = dk.get_bbox() + le0 = ds.arr([0.5-0.25*np.sqrt(2.0), 0.5-0.25*np.sqrt(2.0), 0.25], "code_length") + re0 = ds.arr([0.5+0.25*np.sqrt(2.0), 0.5+0.25*np.sqrt(2.0), 0.75], "code_length") + assert_allclose_units(le, le0) + assert_allclose_units(re, re0) + ep = ds.ellipsoid("c", 0.3, 0.2, 0.1, np.array([0.1, 0.1, 0.1]), 0.2) + le, re = ep.get_bbox() + assert_equal(le, -ds.quan(0.3, "code_length")+sp.center) + assert_equal(re, ds.quan(0.3, "code_length")+sp.center) + spb = ds.sphere(ds.domain_center-ds.quan(0.1, "code_length"), (0.1, "code_length")) + regb = ds.box(ds.domain_center, ds.domain_center+ds.quan(0.2,"code_length")) + br1 = spb & regb + br2 = spb | regb + br3 = spb ^ regb + br4 = ~regb + le1, re1 = br1.get_bbox() + le2, re2 = br2.get_bbox() + le3, re3 = br3.get_bbox() + le4, re4 = br4.get_bbox() + le0 = ds.arr([0.3, 0.3, 0.3], "code_length") + re0 = ds.arr([0.7, 0.7, 0.7], "code_length") + assert_allclose_units(le1, le0) + assert_allclose_units(re1, re0) + assert_allclose_units(le2, le0) + assert_allclose_units(re2, re0) + assert_allclose_units(le3, le0) + assert_allclose_units(re3, re0) + assert_equal(le4, regb.left_edge) + assert_equal(re4, regb.right_edge) diff --git a/yt/data_objects/tests/test_compose.py b/yt/data_objects/tests/test_compose.py index d6eedcb826f..c8a1ca66909 100644 --- a/yt/data_objects/tests/test_compose.py +++ b/yt/data_objects/tests/test_compose.py @@ -16,7 +16,7 @@ def setup(): # each cell from cell positions def _IDFIELD(field, data): width = data.ds.domain_right_edge - data.ds.domain_left_edge - min_dx = YTArray(1.0/8192, input_units='code_length', + min_dx = YTArray(1.0/8192, units='code_length', registry=data.ds.unit_registry) delta = width / min_dx x = data['x'] - min_dx / 2. 
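Editorial note: the disk case in the new test_bbox.py above exercises the bounding-box formula added to the disk object earlier in this patch. As a standalone sanity check in plain NumPy (nothing from yt is assumed): for a disk with center c, unit normal n, radius r, and half-height h, the per-axis half-extent is h*|n| + r*sqrt(1 - n**2), which reproduces the le0/re0 values used in the test:

import numpy as np

c = np.array([0.5, 0.5, 0.5])      # disk center
n = np.array([1.0, 1.0, 0.0])
n = n / np.linalg.norm(n)          # unit normal
r, h = 0.25, 0.25                  # radius and half-height

half_extent = h * np.abs(n) + r * np.sqrt(1.0 - n ** 2)
left, right = c - half_extent, c + half_extent
# left  -> [0.5 - 0.25*sqrt(2), 0.5 - 0.25*sqrt(2), 0.25]
# right -> [0.5 + 0.25*sqrt(2), 0.5 + 0.25*sqrt(2), 0.75]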
diff --git a/yt/data_objects/tests/test_cutting_plane.py b/yt/data_objects/tests/test_cutting_plane.py index c1b822d62a7..7c938856aa6 100644 --- a/yt/data_objects/tests/test_cutting_plane.py +++ b/yt/data_objects/tests/test_cutting_plane.py @@ -39,14 +39,15 @@ def test_cutting_plane(): frb = cut.to_frb(width, 64) for cut_field in ['ones', 'density']: fi = ds._get_field_info("unknown", cut_field) - assert_equal(frb[cut_field].info['data_source'], + data = frb[cut_field] + assert_equal(data.info['data_source'], cut.__str__()) - assert_equal(frb[cut_field].info['axis'], 4) - assert_equal(frb[cut_field].info['field'], cut_field) - assert_equal(frb[cut_field].units, Unit(fi.units)) - assert_equal(frb[cut_field].info['xlim'], frb.bounds[:2]) - assert_equal(frb[cut_field].info['ylim'], frb.bounds[2:]) - assert_equal(frb[cut_field].info['length_to_cm'], + assert_equal(data.info['axis'], 4) + assert_equal(data.info['field'], cut_field) + assert_equal(data.units, Unit(fi.units)) + assert_equal(data.info['xlim'], frb.bounds[:2]) + assert_equal(data.info['ylim'], frb.bounds[2:]) + assert_equal(data.info['length_to_cm'], ds.length_unit.in_cgs()) - assert_equal(frb[cut_field].info['center'], cut.center) + assert_equal(data.info['center'], cut.center) teardown_func(fns) diff --git a/yt/data_objects/tests/test_data_containers.py b/yt/data_objects/tests/test_data_containers.py index 46b3b51185e..328211fc380 100644 --- a/yt/data_objects/tests/test_data_containers.py +++ b/yt/data_objects/tests/test_data_containers.py @@ -12,7 +12,7 @@ from yt.data_objects.particle_filters import particle_filter from yt.testing import assert_equal, fake_random_ds, fake_amr_ds,\ fake_particle_ds, requires_module -from yt.utilities.exceptions import YTFieldNotFound, YTException +from yt.utilities.exceptions import YTFieldNotFound class TestDataContainers(unittest.TestCase): @classmethod @@ -54,7 +54,7 @@ def test_yt_data_container(self): def test_write_out(self): filename = "sphere.txt" - ds = fake_particle_ds() + ds = fake_random_ds(16) sp = ds.sphere(ds.domain_center, 0.25) sp.write_out(filename, fields=["cell_volume"]) @@ -70,15 +70,8 @@ def test_write_out(self): assert_equal(keys, file_row_1) assert_array_equal(data, file_row_2) - # Test for exception - with assert_raises(YTException) as ex: - sp.write_out(filename, fields=["particle_position_x"]) - desired = ("Field type ['all'] of the supplied field ['particle_position_x']" - " is in consistent with field type 'gas'.") - assert_equal(str(ex.exception)[:50], desired[:50]) - def test_save_object(self): - ds = fake_particle_ds() + ds = fake_random_ds(16) sp = ds.sphere(ds.domain_center, 0.25) sp.save_object("my_sphere_1", filename="test_save_obj") obj = shelve.open("test_save_obj", protocol=-1) @@ -168,8 +161,8 @@ def fun(field, data): expected_size = (dd['io', 'particle_mass'].to('code_mass') > 0.5).sum() - fields_to_test = (f for f in ds.derived_field_list - if f[0] == 'massive') + fields_to_test = [f for f in ds.derived_field_list + if f[0] == 'massive'] def test_this(fname): data = dd[fname] diff --git a/yt/data_objects/tests/test_dataset_access.py b/yt/data_objects/tests/test_dataset_access.py index 811ac0c9d68..53aa828e2c5 100644 --- a/yt/data_objects/tests/test_dataset_access.py +++ b/yt/data_objects/tests/test_dataset_access.py @@ -1,8 +1,13 @@ import numpy as np from nose.tools import assert_raises -from yt.testing import assert_equal, fake_amr_ds, fake_particle_ds, \ - fake_random_ds, requires_file +from yt.testing import \ + assert_equal, \ + assert_almost_equal, 
\ + fake_amr_ds, \ + fake_particle_ds, \ + fake_random_ds, \ + requires_file from yt.utilities.answer_testing.framework import data_dir_load from yt.utilities.exceptions import YTDimensionalityError from yt.visualization.line_plot import LineBuffer @@ -24,9 +29,9 @@ def test_box_creation(): region = ds.region(center, left_edge, right_edge) for b in boxes: - assert_equal(b.left_edge, region.left_edge) - assert_equal(b.right_edge, region.right_edge) - assert_equal(b.center, region.center) + assert_almost_equal(b.left_edge, region.left_edge) + assert_almost_equal(b.right_edge, region.right_edge) + assert_almost_equal(b.center, region.center) def test_region_from_d(): ds = fake_amr_ds(fields=["density"]) @@ -127,8 +132,8 @@ def test_ray_from_r(): start = [(0.1,"cm"), 0.2, (0.3,"cm")] end = [(0.5,"cm"), (0.4,"cm"), 0.6] ray5 = ds.r[start:end] - start_arr = [ds.quan(0.1,"cm"), 0.2, ds.quan(0.3,"cm")] - end_arr = [ds.quan(0.5,"cm"), ds.quan(0.4,"cm"), 0.6] + start_arr = [ds.quan(0.1, "cm"), ds.quan(0.2, "cm"), ds.quan(0.3, "cm")] + end_arr = [ds.quan(0.5, "cm"), ds.quan(0.4, "cm"), ds.quan(0.6, "cm")] ray6 = ds.ray(start_arr, end_arr) assert_equal(ray5["density"], ray6["density"]) diff --git a/yt/data_objects/tests/test_derived_quantities.py b/yt/data_objects/tests/test_derived_quantities.py index 1a45beaeabc..481a7f5673f 100644 --- a/yt/data_objects/tests/test_derived_quantities.py +++ b/yt/data_objects/tests/test_derived_quantities.py @@ -1,11 +1,16 @@ +from __future__ import division + import numpy as np +import yt from yt.testing import \ fake_random_ds, \ + fake_sph_orientation_ds, \ fake_particle_ds, \ + assert_almost_equal, \ assert_equal, \ assert_rel_equal, \ - assert_almost_equal + requires_file from yt import particle_filter @@ -121,6 +126,58 @@ def test_sample_at_max_field_values(): assert_equal(ad["temperature"][mi], temp) assert_equal(ad["velocity_x"][mi], vm) +def test_in_memory_sph_derived_quantities(): + ds = fake_sph_orientation_ds() + ad = ds.all_data() + + ang_mom = ad.quantities.angular_momentum_vector() + assert_equal(ang_mom, [0, 0, 0]) + + bv = ad.quantities.bulk_velocity() + assert_equal(bv, [0, 0, 0]) + + com = ad.quantities.center_of_mass() + assert_equal(com, [1/7, (1+2)/7, (1+2+3)/7]) + + ex = ad.quantities.extrema(['x', 'y', 'z']) + for fex, ans in zip(ex, [[0, 1], [0, 2], [0, 3]]): + assert_equal(fex, ans) + + for d, v, l in zip('xyz', [1, 2, 3], [[1, 0, 0], [0, 2, 0], [0, 0, 3]]): + max_d, x, y, z = ad.quantities.max_location(d) + assert_equal(max_d, v) + assert_equal([x, y, z], l) + + for d in 'xyz': + min_d, x, y, z = ad.quantities.min_location(d) + assert_equal(min_d, 0) + assert_equal([x, y, z], [0, 0, 0]) + + tot_m = ad.quantities.total_mass() + assert_equal(tot_m, [7, 0]) + + weighted_av_z = ad.quantities.weighted_average_quantity('z', 'z') + assert_equal(weighted_av_z, 7/3) + +iso_collapse = "IsothermalCollapse/snap_505" +tipsy_gal = 'TipsyGalaxy/galaxy.00300' + +@requires_file(iso_collapse) +@requires_file(tipsy_gal) +def test_sph_datasets_derived_quantities(): + for fname in [tipsy_gal, iso_collapse]: + ds = yt.load(fname) + ad = ds.all_data() + use_particles = 'nbody' in ds.particle_types + ad.quantities.angular_momentum_vector() + ad.quantities.bulk_velocity(True, use_particles) + ad.quantities.center_of_mass(True, use_particles) + ad.quantities.extrema([('gas', 'density'), ('gas', 'temperature')]) + ad.quantities.min_location(('gas', 'density')) + ad.quantities.max_location(('gas', 'density')) + ad.quantities.total_mass() + 
ad.quantities.weighted_average_quantity(('gas', 'density'), ('gas', 'mass')) + def test_derived_quantities_with_particle_types(): ds = fake_particle_ds() diff --git a/yt/data_objects/tests/test_disks.py b/yt/data_objects/tests/test_disks.py index 75739b7f30a..f514011e4f8 100644 --- a/yt/data_objects/tests/test_disks.py +++ b/yt/data_objects/tests/test_disks.py @@ -19,7 +19,7 @@ def test_bad_disk_input(): with assert_raises(TypeError) as ex: ds.disk(ds.domain_center, [0, 0, 1], ds.domain_center, (20, 'kpc')) desired = ("Expected a numeric value (or size-1 array)," - " received 'yt.units.yt_array.YTArray' of length 3") + " received 'unyt.array.unyt_array' of length 3") assert_equal(str(ex.exception), desired) # Test invalid float @@ -34,7 +34,7 @@ def test_bad_disk_input(): ds.disk(ds.domain_center, [0, 0, 1], (10, 'kpc'), (20, 'kpc'), fields=YTQuantity(1, 'kpc')) desired = ("Expected an iterable object, received" - " 'yt.units.yt_array.YTQuantity'") + " 'unyt.array.unyt_quantity'") assert_equal(str(ex.exception), desired) # Test invalid object diff --git a/yt/data_objects/tests/test_image_array.py b/yt/data_objects/tests/test_image_array.py index cefa42c4a7f..f6bd81d8504 100644 --- a/yt/data_objects/tests/test_image_array.py +++ b/yt/data_objects/tests/test_image_array.py @@ -52,7 +52,7 @@ def setUp(self): os.chdir(self.tmpdir) def test_image_arry_units(self): - im_arr = ImageArray(dummy_image(0.3, 3), input_units='cm') + im_arr = ImageArray(dummy_image(0.3, 3), units='cm') assert str(im_arr.units) == 'cm' @@ -67,7 +67,7 @@ def test_image_array_hdf5(self): 'normal_vector': np.array([0., 1., 0.]), 'width': 0.245, 'type': 'rendering'} - im_arr = ImageArray(dummy_image(0.3, 3), input_units='cm', info=myinfo) + im_arr = ImageArray(dummy_image(0.3, 3), units='cm', info=myinfo) im_arr.save('test_3d_ImageArray', png=False) im = np.zeros([64, 128]) @@ -79,7 +79,7 @@ def test_image_array_hdf5(self): 'normal_vector': np.array([0., 1., 0.]), 'width': 0.245, 'type': 'rendering'} - im_arr = ImageArray(im, info=myinfo, input_units='cm') + im_arr = ImageArray(im, info=myinfo, units='cm') im_arr.save('test_2d_ImageArray', png=False) im_arr.save('test_2d_ImageArray_ds', png=False, dataset_name='Random_DS') diff --git a/yt/data_objects/tests/test_octree.py b/yt/data_objects/tests/test_octree.py new file mode 100644 index 00000000000..cf82fb97d12 --- /dev/null +++ b/yt/data_objects/tests/test_octree.py @@ -0,0 +1,97 @@ +from yt.testing import \ + fake_sph_grid_ds, assert_almost_equal +from yt.data_objects.construction_data_containers import YTOctree +import tempfile +import os +import shutil +import numpy as np + +def test_building_tree(): + ''' + Test function to build an octree and make sure correct number of particles + ''' + ds = fake_sph_grid_ds() + octree = ds.octree(n_ref=1) + assert(type(octree) == YTOctree) + assert(octree[('index', 'x')].shape[0] == 456) + +def test_saving_loading(): + ''' + This builds an octree, writes to file, reloads and ensure that the reloaded + octree is the same as the initial built tree. 
+ ''' + tmpdir = tempfile.mkdtemp() + curdir = os.getcwd() + os.chdir(tmpdir) + + ds = fake_sph_grid_ds() + ds.tree_filename = tmpdir+"test.octree" + ds._file_hash = 1 + octree = ds.octree(n_ref=1) + + ds2 = fake_sph_grid_ds() + ds2.tree_filename = tmpdir+"test.octree" + ds2._file_hash = 1 + octree_loaded = ds2.octree(n_ref=1) + + assert(octree == octree_loaded) + assert(octree_loaded.loaded) + + os.chdir(curdir) + shutil.rmtree(tmpdir) + +def test_sph_interpolation_scatter(): + ''' + Just generate an octree, perform some SPH interpolation and check with some + answer testing + ''' + + ds = fake_sph_grid_ds(hsml_factor=26.0) + ds.use_sph_normalization = False + ds._sph_ptypes = ('io',) + octree = ds.octree(n_ref=5, over_refine_factor=0) + density = octree[('io', 'density')] + answers = np.array([1.00434706, 1.00434706, 1.00434706, 1.00434706, + 1.00434706, 1.00434706, 1.00434706, 0.7762907, + 0.89250848, 0.89250848, 0.97039088, 0.89250848, + 0.97039088, 0.97039088, 1.01156175]) + + assert_almost_equal(density.d, answers) + +def test_sph_interpolation_gather(): + ''' + Just generate an octree, perform some SPH interpolation and check with some + answer testing + ''' + ds = fake_sph_grid_ds(hsml_factor=26.0) + ds.index + ds.sph_smoothing_style = 'gather' + ds.num_neighbors = 5 + ds.use_sph_normalization = False + ds._sph_ptypes = ('io',) + octree = ds.octree(n_ref=5, over_refine_factor=0) + density = octree[('io', 'density')] + answers = np.array([0.59240874, 0.59240874, 0.59240874, 0.59240874, + 0.59240874, 0.59240874, 0.59240874, 0.10026846, + 0.77014968, 0.77014968, 0.96127825, 0.77014968, + 0.96127825, 0.96127825, 1.21183996]) + + assert_almost_equal(density.d, answers) + +def test_over_refine_factor(): + ''' + Ensure that the octree over refine factor is behaving as expected + ''' + ds = fake_sph_grid_ds() + octree = ds.octree(n_ref=1, over_refine_factor=2) + num_cells = octree[('index', 'x')].shape[0] + assert(num_cells == 3648) + +def test_density_factor(): + ''' + Ensure the dense tree functionality is working + ''' + ds = fake_sph_grid_ds() + octree = ds.octree(n_ref=1, density_factor=2) + num_cells = octree[('index', 'x')].shape[0] + assert(num_cells == 512) diff --git a/yt/data_objects/tests/test_particle_filter.py b/yt/data_objects/tests/test_particle_filter.py index 432f22808c0..365e3d68ae2 100644 --- a/yt/data_objects/tests/test_particle_filter.py +++ b/yt/data_objects/tests/test_particle_filter.py @@ -1,9 +1,15 @@ from __future__ import print_function +import numpy as np +import os +import shutil +import tempfile + from nose.tools import assert_raises from yt.data_objects.particle_filters import add_particle_filter, particle_filter -from yt.testing import assert_equal, fake_random_ds +from yt.testing import assert_equal, fake_random_ds, fake_sph_grid_ds +from yt.visualization.plot_window import ProjectionPlot from yt.utilities.exceptions import YTIllDefinedFilter, \ YTIllDefinedParticleFilter @@ -170,3 +176,24 @@ def heavy_stars(pfilter, data): grid['heavy_stars', 'particle_mass'].shape[0]) assert_equal(cg['heavy_stars', 'particle_mass'].shape[0], grid['heavy_stars', 'particle_mass'].shape[0]) + +def test_sph_particle_filter_plotting(): + ds = fake_sph_grid_ds() + + @particle_filter("central_gas", requires=["particle_position"], filtered_type="io") + def _filter(pfilter, data): + coords = np.abs(data[pfilter.filtered_type, "particle_position"]) + return ( + (coords[:, 0] < 1.6) & (coords[:, 1] < 1.6) & (coords[:, 2] < 1.6)) + + ds.add_particle_filter("central_gas") + + plot = 
ProjectionPlot(ds, 'z', ('central_gas', 'density')) + tmpdir = tempfile.mkdtemp() + curdir = os.getcwd() + os.chdir(tmpdir) + + plot.save() + + os.chdir(curdir) + shutil.rmtree(tmpdir) diff --git a/yt/data_objects/tests/test_pickle.py b/yt/data_objects/tests/test_pickle.py index 2fb951e9350..b1b53a7b9fa 100644 --- a/yt/data_objects/tests/test_pickle.py +++ b/yt/data_objects/tests/test_pickle.py @@ -1,18 +1,4 @@ -""" -Testsuite for pickling yt objects. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- -from yt.extern.six.moves import cPickle +import pickle import os import tempfile from yt.testing \ @@ -44,11 +30,11 @@ def test_save_load_pickle(): # save object cpklfile = tempfile.NamedTemporaryFile(delete=False) - cPickle.dump(contours[1][0], cpklfile) + pickle.dump(contours[1][0], cpklfile) cpklfile.close() # load object - test_load = cPickle.load(open(cpklfile.name, "rb")) + test_load = pickle.load(open(cpklfile.name, "rb")) assert_equal.description = \ "%s: File was pickle-loaded successfully" % __name__ diff --git a/yt/data_objects/tests/test_profiles.py b/yt/data_objects/tests/test_profiles.py index 9173c320dab..2ca61f49d25 100644 --- a/yt/data_objects/tests/test_profiles.py +++ b/yt/data_objects/tests/test_profiles.py @@ -15,6 +15,7 @@ assert_raises,\ assert_rel_equal, \ fake_random_ds, \ + fake_sph_orientation_ds, \ requires_module from yt.utilities.exceptions import YTIllDefinedProfile from yt.visualization.profile_plotter import ProfilePlot, PhasePlot @@ -27,16 +28,15 @@ def test_profiles(): ds = fake_random_ds(64, nprocs = 8, fields = _fields, units = _units) nv = ds.domain_dimensions.prod() dd = ds.all_data() - (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"]( - ["density", "temperature", "dinosaurs"]) rt, tt, dt = dd.quantities["TotalQuantity"]( ["density", "temperature", "dinosaurs"]) e1, e2 = 0.9, 1.1 for nb in [8, 16, 32, 64]: for input_units in ['mks', 'cgs']: - for ex in [rmi, rma, tmi, tma, dmi, dma]: - getattr(ex, 'convert_to_%s' % input_units)() + (rmi, rma), (tmi, tma), (dmi, dma) = [ + getattr(ex, 'in_%s' % input_units)() for ex in + dd.quantities["Extrema"](["density", "temperature", "dinosaurs"])] # We log all the fields or don't log 'em all. No need to do them # individually. 
for lf in [True, False]: @@ -305,6 +305,12 @@ def DM_in_cell_mass(field, data): assert not np.any(np.isnan(profile['gas', 'radial_velocity'])) +def test_profile_sph_data(): + ds = fake_sph_orientation_ds() + # test we create a profile without raising YTIllDefinedProfile + yt.create_profile(ds.all_data(), ['density', 'temperature'], + ['kinetic_energy'], weight_field=None) + def test_profile_override_limits(): ds = fake_random_ds(64, nprocs = 8, fields = _fields, units = _units) diff --git a/yt/data_objects/tests/test_rays.py b/yt/data_objects/tests/test_rays.py index b27219d7ce2..756d62e0429 100644 --- a/yt/data_objects/tests/test_rays.py +++ b/yt/data_objects/tests/test_rays.py @@ -53,10 +53,8 @@ def test_ray(): assert_rel_equal(my_ray['dts'].sum(), unitary, 14) @requires_file('GadgetDiskGalaxy/snapshot_200.hdf5') -def test_ray_in_particle_octree(): +def test_ray_particle(): ds = load('GadgetDiskGalaxy/snapshot_200.hdf5') - start = ds.arr([31995.63476562, 31473.6640625, 28969.88671875], "code_length") - end = ds.arr([31995.63476562, 31473.6640625, 29219.88671875], "code_length") - ray = ds.ray(start, end) - ray["t"] - assert_equal(ray["dts"].sum(dtype="f8"), 1.0) + ray = ds.ray(ds.domain_left_edge, ds.domain_right_edge) + assert_equal(ray["t"].shape, (1451,)) + assert ray["dts"].sum(dtype="f8") > 0 diff --git a/yt/data_objects/tests/test_slice.py b/yt/data_objects/tests/test_slice.py index 7223639ca26..8e8a2e71c87 100644 --- a/yt/data_objects/tests/test_slice.py +++ b/yt/data_objects/tests/test_slice.py @@ -1,16 +1,3 @@ -""" -Tests for AMRSlice - - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-# ---------------------------------------------------------------------------- import os import numpy as np import tempfile diff --git a/yt/data_objects/tests/test_sph_data_objects.py b/yt/data_objects/tests/test_sph_data_objects.py new file mode 100644 index 00000000000..1ba56a70f35 --- /dev/null +++ b/yt/data_objects/tests/test_sph_data_objects.py @@ -0,0 +1,325 @@ +import numpy as np + +from yt.testing import \ + assert_equal, \ + fake_sph_orientation_ds, fake_sph_grid_ds +from yt import SlicePlot + +def test_point(): + ds = fake_sph_orientation_ds() + field_data = ds.stream_handler.fields['stream_file'] + ppos = [field_data['io', 'particle_position_%s' % d] for d in 'xyz'] + ppos = np.array(ppos).T + for pos in ppos: + for i in range(-1, 2): + offset = 0.1*np.array([i, 0, 0]) + pt = ds.point(pos + offset) + assert_equal(pt['gas', 'density'].shape[0], 1) + for j in range(-1, 2): + offset = 0.1*np.array([0, j, 0]) + pt = ds.point(pos + offset) + assert_equal(pt['gas', 'density'].shape[0], 1) + for k in range(-1, 2): + offset = 0.1*np.array([0, 0, k]) + pt = ds.point(pos + offset) + assert_equal(pt['gas', 'density'].shape[0], 1) + + +# The number of particles along each slice axis at that coordinate +SLICE_ANSWERS = { + ('x', 0): 6, + ('x', 0.5): 0, + ('x', 1): 1, + ('y', 0): 5, + ('y', 1): 1, + ('y', 2): 1, + ('z', 0): 4, + ('z', 1): 1, + ('z', 2): 1, + ('z', 3): 1, +} + +def test_slice(): + ds = fake_sph_orientation_ds() + for (ax, coord), answer in SLICE_ANSWERS.items(): + # test that we can still select particles even if we offset the slice + # within each particle's smoothing volume + for i in range(-1, 2): + sl = ds.slice(ax, coord + i*0.1) + assert_equal(sl['gas', 'density'].shape[0], answer) + + +REGION_ANSWERS = { + ((-4, -4, -4), (4, 4, 4)): 7, + ((0, 0, 0), (4, 4, 4)): 7, + ((1, 0, 0), (4, 4, 4)): 1, + ((0, 1, 0), (4, 4, 4)): 2, + ((0, 0, 1), (4, 4, 4)): 3, + ((0, 0, 0), (4, 4, 2)): 6, + ((0, 0, 0), (4, 4, 1)): 5, + ((0, 0, 0), (4, 1, 4)): 6, + ((0, 0, 0), (1, 1, 4)): 6, +} + +def test_region(): + ds = fake_sph_orientation_ds() + for (left_edge, right_edge), answer in REGION_ANSWERS.items(): + # test that regions enclosing a particle's smoothing region + # correctly select SPH particles + for i in range(-1, 2): + for j in range(-1, 2): + le = np.array([le + i*0.1 for le in left_edge]) + re = np.array([re + j*0.1 for re in right_edge]) + + # check if we went off the edge of the domain + whl = le < ds.domain_left_edge + le[whl] = ds.domain_left_edge[whl] + whr = re > ds.domain_right_edge + re[whr] = ds.domain_right_edge[whr] + + reg = ds.box(le, re) + assert_equal(reg['gas', 'density'].shape[0], answer) + + +SPHERE_ANSWERS = { + ((0, 0, 0), 4): 7, + ((0, 0, 0), 3): 7, + ((0, 0, 0), 2): 6, + ((0, 0, 0), 1): 4, + ((0, 0, 0), 0.5): 1, + ((1, 0, 0), 0.5): 1, + ((1, 0, 0), 1.0): 2, + ((0, 1, 0), 1.0): 3, + ((0, 0, 1), 1.0): 3, +} + +def test_sphere(): + ds = fake_sph_orientation_ds() + for (center, radius), answer in SPHERE_ANSWERS.items(): + # test that spheres enclosing a particle's smoothing region + # correctly select SPH particles + for i in range(-1, 2): + for j in range(-1, 2): + cent = np.array([c + i*0.1 for c in center]) + rad = radius + 0.1*j + sph = ds.sphere(cent, rad) + assert_equal(sph['gas', 'density'].shape[0], answer) + + +DISK_ANSWERS = { + ((0, 0, 0), (0, 0, 1), 4, 3): 7, + ((0, 0, 0), (0, 0, 1), 4, 2): 6, + ((0, 0, 0), (0, 0, 1), 4, 1): 5, + ((0, 0, 0), (0, 0, 1), 4, 0.5): 4, + ((0, 0, 0), (0, 1, 0), 4, 3): 7, + ((0, 0, 0), (0, 1, 0), 4, 2): 7, + ((0, 0, 0), (0, 
1, 0), 4, 1): 6, + ((0, 0, 0), (0, 1, 0), 4, 0.5): 5, + ((0, 0, 0), (1, 0, 0), 4, 3): 7, + ((0, 0, 0), (1, 0, 0), 4, 2): 7, + ((0, 0, 0), (1, 0, 0), 4, 1): 7, + ((0, 0, 0), (1, 0, 0), 4, 0.5): 6, + ((0, 0, 0), (1, 1, 1), 1, 1): 4, + ((-0.5, -0.5, -0.5), (1, 1, 1), 4, 4): 7, +} + +def test_disk(): + ds = fake_sph_orientation_ds() + for (center, normal, radius, height), answer in DISK_ANSWERS.items(): + # test that disks enclosing a particle's smoothing region + # correctly select SPH particles + for i in range(-1, 2): + cent = np.array([c + i*0.1 for c in center]) + disk = ds.disk(cent, normal, radius, height) + assert_equal(disk['gas', 'density'].shape[0], answer) + + +RAY_ANSWERS = { + ((0, 0, 0), (3, 0, 0)): 2, + ((0, 0, 0), (0, 3, 0)): 3, + ((0, 0, 0), (0, 0, 3)): 4, + ((0, 1, 0), (0, 2, 0)): 2, + ((1, 0, 0), (0, 2, 0)): 2, + ((0.5, 0.5, 0.5), (0.5, 0.5, 3.5)): 0, +} + +def test_ray(): + ds = fake_sph_orientation_ds() + for (start_point, end_point), answer in RAY_ANSWERS.items(): + for i in range(-1, 2): + start = np.array([s + i*0.1 for s in start_point]) + end = np.array([e + i*0.1 for e in end_point]) + ray = ds.ray(start, end) + assert_equal(ray['gas', 'density'].shape[0], answer) + + +CUTTING_ANSWERS = { + ((1, 0, 0), (0, 0, 0)): 6, + ((0, 1, 0), (0, 0, 0)): 5, + ((0, 0, 1), (0, 0, 0)): 4, + ((1, 1, 1), (1./3, 1./3, 1./3)): 3, + ((1, 1, 1), (2./3, 2./3, 2./3)): 2, + ((1, 1, 1), (1, 1, 1)): 1, +} + +def test_cutting(): + ds = fake_sph_orientation_ds() + for (normal, center), answer in CUTTING_ANSWERS.items(): + for i in range(-1, 2): + cen = [c + 0.1*c for c in center] + cut = ds.cutting(normal, cen) + assert_equal(cut['gas', 'density'].shape[0], answer) + +def test_chained_selection(): + ds = fake_sph_orientation_ds() + + for (center, radius), answer in SPHERE_ANSWERS.items(): + sph = ds.sphere(center, radius) + region = ds.box(ds.domain_left_edge, ds.domain_right_edge, + data_source=sph) + assert_equal(region['gas', 'density'].shape[0], answer) + +def test_boolean_selection(): + ds = fake_sph_orientation_ds() + + sph = ds.sphere([0, 0, 0], 0.5) + + sph2 = ds.sphere([1, 0, 0], 0.5) + + reg = ds.all_data() + + neg = reg - sph + + assert_equal(neg['gas', 'density'].shape[0], 6) + + plus = sph + sph2 + + assert_equal(plus['gas', 'density'].shape[0], 2) + + intersect = sph & sph2 + + assert_equal(intersect['gas', 'density'].shape[0], 0) + + intersect = reg & sph2 + + assert_equal(intersect['gas', 'density'].shape[0], 1) + + exclusive = sph ^ sph2 + + assert_equal(exclusive['gas', 'density'].shape[0], 2) + + exclusive = sph ^ reg + + assert_equal(exclusive['gas', 'density'].shape[0], 6) + + intersect = ds.intersection([sph, sph2]) + + assert_equal(intersect['gas', 'density'].shape[0], 0) + + intersect = ds.intersection([reg, sph2]) + + assert_equal(intersect['gas', 'density'].shape[0], 1) + + union = ds.union([sph, sph2]) + + assert_equal(union['gas', 'density'].shape[0], 2) + + union = ds.union([sph, reg]) + + assert_equal(union['gas', 'density'].shape[0], 7) + +def test_arbitrary_grid(): + ds = fake_sph_grid_ds() + + # this loads up some sph data in a test grid + agrid = ds.arbitrary_grid([0, 0, 0], [3, 3, 3], dims=[3, 3, 3]) + + # the field should be equal to the density of a particle in every voxel + # which is 1. 
+ dens = agrid['gas', 'density'] + answers = np.ones(shape=(3,3,3)) + + assert_equal(dens, answers) + +def test_compare_arbitrary_grid_slice(): + ds = fake_sph_orientation_ds() + c = np.array([0., 0., 0.]) + width = 1.5 + buff_size = 51 + field = ('gas', 'density') + + # buffer from arbitrary grid + ag = ds.arbitrary_grid(c - width / 2, + c + width / 2, + [buff_size]*3) + buff_ag = ag[field][:, :, int(np.floor(buff_size/2))].d.T + + # buffer from slice + p = SlicePlot(ds, 'z', field, center=c, width=width) + p.set_buff_size(51) + buff_slc = p.frb.data[field].d + + assert_equal(buff_slc, buff_ag) + +def test_gather_slice(): + ds = fake_sph_grid_ds() + ds.num_neighbors = 5 + field = ('gas', 'density') + + c = np.array([1.5, 1.5, 0.5]) + width = 3.0 + + p = SlicePlot(ds, 'z', field, center=c, width=width) + p.set_buff_size(3) + buff_scatter = p.frb.data[field].d + + ds.sph_smoothing_style = "gather" + + p = SlicePlot(ds, 'z', field, center=c, width=width) + p.set_buff_size(3) + buff_gather = p.frb.data[field].d + + assert_equal(buff_scatter, buff_gather) + +def test_gather_grid(): + ds = fake_sph_grid_ds() + ds.num_neighbors = 5 + field = ('gas', 'density') + + ag = ds.arbitrary_grid([0, 0, 0], [3, 3, 3], dims=[3, 3, 3]) + scatter = ag[field] + + ds.sph_smoothing_style = "gather" + ag = ds.arbitrary_grid([0, 0, 0], [3, 3, 3], dims=[3, 3, 3]) + gather = ag[field] + + assert_equal(gather, scatter) + +def test_covering_grid_scatter(): + ds = fake_sph_grid_ds() + field = ('gas', 'density') + buff_size = 8 + + ag = ds.arbitrary_grid(0, 3, [buff_size]*3) + ag_dens = ag[field].to('g*cm**-3').d + + cg = ds.covering_grid(3, 0, 8) + cg_dens = cg[field].to('g*cm**-3').d + + assert_equal(ag_dens, cg_dens) + +def test_covering_grid_gather(): + ds = fake_sph_grid_ds() + ds.sph_smoothing_style = 'gather' + ds.num_neighbors = 5 + field = ('gas', 'density') + buff_size = 8 + + ag = ds.arbitrary_grid(0, 3, [buff_size]*3) + ag_dens = ag[field].to('g*cm**-3').d + + cg = ds.covering_grid(3, 0, 8) + cg_dens = cg[field].to('g*cm**-3').d + + assert_equal(ag_dens, cg_dens) + diff --git a/yt/data_objects/tests/test_spheres.py b/yt/data_objects/tests/test_spheres.py index 3ca85007495..293fd29e52d 100644 --- a/yt/data_objects/tests/test_spheres.py +++ b/yt/data_objects/tests/test_spheres.py @@ -2,7 +2,10 @@ from numpy.testing import assert_array_equal from yt.data_objects.profiles import create_profile -from yt.testing import fake_random_ds, assert_equal, periodicity_cases +from yt.testing import fake_random_ds, assert_equal, periodicity_cases, \ + assert_raises, requires_module + +from yt.utilities.exceptions import YTException def setup(): @@ -77,3 +80,33 @@ def test_sphere_center(): sp1 = ds.sphere("min", (0.25, 'unitary')) sp2 = ds.sphere("min_density", (0.25, 'unitary')) assert_array_equal(sp1.center, sp2.center) + +@requires_module("MiniballCpp") +def test_minimal_sphere(): + ds = fake_random_ds(16, nprocs=8, particles=100) + + pos = ds.r['particle_position'] + sp1 = ds.minimal_sphere(pos) + + N0 = len(pos) + + # Check all particles have been found + N1 = len(sp1['particle_ones']) + assert_equal(N0, N1) + + # Check that any smaller sphere is missing some particles + sp2 = ds.sphere(sp1.center, sp1.radius*0.9) + N2 = len(sp2['particle_ones']) + assert N2 < N0 + +@requires_module("MiniballCpp") +def test_minimal_sphere_bad_inputs(): + ds = fake_random_ds(16, nprocs=8, particles=100) + pos = ds.r['particle_position'] + + ## Check number of points >= 2 + # -> should fail + assert_raises(YTException, ds.minimal_sphere, 
pos[:1, :]) + + # -> should not fail + ds.minimal_sphere(pos[:2, :]) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 55e6fee41c8..f44e0f09094 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -1,18 +1,3 @@ -""" -Time series analysis functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import inspect import functools import glob @@ -22,7 +7,6 @@ from functools import wraps -from yt.extern.six import add_metaclass, string_types from yt.convenience import load from yt.config import ytcfg from yt.data_objects.data_containers import data_object_registry @@ -143,7 +127,7 @@ class DatasetSeries(object): ... SlicePlot(ds, "x", "Density").save() ... >>> def print_time(ds): - ... print ds.current_time + ... print(ds.current_time) ... >>> ts = DatasetSeries( ... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0", @@ -154,7 +138,7 @@ class DatasetSeries(object): """ def __new__(cls, outputs, *args, **kwargs): - if isinstance(outputs, string_types): + if isinstance(outputs, str): outputs = get_filenames_from_glob_pattern(outputs) ret = super(DatasetSeries, cls).__new__(cls) try: @@ -167,7 +151,7 @@ def __init__(self, outputs, parallel = True, setup_function = None, mixed_dataset_types = False, **kwargs): # This is needed to properly set _pre_outputs for Simulation subclasses. self._mixed_dataset_types = mixed_dataset_types - if iterable(outputs) and not isinstance(outputs, string_types): + if iterable(outputs) and not isinstance(outputs, str): self._pre_outputs = outputs[:] self.tasks = AnalysisTaskProxy(self) self.params = TimeSeriesParametersContainer(self) @@ -183,7 +167,7 @@ def __init__(self, outputs, parallel = True, setup_function = None, def __iter__(self): # We can make this fancier, but this works for o in self._pre_outputs: - if isinstance(o, string_types): + if isinstance(o, str): ds = self._load(o, **self.kwargs) self._setup_function(ds) yield ds @@ -199,7 +183,7 @@ def __getitem__(self, key): parallel=self.parallel, **self.kwargs) o = self._pre_outputs[key] - if isinstance(o, string_types): + if isinstance(o, str): o = self._load(o, **self.kwargs) self._setup_function(o) return o @@ -260,7 +244,7 @@ def piter(self, storage = None, dynamic = False): This demonstrates how one might store results: >>> def print_time(ds): - ... print ds.current_time + ... print(ds.current_time) ... >>> ts = DatasetSeries("DD*/DD*.index", ... setup_function = print_time ) @@ -271,7 +255,7 @@ def piter(self, storage = None, dynamic = False): ... sto.result = (v, c) ... >>> for i, (v, c) in sorted(my_storage.items()): - ... print "% 4i %0.3e" % (i, v) + ... print("% 4i %0.3e" % (i, v)) ... This shows how to dispatch 4 processors to each dataset: @@ -305,7 +289,7 @@ def piter(self, storage = None, dynamic = False): if storage is not None: sto, output = output - if isinstance(output, string_types): + if isinstance(output, str): ds = self._load(output, **self.kwargs) self._setup_function(ds) else: @@ -374,7 +358,7 @@ def from_filenames(cls, filenames, parallel = True, setup_function = None, -------- >>> def print_time(ds): - ... print ds.current_time + ... print(ds.current_time) ... 
>>> ts = DatasetSeries.from_filenames( ... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0", @@ -385,13 +369,13 @@ def from_filenames(cls, filenames, parallel = True, setup_function = None, """ - if isinstance(filenames, string_types): + if isinstance(filenames, str): filenames = get_filenames_from_glob_pattern(filenames) # This will crash with a less informative error if filenames is not # iterable, but the plural keyword should give users a clue... for fn in filenames: - if not isinstance(fn, string_types): + if not isinstance(fn, str): raise YTOutputNotIdentified("DataSeries accepts a list of " "strings, but " "received {0}".format(fn)) @@ -455,7 +439,7 @@ def particle_trajectories(self, indices, fields=None, suppress_logging=False, pt >>> ts = DatasetSeries(my_fns) >>> trajs = ts.particle_trajectories(indices, fields=fields) >>> for t in trajs : - >>> print t["particle_velocity_x"].max(), t["particle_velocity_x"].min() + >>> print(t["particle_velocity_x"].max(), t["particle_velocity_x"].min()) Note ---- @@ -507,8 +491,7 @@ def __init__(cls, name, b, d): simulation_time_series_registry[code_name] = cls mylog.debug("Registering simulation: %s as %s", code_name, cls) -@add_metaclass(RegisteredSimulationTimeSeries) -class SimulationTimeSeries(DatasetSeries): +class SimulationTimeSeries(DatasetSeries, metaclass = RegisteredSimulationTimeSeries): def __init__(self, parameter_filename, find_outputs=False): """ Base class for generating simulation time series types. diff --git a/yt/data_objects/unions.py b/yt/data_objects/unions.py index 94fed3391e3..7f874f740c6 100644 --- a/yt/data_objects/unions.py +++ b/yt/data_objects/unions.py @@ -1,23 +1,7 @@ -""" -Union structures which can be used to form unions of particles, meshes, -etc. Union is the base class from which trivial named union classes -can be derived - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.funcs import ensure_list class Union(object): + _union_type = "" def __init__(self, name, sub_types): self.name = name self.sub_types = ensure_list(sub_types) @@ -26,6 +10,11 @@ def __iter__(self): for st in self.sub_types: yield st + def __repr__(self): + return "{} Union: '{}' composed of: {}".format( + self._union_type.capitalize(), self.name, self.sub_types) + class MeshUnion(Union): + _union_type = "mesh" def __init__(self, name, sub_types): super(MeshUnion, self).__init__(name, sub_types) diff --git a/yt/data_objects/unstructured_mesh.py b/yt/data_objects/unstructured_mesh.py index aac34812da7..636f8f40100 100644 --- a/yt/data_objects/unstructured_mesh.py +++ b/yt/data_objects/unstructured_mesh.py @@ -1,18 +1,3 @@ -""" -Unstructured mesh base container. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import mylog diff --git a/yt/extern/parameterized.py b/yt/extern/parameterized.py index 4b838fc50eb..ef015ba6862 100644 --- a/yt/extern/parameterized.py +++ b/yt/extern/parameterized.py @@ -6,14 +6,8 @@ from nose.tools import nottest from unittest import TestCase -from . import six - -if six.PY3: - def new_instancemethod(f, *args): - return f -else: - import new - new_instancemethod = new.instancemethod +def new_instancemethod(f, *args): + return f _param = namedtuple("param", "args kwargs") @@ -68,7 +62,7 @@ def from_decorator(cls, args): """ if isinstance(args, param): return args - if isinstance(args, six.string_types): + if isinstance(args, str): args = (args, ) return cls(*args) @@ -208,7 +202,7 @@ def parameterized_expand_wrapper(f): for num, args in enumerate(get_input()): p = param.from_decorator(args) name_suffix = "_%s" %(num, ) - if len(p.args) > 0 and isinstance(p.args[0], six.string_types): + if len(p.args) > 0 and isinstance(p.args[0], str): name_suffix += "_" + cls.to_safe_name(p.args[0]) name = base_name + name_suffix frame_locals[name] = cls.param_as_standalone_func(p, f, name) diff --git a/yt/extern/peewee.py b/yt/extern/peewee.py index 1daaedb1b3a..6e4890eb1a7 100644 --- a/yt/extern/peewee.py +++ b/yt/extern/peewee.py @@ -12,7 +12,6 @@ import os import re import time -from yt.extern.six import string_types try: import sqlite3 @@ -772,7 +771,7 @@ def count(self): def group_by(self, clause): model = self.query_context - if isinstance(clause, string_types): + if isinstance(clause, str): fields = (clause,) elif isinstance(clause, (list, tuple)): fields = clause @@ -802,7 +801,7 @@ def order_by(self, field_or_string): ) def parse_select_query(self, alias_map): - if isinstance(self.query, string_types): + if isinstance(self.query, str): if self.query in ('*', self.model._meta.pk_name) and self.use_aliases(): return '%s.%s' % (alias_map[self.model], self.query) return self.query @@ -1109,7 +1108,7 @@ class DateTimeField(Field): db_field = 'datetime' def python_value(self, value): - if isinstance(value, string_types): + if isinstance(value, str): value = value.rsplit('.', 1)[0] return datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6]) return value diff --git a/yt/extern/six.py b/yt/extern/six.py deleted file mode 100644 index 30f9a121686..00000000000 --- a/yt/extern/six.py +++ /dev/null @@ -1,751 +0,0 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2014 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import functools -import operator -import sys -import types - -__author__ = "Benjamin Peterson " -__version__ = "1.7.3" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -stringIOReplace = '' - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize - - from io import StringIO - stringIOReplace = StringIO -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - # This is a bit ugly, but it avoids running this again. - delattr(obj.__class__, self.name) - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." 
+ fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. - - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), 
- MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), - MovedModule("winreg", "_winreg"), -] -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for 
attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - 
-Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return 
iter(d.lists(**kw)) -else: - def iterkeys(d, **kw): - return iter(d.iterkeys(**kw)) - - def itervalues(d, **kw): - return iter(d.itervalues(**kw)) - - def iteritems(d, **kw): - return iter(d.iteritems(**kw)) - - def iterlists(d, **kw): - return iter(d.iterlists(**kw)) - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - def u(s): - return s - unichr = chr - if sys.version_info[1] <= 1: - def int2byte(i): - return bytes((i,)) - else: - # This is about 2x faster than the implementation above on 3.2+ - int2byte = operator.methodcaller("to_bytes", 1, "big") - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO -else: - def b(s): - return s - # Workaround for standalone backslash - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - def byte2int(bs): - return ord(bs[0]) - def indexbytes(buf, i): - return ord(buf[i]) - def iterbytes(buf): - return (ord(byte) for byte in buf) - import StringIO - StringIO = BytesIO = StringIO.StringIO -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped): - def wrapper(f): - f = functools.wraps(wrapped)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. 
-sys.meta_path.append(_importer) diff --git a/yt/exthook.py b/yt/exthook.py index a1417cb8820..f6eee8afc8d 100644 --- a/yt/exthook.py +++ b/yt/exthook.py @@ -22,7 +22,6 @@ # This source code was originally in flask/exthook.py import sys import os -from .extern.six import reraise class ExtensionImporter(object): @@ -79,7 +78,7 @@ def load_module(self, fullname): # we swallow it and try the next choice. The skipped frame # is the one from __import__ above which we don't care about if self.is_important_traceback(realname, tb): - reraise(exc_type, exc_value, tb.tb_next) + raise exc_value.with_traceback(tb.tb_next) continue module = sys.modules[fullname] = sys.modules[realname] if '.' not in modname: diff --git a/yt/fields/angular_momentum.py b/yt/fields/angular_momentum.py index ea0f9cb3fe7..b94ee50e0dd 100644 --- a/yt/fields/angular_momentum.py +++ b/yt/fields/angular_momentum.py @@ -1,20 +1,3 @@ -""" -The basic field info container resides here. These classes, code specific and -universal, are the means by which we access fields across YT, both derived and -native. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from .derived_field import \ @@ -40,39 +23,41 @@ def _specific_angular_momentum_x(field, data): rv = obtain_position_vector(data) units = rv.units rv = np.rollaxis(rv, 0, len(rv.shape)) - rv = data.ds.arr(rv, input_units=units) - return rv[...,1] * zv - rv[...,2] * yv + rv = data.ds.arr(rv, units=units) + return rv[..., 1] * zv - rv[..., 2] * yv def _specific_angular_momentum_y(field, data): xv, yv, zv = obtain_relative_velocity_vector(data) rv = obtain_position_vector(data) units = rv.units rv = np.rollaxis(rv, 0, len(rv.shape)) - rv = data.ds.arr(rv, input_units=units) - return rv[...,2] * xv - rv[...,0] * zv + rv = data.ds.arr(rv, units=units) + return rv[..., 2] * xv - rv[..., 0] * zv def _specific_angular_momentum_z(field, data): xv, yv, zv = obtain_relative_velocity_vector(data) rv = obtain_position_vector(data) units = rv.units rv = np.rollaxis(rv, 0, len(rv.shape)) - rv = data.ds.arr(rv, input_units=units) - return rv[...,0] * yv - rv[...,1] * xv + rv = data.ds.arr(rv, units=units) + return rv[..., 0] * yv - rv[..., 1] * xv registry.add_field((ftype, "specific_angular_momentum_x"), - sampling_type="cell", + sampling_type="local", function=_specific_angular_momentum_x, units=unit_system["specific_angular_momentum"], validators=[ValidateParameter("center"), ValidateParameter("bulk_velocity")]) + registry.add_field((ftype, "specific_angular_momentum_y"), - sampling_type="cell", + sampling_type="local", function=_specific_angular_momentum_y, units=unit_system["specific_angular_momentum"], validators=[ValidateParameter("center"), ValidateParameter("bulk_velocity")]) + registry.add_field((ftype, "specific_angular_momentum_z"), - sampling_type="cell", + sampling_type="local", function=_specific_angular_momentum_z, units=unit_system["specific_angular_momentum"], validators=[ValidateParameter("center"), @@ -82,30 +67,33 @@ def _specific_angular_momentum_z(field, data): unit_system["specific_angular_momentum"], ftype=ftype) def _angular_momentum_x(field, data): - return data[ftype, "cell_mass"] \ + return data[ftype, "mass"] \ * data[ftype, 
"specific_angular_momentum_x"] + registry.add_field((ftype, "angular_momentum_x"), - sampling_type="cell", + sampling_type="local", function=_angular_momentum_x, units=unit_system["angular_momentum"], validators=[ValidateParameter('center'), ValidateParameter("bulk_velocity")]) def _angular_momentum_y(field, data): - return data[ftype, "cell_mass"] \ + return data[ftype, "mass"] \ * data[ftype, "specific_angular_momentum_y"] + registry.add_field((ftype, "angular_momentum_y"), - sampling_type="cell", + sampling_type="local", function=_angular_momentum_y, units=unit_system["angular_momentum"], validators=[ValidateParameter('center'), ValidateParameter("bulk_velocity")]) def _angular_momentum_z(field, data): - return data[ftype, "cell_mass"] \ + return data[ftype, "mass"] \ * data[ftype, "specific_angular_momentum_z"] + registry.add_field((ftype, "angular_momentum_z"), - sampling_type="cell", + sampling_type="local", function=_angular_momentum_z, units=unit_system["angular_momentum"], validators=[ValidateParameter('center'), diff --git a/yt/fields/api.py b/yt/fields/api.py index cfc4fee898b..0819b2ba558 100644 --- a/yt/fields/api.py +++ b/yt/fields/api.py @@ -1,17 +1,3 @@ -""" -API for yt.fields - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .field_plugin_registry import \ register_field_plugin, \ field_plugins diff --git a/yt/fields/astro_fields.py b/yt/fields/astro_fields.py index 3bbaad32453..b9f0bf43b0d 100644 --- a/yt/fields/astro_fields.py +++ b/yt/fields/astro_fields.py @@ -1,38 +1,15 @@ -""" -Astronomy and astrophysics fields. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from .derived_field import \ ValidateParameter -from .field_exceptions import \ - NeedsParameter from .field_plugin_registry import \ register_field_plugin -from yt.utilities.physical_constants import \ - mh, \ - me, \ - sigma_thompson, \ - clight, \ - kboltz, \ - G @register_field_plugin def setup_astro_fields(registry, ftype = "gas", slice_info = None): unit_system = registry.ds.unit_system + pc = registry.ds.units.physical_constants # slice_info would be the left, the right, and the factor. 
# For example, with the old Enzo-ZEUS fields, this would be: # slice(None, -2, None) @@ -50,102 +27,83 @@ def _dynamical_time(field, data): """ sqrt(3 pi / (16 G rho)) """ - return np.sqrt(3.0 * np.pi / (16.0 * data[ftype, "density"] * G)) + return np.sqrt(3.0 * np.pi / (16.0 * pc.G * data[ftype, "density"])) - registry.add_field((ftype, "dynamical_time"), sampling_type="cell", + registry.add_field((ftype, "dynamical_time"), + sampling_type="local", function=_dynamical_time, units=unit_system["time"]) def _jeans_mass(field, data): - MJ_constant = (((5.0 * kboltz) / (G * mh)) ** (1.5)) * \ - (3.0 / (4.0 * np.pi)) ** (0.5) - u = ((data[ftype, "temperature"] / - data[ftype, "mean_molecular_weight"])**(1.5)) * \ - (data[ftype, "density"]**(-0.5)) * MJ_constant + MJ_constant = (((5.0 * pc.kboltz) / (pc.G * pc.mh)) ** 1.5) * \ + (3.0 / (4.0 * np.pi)) ** 0.5 + u = (MJ_constant * \ + ((data[ftype, "temperature"] / + data[ftype, "mean_molecular_weight"])**1.5) * \ + (data[ftype, "density"]**(-0.5))) return u - registry.add_field((ftype, "jeans_mass"), sampling_type="cell", + registry.add_field((ftype, "jeans_mass"), + sampling_type="local", function=_jeans_mass, units=unit_system["mass"]) - def _chandra_emissivity(field, data): - logT0 = np.log10(data[ftype, "temperature"].to_ndarray().astype(np.float64)) - 7 - # we get rid of the units here since this is a fit and not an - # analytical expression - return data.ds.arr(data[ftype, "number_density"].to_ndarray().astype(np.float64)**2 - * (10**(- 0.0103 * logT0**8 + 0.0417 * logT0**7 - - 0.0636 * logT0**6 + 0.1149 * logT0**5 - - 0.3151 * logT0**4 + 0.6655 * logT0**3 - - 1.1256 * logT0**2 + 1.0026 * logT0**1 - - 0.6984 * logT0) - + data[ftype, "metallicity"].to_ndarray() * - 10**( 0.0305 * logT0**11 - 0.0045 * logT0**10 - - 0.3620 * logT0**9 + 0.0513 * logT0**8 - + 1.6669 * logT0**7 - 0.3854 * logT0**6 - - 3.3604 * logT0**5 + 0.4728 * logT0**4 - + 4.5774 * logT0**3 - 2.3661 * logT0**2 - - 1.6667 * logT0**1 - 0.2193 * logT0)), - "") # add correct units here - - registry.add_field((ftype, "chandra_emissivity"), sampling_type="cell", - function=_chandra_emissivity, - units="") # add correct units here - def _emission_measure(field, data): - if data.has_field_parameter("X_H"): - X_H = data.get_field_parameter("X_H") - else: - X_H = 0.76 - nenh = data["density"]/mh - nenh *= nenh - nenh *= 0.5*(1.+X_H)*X_H*data["cell_volume"] - return nenh - - registry.add_field((ftype, "emission_measure"), sampling_type="cell", + dV = data[ftype, "mass"]/data[ftype, "density"] + nenhdV = data[ftype, "H_nuclei_density"]*dV + nenhdV *= data[ftype, "El_number_density"] + return nenhdV + + registry.add_field((ftype, "emission_measure"), + sampling_type="local", function=_emission_measure, units=unit_system["number_density"]) - def _xray_emissivity(field, data): - # old scaling coefficient was 2.168e60 - return data.ds.arr(data[ftype, "density"].to_ndarray().astype(np.float64)**2 - * data[ftype, "temperature"].to_ndarray()**0.5, - "") # add correct units here - - registry.add_field((ftype, "xray_emissivity"), sampling_type="cell", - function=_xray_emissivity, - units="") # add correct units here - def _mazzotta_weighting(field, data): # Spectroscopic-like weighting field for galaxy clusters # Only useful as a weight_field for temperature, metallicity, velocity - ret = data["density"]/mh - ret *= ret*data["kT"]**-0.75 + ret = data[ftype, "El_number_density"].d**2 + ret *= data[ftype, "kT"].d**-0.75 return ret - registry.add_field((ftype,"mazzotta_weighting"), 
sampling_type="cell", + registry.add_field((ftype,"mazzotta_weighting"), + sampling_type="local", function=_mazzotta_weighting, - units="keV**-0.75*cm**-6") + units="") + + def _optical_depth(field, data): + return data[ftype, "El_number_density"]*pc.sigma_thompson + + registry.add_field((ftype, "optical_depth"), sampling_type="local", + function=_optical_depth, units=unit_system["length"]**-1) def _sz_kinetic(field, data): - scale = 0.88 * sigma_thompson / mh / clight - vel_axis = data.get_field_parameter("axis") - if vel_axis > 2: - raise NeedsParameter(["axis"]) - vel = data[ftype, "velocity_%s" % ({0: "x", 1: "y", 2: "z"}[vel_axis])] # minus sign is because radial velocity is WRT viewer # See issue #1225 - return -scale * vel * data[ftype, "density"] + return -data[ftype, "velocity_los"]*data[ftype, "optical_depth"]/pc.clight - registry.add_field((ftype, "sz_kinetic"), sampling_type="cell", + registry.add_field((ftype, "sz_kinetic"), + sampling_type="local", function=_sz_kinetic, units=unit_system["length"]**-1, validators=[ ValidateParameter("axis", {'axis': [0, 1, 2]})]) def _szy(field, data): - scale = 0.88 / mh * kboltz / (me * clight*clight) * sigma_thompson - return scale * data[ftype, "density"] * data[ftype, "temperature"] + kT = data[ftype, "kT"]/(pc.me*pc.clight*pc.clight) + return data[ftype, "optical_depth"] * kT - registry.add_field((ftype, "szy"), sampling_type="cell", + registry.add_field((ftype, "szy"), + sampling_type="local", function=_szy, units=unit_system["length"]**-1) + + def _entropy(field, data): + mgammam1 = -2./3. + tr = data[ftype, "kT"] * data[ftype, "El_number_density"]**mgammam1 + return data.apply_units(tr, field.units) + + registry.add_field((ftype, "entropy"), + sampling_type="local", + units="keV*cm**2", + function=_entropy) diff --git a/yt/fields/astro_simulations.py b/yt/fields/astro_simulations.py index 98f0ca9c8f6..a531eec51c9 100644 --- a/yt/fields/astro_simulations.py +++ b/yt/fields/astro_simulations.py @@ -1,18 +1,3 @@ -""" -Fields we expect to find in astrophysical simulations. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .domain_context import DomainContext # Here's how this all works: diff --git a/yt/fields/cosmology_fields.py b/yt/fields/cosmology_fields.py index efd37bfa867..bfaa998b49e 100644 --- a/yt/fields/cosmology_fields.py +++ b/yt/fields/cosmology_fields.py @@ -1,19 +1,3 @@ -""" -Cosmology related fields. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .derived_field import \ ValidateParameter from .field_exceptions import \ @@ -22,9 +6,6 @@ from .field_plugin_registry import \ register_field_plugin -from yt.utilities.physical_constants import \ - speed_of_light_cgs - @register_field_plugin def setup_cosmology_fields(registry, ftype = "gas", slice_info = None): unit_system = registry.ds.unit_system @@ -45,14 +26,16 @@ def _matter_density(field, data): return data[ftype, "density"] + \ data[ftype, "dark_matter_density"] - registry.add_field((ftype, "matter_density"), sampling_type="cell", + registry.add_field((ftype, "matter_density"), + sampling_type="local", function=_matter_density, units=unit_system["density"]) def _matter_mass(field, data): return data[ftype, "matter_density"] * data["index", "cell_volume"] - registry.add_field((ftype, "matter_mass"), sampling_type="cell", + registry.add_field((ftype, "matter_mass"), + sampling_type="local", function=_matter_mass, units=unit_system["mass"]) @@ -65,7 +48,8 @@ def _overdensity(field, data): return data[ftype, "matter_density"] / \ co.critical_density(data.ds.current_redshift) - registry.add_field((ftype, "overdensity"), sampling_type="cell", + registry.add_field((ftype, "overdensity"), + sampling_type="local", function=_overdensity, units="") @@ -83,7 +67,8 @@ def _baryon_overdensity(field, data): return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \ (1.0 + data.ds.current_redshift)**3 - registry.add_field((ftype, "baryon_overdensity"), sampling_type="cell", + registry.add_field((ftype, "baryon_overdensity"), + sampling_type="local", function=_baryon_overdensity, units="", validators=[ValidateParameter("omega_baryon")]) @@ -100,7 +85,8 @@ def _matter_overdensity(field, data): co.critical_density(0.0) / \ (1.0 + data.ds.current_redshift)**3 - registry.add_field((ftype, "matter_overdensity"), sampling_type="cell", + registry.add_field((ftype, "matter_overdensity"), + sampling_type="local", function=_matter_overdensity, units="") @@ -113,7 +99,8 @@ def _virial_radius_fraction(field, data): ret = data["radius"] / virial_radius return ret - registry.add_field(("index", "virial_radius_fraction"), sampling_type="cell", + registry.add_field(("index", "virial_radius_fraction"), + sampling_type="local", function=_virial_radius_fraction, validators=[ValidateParameter("virial_radius")], units="") @@ -126,6 +113,7 @@ def _weak_lensing_convergence(field, data): not data.ds.cosmological_simulation: raise NeedsConfiguration("cosmological_simulation", 1) co = data.ds.cosmology + pc = data.ds.units.physical_constants observer_redshift = data.get_field_parameter('observer_redshift') source_redshift = data.get_field_parameter('source_redshift') @@ -138,11 +126,12 @@ def _weak_lensing_convergence(field, data): # removed the factor of 1 / a to account for the fact that we are projecting # with a proper distance. 
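# The quantity returned just below is, schematically,
#
#     kappa_integrand = 1.5 * (H_0 / c)**2 * (dl * dls / ds) * matter_overdensity
#
# where dl, dls and ds are the observer-to-lens, lens-to-source and
# observer-to-source distances obtained from the dataset's cosmology object
# (co). Integrating this quantity along the line of sight (a proper-distance
# projection, hence no extra 1/a factor) gives the weak-lensing convergence.
# Here pc is the dataset's physical-constants container
# (data.ds.units.physical_constants), so pc.clight replaces the module-level
# speed_of_light_cgs import that is removed above.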
- return (1.5 * (co.hubble_constant / speed_of_light_cgs)**2 * (dl * dls / ds) * \ + return (1.5 * (co.hubble_constant / pc.clight)**2 * (dl * dls / ds) * \ data[ftype, "matter_overdensity"]).in_units("1/cm") - registry.add_field((ftype, "weak_lensing_convergence"), sampling_type="cell", + registry.add_field((ftype, "weak_lensing_convergence"), + sampling_type="local", function=_weak_lensing_convergence, units=unit_system["length"]**-1, - validators=[ValidateParameter("observer_redshift"), - ValidateParameter("source_redshift")]) + validators=[ValidateParameter("observer_redshift"), + ValidateParameter("source_redshift")]) diff --git a/yt/fields/derived_field.py b/yt/fields/derived_field.py index 3ca6e76061d..906d1fa6c44 100644 --- a/yt/fields/derived_field.py +++ b/yt/fields/derived_field.py @@ -1,24 +1,11 @@ -""" -Derived field base class. - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import contextlib import inspect import re import warnings -from yt.extern.six import string_types, PY2 from yt.funcs import \ - ensure_list + ensure_list, \ + VisibleDeprecationWarning from .field_exceptions import \ NeedsGridType, \ NeedsOriginalGrid, \ @@ -140,7 +127,7 @@ def __init__(self, name, sampling_type, function, units=None, # handle units if units is None: self.units = '' - elif isinstance(units, string_types): + elif isinstance(units, str): if units.lower() == 'auto': if dimensions is None: raise RuntimeError("To set units='auto', please specify the dimensions " @@ -160,7 +147,7 @@ def __init__(self, name, sampling_type, function, units=None, output_units = self.units self.output_units = output_units - if isinstance(dimensions, string_types): + if isinstance(dimensions, str): dimensions = getattr(ytdims, dimensions) self.dimensions = dimensions @@ -179,8 +166,28 @@ def _copy_def(self): @property def particle_type(self): + warnings.warn("particle_type has been deprecated, " + "check for field.sampling_type == 'particle' instead.", + VisibleDeprecationWarning, stacklevel=2) return self.sampling_type in ("discrete", "particle") + @property + def is_sph_field(self): + if self.sampling_type == "cell": + return False + is_sph_field = False + if self.alias_field: + name = self.alias_name + else: + name = self.name + if hasattr(self.ds, '_sph_ptypes'): + is_sph_field |= name[0] in (self.ds._sph_ptypes + ('gas',)) + return is_sph_field + + @property + def local_sampling(self): + return self.sampling_type in ('discrete', 'particle', 'local') + def get_units(self): if self.ds is not None: u = Unit(self.units, registry=self.ds.unit_registry) @@ -291,15 +298,25 @@ def get_label(self, projected=False): data_label += r"$" return data_label + @property + def alias_field(self): + func_name = self._function.__name__ + if func_name == "_TranslationFunc": + return True + return False + + @property + def alias_name(self): + if self.alias_field: + return self._function.alias_name + return None + def __repr__(self): - if PY2: - func_name = self._function.func_name - else: - func_name = self._function.__name__ + func_name = self._function.__name__ if self._function == NullFunc: s = "On-Disk Field " elif func_name == "_TranslationFunc": - s = "Alias Field for \"%s\" " % (self._function.alias_name,) + s = 
"Alias Field for \"%s\" " % (self.alias_name,) else: s = "Derived Field " if isinstance(self.name, tuple): @@ -309,7 +326,7 @@ def __repr__(self): s += "(units: %s" % self.units if self.display_name is not None: s += ", display_name: '%s'" % (self.display_name) - if self.particle_type: + if self.sampling_type == "particle": s += ", particle field" s += ")" return s @@ -409,6 +426,7 @@ def __init__(self, parameters, parameter_values=None): FieldValidator.__init__(self) self.parameters = ensure_list(parameters) self.parameter_values = parameter_values + def __call__(self, data): doesnt_have = [] for p in self.parameters: diff --git a/yt/fields/domain_context.py b/yt/fields/domain_context.py index 9b144a64129..3a9c851e544 100644 --- a/yt/fields/domain_context.py +++ b/yt/fields/domain_context.py @@ -1,19 +1,3 @@ -""" -Domain context base class - -Currently we largely apply this to the fields that get loaded. Presumably -different analysis operations could be the subject of this type of examination -as well. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - domain_context_registry = {} class DomainContext(object): diff --git a/yt/fields/field_aliases.py b/yt/fields/field_aliases.py index d027c639eeb..aae10484e34 100644 --- a/yt/fields/field_aliases.py +++ b/yt/fields/field_aliases.py @@ -1,18 +1,3 @@ -""" -Some old field names. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - _field_name_aliases = [ ("GridLevel", "grid_level"), diff --git a/yt/fields/field_detector.py b/yt/fields/field_detector.py index 8f80518b1e4..bad8c112282 100644 --- a/yt/fields/field_detector.py +++ b/yt/fields/field_detector.py @@ -1,24 +1,25 @@ -""" -The field detector. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from collections import defaultdict from yt.units.yt_array import YTArray from .field_exceptions import \ NeedsGridType +fp_units = { + 'bulk_velocity' : 'cm/s', + 'center' : 'cm', + 'normal' : '', + 'cp_x_vec': '', + 'cp_y_vec': '', + 'cp_z_vec': '', + 'x_hat': '', + 'y_hat': '', + 'z_hat': '', + 'omega_baryon': '', + 'virial_radius': 'cm', + 'observer_redshift': '', + 'source_redshift': '', +} + class FieldDetector(defaultdict): Level = 1 NumberOfParticles = 1 @@ -123,7 +124,7 @@ def __missing__(self, item): if not self.flat: self[item] = vv else: self[item] = vv.ravel() return self[item] - elif finfo is not None and finfo.particle_type: + elif finfo is not None and finfo.sampling_type == "particle": if "particle_position" in (item, item[1]) or \ "particle_velocity" in (item, item[1]) or \ "particle_magnetic_field" in (item, item[1]) or \ @@ -161,6 +162,11 @@ def _debug(self): return def deposit(self, *args, **kwargs): + from yt.frontends.stream.data_structures import StreamParticlesDataset + from yt.data_objects.static_output import ParticleDataset + if kwargs['method'] == 'mesh_id': + if isinstance(self.ds, (StreamParticlesDataset, ParticleDataset)): + raise ValueError return np.random.random((self.nd, self.nd, self.nd)) def mesh_sampling_particle_field(self, *args, **kwargs): @@ -181,49 +187,38 @@ def particle_operation(self, *args, **kwargs): def _read_data(self, field_name): self.requested.append(field_name) finfo = self.ds._get_field_info(*field_name) - if finfo.particle_type: + if finfo.sampling_type == "particle": self.requested.append(field_name) return np.ones(self.NumberOfParticles) return YTArray(defaultdict.__missing__(self, field_name), - input_units=finfo.units, + units=finfo.units, registry=self.ds.unit_registry) - fp_units = { - 'bulk_velocity' : 'cm/s', - 'bulk_magnetic_field': 'G', - 'center' : 'cm', - 'normal' : '', - 'cp_x_vec': '', - 'cp_y_vec': '', - 'cp_z_vec': '', - 'x_hat': '', - 'y_hat': '', - 'z_hat': '', - 'omega_baryon': '', - 'virial_radius': 'cm', - 'observer_redshift': '', - 'source_redshift': '', - } - def get_field_parameter(self, param, default = 0.0): if self.field_parameters and param in self.field_parameters: return self.field_parameters[param] self.requested_parameters.append(param) if param in ['center', 'normal'] or param.startswith('bulk'): - return self.ds.arr( - np.random.random(3) * 1e-2, self.fp_units[param]) + if param == 'bulk_magnetic_field': + if self.ds.unit_system.has_current_mks: + unit = "T" + else: + unit = "G" + else: + unit = fp_units[param] + return self.ds.arr(np.random.random(3) * 1e-2, unit) elif param in ['surface_height']: return self.ds.quan(0.0, 'code_length') elif param in ['axis']: return 0 elif param.startswith("cp_"): ax = param[3] - rv = self.ds.arr((0.0, 0.0, 0.0), self.fp_units[param]) + rv = self.ds.arr((0.0, 0.0, 0.0), fp_units[param]) rv['xyz'.index(ax)] = 1.0 return rv elif param.endswith("_hat"): ax = param[0] - rv = YTArray((0.0, 0.0, 0.0), self.fp_units[param]) + rv = YTArray((0.0, 0.0, 0.0), fp_units[param]) rv['xyz'.index(ax)] = 1.0 return rv elif param == "fof_groups": @@ -237,7 +232,7 @@ def get_field_parameter(self, param, default = 0.0): id = 1 def apply_units(self, arr, units): - return self.ds.arr(arr, input_units = units) + return self.ds.arr(arr, units = units) def has_field_parameter(self, param): return param in self.field_parameters @@ -251,14 +246,14 @@ def fcoords(self): fc.shape = 
(self.nd*self.nd*self.nd, 3) else: fc = fc.transpose() - return self.ds.arr(fc, input_units = "code_length") + return self.ds.arr(fc, units = "code_length") @property def fcoords_vertex(self): fc = np.random.random((self.nd, self.nd, self.nd, 8, 3)) if self.flat: fc.shape = (self.nd*self.nd*self.nd, 8, 3) - return self.ds.arr(fc, input_units = "code_length") + return self.ds.arr(fc, units = "code_length") @property def icoords(self): @@ -283,5 +278,5 @@ def fwidth(self): fw = np.ones((self.nd**3, 3), dtype="float64") / self.nd if not self.flat: fw.shape = (self.nd, self.nd, self.nd, 3) - return self.ds.arr(fw, input_units = "code_length") + return self.ds.arr(fw, units = "code_length") diff --git a/yt/fields/field_exceptions.py b/yt/fields/field_exceptions.py index 001d46a668e..c5cdef171f1 100644 --- a/yt/fields/field_exceptions.py +++ b/yt/fields/field_exceptions.py @@ -1,18 +1,3 @@ -""" -Field-related exceptions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - class ValidationException(Exception): pass diff --git a/yt/fields/field_functions.py b/yt/fields/field_functions.py index 8e69dbcdcdf..cfbe7a283fe 100644 --- a/yt/fields/field_functions.py +++ b/yt/fields/field_functions.py @@ -1,18 +1,3 @@ -""" -General field-related functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.lib.misc_utilities import \ diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index f8d2c8293d9..b56b11d6b96 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -1,24 +1,7 @@ -""" -The basic field info container resides here. These classes, code specific and -universal, are the means by which we access fields across YT, both derived and -native. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from numbers import Number as numeric_type +import warnings -from yt.extern.six import string_types from yt.funcs import mylog, only_on_root from yt.geometry.geometry_handler import \ is_curvilinear @@ -38,7 +21,6 @@ particle_vector_functions, \ particle_scalar_functions, \ standard_particle_fields, \ - add_volume_weighted_smoothed_field, \ sph_whitelist_fields def tupleize(inp): @@ -89,7 +71,7 @@ def setup_fluid_index_fields(self): self.alias((ftype, f), ("index", f)) def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64): - skip_output_units = ("code_length") + skip_output_units = ("code_length",) for f, (units, aliases, dn) in sorted(self.known_particle_fields): units = self.ds.field_units.get((ptype, f), units) output_units = units @@ -150,30 +132,32 @@ def setup_extra_union_fields(self, ptype="all"): for units, field in self.extra_union_fields: add_union_field(self, ptype, field, units) - def setup_smoothed_fields(self, ptype, num_neighbors = 64, ftype = "gas"): + def setup_smoothed_fields(self, ptype, num_neighbors=64, ftype="gas"): # We can in principle compute this, but it is not yet implemented. - if (ptype, "density") not in self: + if (ptype, "density") not in self or not hasattr(self.ds, '_sph_ptypes'): return - if (ptype, "smoothing_length") in self: - sml_name = "smoothing_length" - else: - sml_name = None new_aliases = [] for ptype2, alias_name in list(self): if ptype2 != ptype: continue if alias_name not in sph_whitelist_fields: - continue - fn = add_volume_weighted_smoothed_field( - ptype, "particle_position", "particle_mass", - sml_name, "density", alias_name, self, - num_neighbors) - if 'particle_' in alias_name: - alias_name = alias_name.replace('particle_', '') - new_aliases.append(((ftype, alias_name), fn[0])) - for alias, source in new_aliases: - #print "Aliasing %s => %s" % (alias, source) - self.alias(alias, source) + if alias_name.startswith('particle_'): + pass + else: + continue + uni_alias_name = alias_name + if 'particle_position_' in alias_name: + uni_alias_name = alias_name.replace('particle_position_', '') + elif 'particle_' in alias_name: + uni_alias_name = alias_name.replace('particle_', '') + new_aliases.append( + ((ftype, uni_alias_name), (ptype, alias_name), ) + ) + new_aliases.append( + ((ptype, uni_alias_name), (ptype, alias_name), ) + ) + for alias, source in new_aliases: + self.alias(alias, source) # Collect the names for all aliases if geometry is curvilinear def get_aliases_gallery(self): @@ -208,7 +192,7 @@ def setup_fluid_aliases(self, ftype='gas'): # field *name* is in there, then the field *tuple*. 
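# In other words, a units override keyed by the bare field name is looked up
# first and one keyed by the full (field type, name) tuple is looked up
# second, so when both entries exist in ds.field_units the tuple entry wins.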
units = self.ds.field_units.get(field[1], units) units = self.ds.field_units.get(field, units) - if not isinstance(units, string_types) and args[0] != "": + if not isinstance(units, str) and args[0] != "": units = "((%s)*%s)" % (args[0], units) if isinstance(units, (numeric_type, np.number, np.ndarray)) and \ args[0] == "" and units != 1.0: @@ -217,7 +201,7 @@ def setup_fluid_aliases(self, ftype='gas'): units = "" elif units == 1.0: units = "" - self.add_output_field(field, sampling_type="cell", units = units, + self.add_output_field(field, sampling_type="cell",units = units, display_name = display_name) axis_names = self.ds.coordinates.axis_order for alias in aliases: @@ -297,7 +281,18 @@ def create_function(f): self[name] = DerivedField(name, sampling_type, function, **kwargs) return + particle_field = False if sampling_type == 'particle': + particle_field = True + + if kwargs.get('particle_type', False): + warnings.warn( + 'The particle_type keyword argument of add_field has been ' + 'deprecated. Please set sampling_type="particle" instead.', + stacklevel=2) + particle_field = True + + if particle_field: ftype = 'all' else: ftype = self.ds.default_fluid_type diff --git a/yt/fields/field_plugin_registry.py b/yt/fields/field_plugin_registry.py index 8d729cd96fe..39327f80771 100644 --- a/yt/fields/field_plugin_registry.py +++ b/yt/fields/field_plugin_registry.py @@ -1,18 +1,3 @@ -""" -This is a semi-global field plugin registry. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - field_plugins = {} def register_field_plugin(func): diff --git a/yt/fields/field_type_container.py b/yt/fields/field_type_container.py index 5989631f1cd..31eee8db606 100644 --- a/yt/fields/field_type_container.py +++ b/yt/fields/field_type_container.py @@ -5,7 +5,6 @@ import weakref import textwrap import inspect -from yt.extern.six import string_types from yt.fields.derived_field import \ DerivedField @@ -52,7 +51,7 @@ def __contains__(self, obj): ob = None if isinstance(obj, FieldNameContainer): ob = obj.field_type - elif isinstance(obj, string_types): + elif isinstance(obj, str): ob = obj return ob in self.field_types @@ -104,7 +103,7 @@ def __contains__(self, obj): elif isinstance(obj, tuple): if self.field_type == obj[0] and obj in self.ds.field_info: return True - elif isinstance(obj, string_types): + elif isinstance(obj, str): if (self.field_type, obj) in self.ds.field_info: return True return False diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index 5999d70f507..0a213e43409 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -1,18 +1,3 @@ -""" -Here are some fields that are specific to fluids. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.units.unit_object import Unit @@ -32,9 +17,8 @@ create_magnitude_field, \ create_vector_fields -from yt.utilities.physical_constants import \ - mh, \ - kboltz +from yt.utilities.chemical_formulas import \ + default_mu from yt.utilities.lib.misc_utilities import \ obtain_relative_velocity_vector @@ -43,6 +27,7 @@ @register_field_plugin def setup_fluid_fields(registry, ftype = "gas", slice_info = None): + pc = registry.ds.units.physical_constants # slice_info would be the left, the right, and the factor. # For example, with the old Enzo-ZEUS fields, this would be: # slice(None, -2, None) @@ -58,8 +43,15 @@ def setup_fluid_fields(registry, ftype = "gas", slice_info = None): unit_system = registry.ds.unit_system - create_vector_fields(registry, "velocity", unit_system["velocity"], ftype, slice_info) - create_vector_fields(registry, "magnetic_field", unit_system["magnetic_field"], ftype, slice_info) + if unit_system.name == 'cgs': + mag_units = "magnetic_field_cgs" + else: + mag_units = "magnetic_field_mks" + + create_vector_fields(registry, "velocity", unit_system["velocity"], ftype, + slice_info) + create_vector_fields(registry, "magnetic_field", unit_system[mag_units], + ftype, slice_info) def _cell_mass(field, data): return data[ftype, "density"] * data[ftype, "cell_volume"] @@ -68,12 +60,14 @@ def _cell_mass(field, data): sampling_type="cell", function=_cell_mass, units=unit_system["mass"]) + registry.alias((ftype, "mass"), (ftype, "cell_mass")) def _sound_speed(field, data): tr = data.ds.gamma * data[ftype, "pressure"] / data[ftype, "density"] return np.sqrt(tr) + registry.add_field((ftype, "sound_speed"), - sampling_type="cell", + sampling_type="local", function=_sound_speed, units=unit_system["velocity"]) @@ -81,8 +75,9 @@ def _radial_mach_number(field, data): """ Radial component of M{|v|/c_sound} """ tr = data[ftype, "radial_velocity"] / data[ftype, "sound_speed"] return np.abs(tr) + registry.add_field((ftype, "radial_mach_number"), - sampling_type="cell", + sampling_type="local", function=_radial_mach_number, units = "") @@ -90,7 +85,7 @@ def _kin_energy(field, data): v = obtain_relative_velocity_vector(data) return 0.5 * data[ftype, "density"] * (v**2).sum(axis=0) registry.add_field((ftype, "kinetic_energy"), - sampling_type="cell", + sampling_type="local", function=_kin_energy, units=unit_system["pressure"], validators=[ValidateParameter('bulk_velocity')]) @@ -98,8 +93,9 @@ def _kin_energy(field, data): def _mach_number(field, data): """ M{|v|/c_sound} """ return data[ftype, "velocity_magnitude"] / data[ftype, "sound_speed"] + registry.add_field((ftype, "mach_number"), - sampling_type="cell", + sampling_type="local", function=_mach_number, units = "") @@ -125,60 +121,57 @@ def _pressure(field, data): return tr registry.add_field((ftype, "pressure"), - sampling_type="cell", + sampling_type="local", function=_pressure, units=unit_system["pressure"]) def _kT(field, data): - return (kboltz*data[ftype, "temperature"]).in_units("keV") + return (pc.kboltz*data[ftype, "temperature"]).in_units("keV") + registry.add_field((ftype, "kT"), - sampling_type="cell", + sampling_type="local", function=_kT, units="keV", display_name="Temperature") - def _entropy(field, data): - mw = data.get_field_parameter("mu") - if mw is None: - mw = 1.0 - mw *= mh - gammam1 = 2./3. 
- tr = data[ftype,"kT"] / ((data[ftype, "density"]/mw)**gammam1) - return data.apply_units(tr, field.units) - registry.add_field((ftype, "entropy"), - sampling_type="cell", - units="keV*cm**2", - function=_entropy) - def _metallicity(field, data): return data[ftype, "metal_density"] / data[ftype, "density"] registry.add_field((ftype, "metallicity"), - sampling_type="cell", + sampling_type="local", function=_metallicity, units="Zsun") def _metal_mass(field, data): - return data[ftype, "metal_density"] * data[ftype, "cell_volume"] + Z = data[ftype, "metallicity"].to("dimensionless") + return Z*data[ftype, "mass"] + registry.add_field((ftype, "metal_mass"), - sampling_type="cell", + sampling_type="local", function=_metal_mass, units=unit_system["mass"]) - def _number_density(field, data): - field_data = np.zeros_like(data["gas", "%s_number_density" % \ - data.ds.field_info.species_names[0]]) - for species in data.ds.field_info.species_names: - field_data += data["gas", "%s_number_density" % species] - return field_data + if len(registry.ds.field_info.species_names) > 0: + def _number_density(field, data): + field_data = np.zeros_like(data["gas", "%s_number_density" % \ + data.ds.field_info.species_names[0]]) + for species in data.ds.field_info.species_names: + field_data += data["gas", "%s_number_density" % species] + return field_data + else: + def _number_density(field, data): + mu = getattr(data.ds, "mu", default_mu) + return data[ftype, "density"]/(pc.mh*mu) + registry.add_field((ftype, "number_density"), - sampling_type="cell", - function = _number_density, + sampling_type="local", + function=_number_density, units=unit_system["number_density"]) - + def _mean_molecular_weight(field, data): - return (data[ftype, "density"] / (mh * data[ftype, "number_density"])) + return data[ftype, "density"] / (pc.mh * data[ftype, "number_density"]) + registry.add_field((ftype, "mean_molecular_weight"), - sampling_type="cell", + sampling_type="local", function=_mean_molecular_weight, units="") @@ -190,7 +183,7 @@ def _mean_molecular_weight(field, data): create_averaged_field(registry, "density", unit_system["density"], ftype=ftype, slice_info=slice_info, - weight="cell_mass") + weight="mass") def setup_gradient_fields(registry, grad_field, field_units, slice_info = None): @@ -231,10 +224,11 @@ def func(field, data): for axi, ax in enumerate(registry.ds.coordinates.axis_order): f = grad_func(axi, ax) registry.add_field((ftype, "%s_gradient_%s" % (fname, ax)), - sampling_type="cell", - function = f, - validators = [ValidateSpatial(1, [grad_field])], - units = grad_units) + sampling_type="local", + function=f, + validators=[ValidateSpatial(1, [grad_field])], + units=grad_units) + create_magnitude_field(registry, "%s_gradient" % fname, grad_units, ftype=ftype, validators = [ValidateSpatial(1, [grad_field])]) diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index 3bc05e8cafc..83b1adb180e 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -1,18 +1,3 @@ -""" -Complex fluid fields. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.fields.derived_field import \ @@ -139,6 +124,7 @@ def _vorticity_z(field, data): function=eval("_%s" % n), units=unit_system["frequency"], validators=vort_validators) + create_magnitude_field(registry, "vorticity", unit_system["frequency"], ftype=ftype, slice_info=slice_info, validators=vort_validators) @@ -152,6 +138,7 @@ def _vorticity_stretching_y(field, data): return data[ftype, "velocity_divergence"] * data[ftype, "vorticity_y"] def _vorticity_stretching_z(field, data): return data[ftype, "velocity_divergence"] * data[ftype, "vorticity_z"] + for ax in 'xyz': n = "vorticity_stretching_%s" % ax registry.add_field((ftype, n), @@ -174,6 +161,7 @@ def _vorticity_growth_y(field, data): def _vorticity_growth_z(field, data): return -data[ftype, "vorticity_stretching_z"] - \ data[ftype, "baroclinic_vorticity_z"] + for ax in 'xyz': n = "vorticity_growth_%s" % ax registry.add_field((ftype, n), @@ -462,4 +450,3 @@ def _shear_mach(field, data): units="", validators=[ValidateSpatial(1, vs_fields), ValidateParameter('bulk_velocity')]) - diff --git a/yt/fields/geometric_fields.py b/yt/fields/geometric_fields.py index 91aa2d4ae77..3b8fa041fa3 100644 --- a/yt/fields/geometric_fields.py +++ b/yt/fields/geometric_fields.py @@ -1,19 +1,3 @@ -""" -Geometric fields live here. Not coordinate ones, but ones that perform -transformations. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from .derived_field import \ @@ -46,7 +30,8 @@ def _radius(field, data): """ return get_radius(data, "", field.name[0]) - registry.add_field(("index", "radius"), sampling_type="cell", + registry.add_field(("index", "radius"), + sampling_type="cell", function=_radius, validators=[ValidateParameter("center")], units=unit_system["length"]) @@ -59,7 +44,8 @@ def _grid_level(field, data): return data._reshape_vals(arr) return arr - registry.add_field(("index", "grid_level"), sampling_type="cell", + registry.add_field(("index", "grid_level"), + sampling_type="cell", function=_grid_level, units="", validators=[ValidateSpatial(0)]) @@ -76,7 +62,8 @@ def _grid_indices(field, data): return data._reshape_vals(arr) return arr - registry.add_field(("index", "grid_indices"), sampling_type="cell", + registry.add_field(("index", "grid_indices"), + sampling_type="cell", function=_grid_indices, units="", validators=[ValidateSpatial(0)], @@ -87,7 +74,8 @@ def _ones_over_dx(field, data): return np.ones(data["index", "ones"].shape, dtype="float64")/data["index", "dx"] - registry.add_field(("index", "ones_over_dx"), sampling_type="cell", + registry.add_field(("index", "ones_over_dx"), + sampling_type="cell", function=_ones_over_dx, units=unit_system["length"]**-1, display_field=False) @@ -97,7 +85,8 @@ def _zeros(field, data): arr = np.zeros(data["index", "ones"].shape, dtype='float64') return data.apply_units(arr, field.units) - registry.add_field(("index", "zeros"), sampling_type="cell", + registry.add_field(("index", "zeros"), + sampling_type="cell", function=_zeros, units="", display_field=False) @@ -109,7 +98,8 @@ def _ones(field, data): return data._reshape_vals(arr) return data.apply_units(arr, field.units) 
- registry.add_field(("index", "ones"), sampling_type="cell", + registry.add_field(("index", "ones"), + sampling_type="cell", function=_ones, units="", display_field=False) @@ -132,7 +122,9 @@ def _morton_index(field, data): data["index", "z"].ravel(), LE, RE) morton.shape = data["index", "x"].shape return morton.view("f8") - registry.add_field(("index", "morton_index"), sampling_type="cell", function=_morton_index, + registry.add_field(("index", "morton_index"), + sampling_type="cell", + function=_morton_index, units = "") def _spherical_radius(field, data): @@ -144,7 +136,8 @@ def _spherical_radius(field, data): coords = get_periodic_rvec(data) return data.ds.arr(get_sph_r(coords), "code_length").in_base(unit_system.name) - registry.add_field(("index", "spherical_radius"), sampling_type="cell", + registry.add_field(("index", "spherical_radius"), + sampling_type="cell", function=_spherical_radius, validators=[ValidateParameter("center")], units=unit_system["length"]) @@ -153,7 +146,8 @@ def _spherical_r(field, data): """This field is deprecated and will be removed in a future release""" return data['index', 'spherical_radius'] - registry.add_field(("index", "spherical_r"), sampling_type="cell", + registry.add_field(("index", "spherical_r"), + sampling_type="cell", function=_spherical_r, validators=[ValidateParameter("center")], units=unit_system["length"]) @@ -171,7 +165,8 @@ def _spherical_theta(field, data): coords = get_periodic_rvec(data) return get_sph_theta(coords, normal) - registry.add_field(("index", "spherical_theta"), sampling_type="cell", + registry.add_field(("index", "spherical_theta"), + sampling_type="cell", function=_spherical_theta, validators=[ValidateParameter("center"), ValidateParameter("normal")], @@ -190,7 +185,8 @@ def _spherical_phi(field, data): coords = get_periodic_rvec(data) return get_sph_phi(coords, normal) - registry.add_field(("index", "spherical_phi"), sampling_type="cell", + registry.add_field(("index", "spherical_phi"), + sampling_type="cell", function=_spherical_phi, validators=[ValidateParameter("center"), ValidateParameter("normal")], @@ -206,7 +202,8 @@ def _cylindrical_radius(field, data): coords = get_periodic_rvec(data) return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_base(unit_system.name) - registry.add_field(("index", "cylindrical_radius"), sampling_type="cell", + registry.add_field(("index", "cylindrical_radius"), + sampling_type="cell", function=_cylindrical_radius, validators=[ValidateParameter("center"), ValidateParameter("normal")], @@ -216,7 +213,8 @@ def _cylindrical_r(field, data): """This field is deprecated and will be removed in a future release""" return data['index', 'cylindrical_radius'] - registry.add_field(("index", "cylindrical_r"), sampling_type="cell", + registry.add_field(("index", "cylindrical_r"), + sampling_type="cell", function=_cylindrical_r, validators=[ValidateParameter("center")], units=unit_system["length"]) @@ -231,7 +229,8 @@ def _cylindrical_z(field, data): coords = get_periodic_rvec(data) return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_base(unit_system.name) - registry.add_field(("index", "cylindrical_z"), sampling_type="cell", + registry.add_field(("index", "cylindrical_z"), + sampling_type="cell", function=_cylindrical_z, validators=[ValidateParameter("center"), ValidateParameter("normal")], @@ -250,7 +249,8 @@ def _cylindrical_theta(field, data): coords = get_periodic_rvec(data) return get_cyl_theta(coords, normal) - registry.add_field(("index", "cylindrical_theta"), 
sampling_type="cell", + registry.add_field(("index", "cylindrical_theta"), + sampling_type="cell", function=_cylindrical_theta, validators=[ValidateParameter("center"), ValidateParameter("normal")], @@ -260,7 +260,8 @@ def _disk_angle(field, data): """This field is dprecated and will be removed in a future release""" return data["index", "spherical_theta"] - registry.add_field(("index", "disk_angle"), sampling_type="cell", + registry.add_field(("index", "disk_angle"), + sampling_type="cell", function=_disk_angle, take_log=False, display_field=False, @@ -272,7 +273,8 @@ def _height(field, data): """This field is deprecated and will be removed in a future release""" return data["index", "cylindrical_z"] - registry.add_field(("index", "height"), sampling_type="cell", + registry.add_field(("index", "height"), + sampling_type="cell", function=_height, validators=[ValidateParameter("center"), ValidateParameter("normal")], diff --git a/yt/fields/interpolated_fields.py b/yt/fields/interpolated_fields.py index 1ac4ea1dcb4..28c08e1afc0 100644 --- a/yt/fields/interpolated_fields.py +++ b/yt/fields/interpolated_fields.py @@ -1,18 +1,3 @@ -""" -Fields from interpolating data tables. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.local_fields import add_field from yt.utilities.linear_interpolators import \ diff --git a/yt/fields/local_fields.py b/yt/fields/local_fields.py index a748002d57d..425b505cfa9 100644 --- a/yt/fields/local_fields.py +++ b/yt/fields/local_fields.py @@ -1,17 +1,3 @@ -""" -This is a container for storing local fields defined on each load of yt. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import warnings from yt.utilities.logger import \ @@ -43,8 +29,10 @@ def add_field(self, name, function=None, sampling_type=None, **kwargs): else: sampling_type = "particle" if sampling_type is None: - warnings.warn("Because 'sampling_type' not specified, yt will " - "assume a cell 'sampling_type'") + warnings.warn( + "Because 'sampling_type' is not specified, yt will " + "assume a 'cell' sampling_type for the %s field" % (name, ), + stacklevel=3) sampling_type = "cell" return super(LocalFieldInfoContainer, self).add_field(name, sampling_type, function, **kwargs) diff --git a/yt/fields/magnetic_field.py b/yt/fields/magnetic_field.py index 6899592ca7a..d776a6929dc 100644 --- a/yt/fields/magnetic_field.py +++ b/yt/fields/magnetic_field.py @@ -1,25 +1,7 @@ -""" -Magnetic field ... er, fields. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.units import dimensions -from yt.units.unit_object import Unit from yt.units.yt_array import ustack -from yt.utilities.physical_constants import mu_0 -from yt.funcs import handle_mks_cgs from yt.fields.derived_field import \ ValidateParameter @@ -31,19 +13,25 @@ get_sph_theta_component, \ get_sph_phi_component -mag_factors = {dimensions.magnetic_field_cgs: 4.0*np.pi, - dimensions.magnetic_field_mks: mu_0} - @register_field_plugin -def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None): - unit_system = registry.ds.unit_system +def setup_magnetic_field_fields(registry, ftype="gas", slice_info=None): + ds = registry.ds + + unit_system = ds.unit_system + pc = registry.ds.units.physical_constants axis_names = registry.ds.coordinates.axis_order - if (ftype,"magnetic_field_%s" % axis_names[0]) not in registry: + if (ftype, "magnetic_field_%s" % axis_names[0]) not in registry: return - u = registry[ftype,"magnetic_field_%s" % axis_names[0]].units + u = registry[ftype, "magnetic_field_%s" % axis_names[0]].units + + def mag_factors(dims): + if dims == dimensions.magnetic_field_cgs: + return 4.0*np.pi + elif dims == dimensions.magnetic_field_mks: + return ds.units.physical_constants.mu_0 def _magnetic_field_strength(field, data): xm = "relative_magnetic_field_%s" % axis_names[0] @@ -52,35 +40,35 @@ def _magnetic_field_strength(field, data): B2 = (data[ftype, xm])**2 + (data[ftype, ym])**2 + (data[ftype, zm])**2 - return handle_mks_cgs(np.sqrt(B2), field.units) + return np.sqrt(B2) registry.add_field((ftype,"magnetic_field_strength"), - sampling_type="cell", + sampling_type="local", function=_magnetic_field_strength, validators=[ValidateParameter('bulk_magnetic_field')], units=u) def _magnetic_energy(field, data): B = data[ftype,"magnetic_field_strength"] - return 0.5*B*B/mag_factors[B.units.dimensions] - - registry.add_field((ftype, "magnetic_energy"), sampling_type="cell", - function=_magnetic_energy, - units=unit_system["pressure"]) + return 0.5*B*B/mag_factors(B.units.dimensions) + registry.add_field((ftype, "magnetic_energy"), + sampling_type="local", + function=_magnetic_energy, + units=unit_system["pressure"]) def _plasma_beta(field,data): return data[ftype,'pressure']/data[ftype,'magnetic_energy'] - - registry.add_field((ftype, "plasma_beta"), sampling_type="cell", - function=_plasma_beta, - units="") + registry.add_field((ftype, "plasma_beta"), + sampling_type="local", + function=_plasma_beta, + units="") def _magnetic_pressure(field,data): return data[ftype,'magnetic_energy'] - - registry.add_field((ftype, "magnetic_pressure"), sampling_type="cell", - function=_magnetic_pressure, - units=unit_system["pressure"]) + registry.add_field((ftype, "magnetic_pressure"), + sampling_type="local", + function=_magnetic_pressure, + units=unit_system["pressure"]) if registry.ds.geometry == "cartesian": def _magnetic_field_poloidal(field,data): @@ -107,8 +95,7 @@ def _magnetic_field_toroidal(field,data): elif registry.ds.geometry == "cylindrical": def _magnetic_field_poloidal(field, data): - bm = handle_mks_cgs( - data.get_field_parameter("bulk_magnetic_field"), field.units) + bm = data.get_field_parameter("bulk_magnetic_field") r = data["index", "r"] z = data["index", "z"] d = np.sqrt(r*r+z*z) @@ -119,21 +106,19 @@ def _magnetic_field_poloidal(field, data): def _magnetic_field_toroidal(field, data): ax = axis_names.find('theta') - bm = handle_mks_cgs( - 
data.get_field_parameter("bulk_magnetic_field"), field.units) + bm = data.get_field_parameter("bulk_magnetic_field") return data[ftype,"magnetic_field_theta"] - bm[ax] elif registry.ds.geometry == "spherical": + def _magnetic_field_poloidal(field, data): ax = axis_names.find('theta') - bm = handle_mks_cgs( - data.get_field_parameter("bulk_magnetic_field"), field.units) + bm = data.get_field_parameter("bulk_magnetic_field") return data[ftype,"magnetic_field_theta"] - bm[ax] def _magnetic_field_toroidal(field, data): ax = axis_names.find('phi') - bm = handle_mks_cgs( - data.get_field_parameter("bulk_magnetic_field"), field.units) + bm = data.get_field_parameter("bulk_magnetic_field") return data[ftype,"magnetic_field_phi"] - bm[ax] else: @@ -143,15 +128,17 @@ def _magnetic_field_toroidal(field, data): _magnetic_field_toroidal = None _magnetic_field_poloidal = None + registry.add_field((ftype, "magnetic_field_poloidal"), - sampling_type="cell", + sampling_type="local", function=_magnetic_field_poloidal, units=u, validators=[ValidateParameter("normal"), ValidateParameter("bulk_magnetic_field")]) + registry.add_field((ftype, "magnetic_field_toroidal"), - sampling_type="cell", + sampling_type="local", function=_magnetic_field_toroidal, units=u, validators=[ValidateParameter("normal"), @@ -159,15 +146,37 @@ def _magnetic_field_toroidal(field, data): def _alfven_speed(field,data): B = data[ftype,'magnetic_field_strength'] - return B/np.sqrt(mag_factors[B.units.dimensions]*data[ftype,'density']) - registry.add_field((ftype, "alfven_speed"), sampling_type="cell", function=_alfven_speed, + return B/np.sqrt(mag_factors(B.units.dimensions)*data[ftype,'density']) + + registry.add_field((ftype, "alfven_speed"), + sampling_type="local", + function=_alfven_speed, units=unit_system["velocity"]) def _mach_alfven(field,data): return data[ftype,'velocity_magnitude']/data[ftype,'alfven_speed'] - registry.add_field((ftype, "mach_alfven"), sampling_type="cell", function=_mach_alfven, + + registry.add_field((ftype, "mach_alfven"), + sampling_type="local", + function=_mach_alfven, units="dimensionless") + b_units = registry.ds.quan(1.0, u).units + if dimensions.current_mks in b_units.dimensions.free_symbols: + rm_scale = pc.qp.to("C", "SI")**3/(4.0*np.pi*pc.eps_0) + else: + rm_scale = pc.qp**3/pc.clight + rm_scale *= registry.ds.quan(1.0, "rad")/(2.0*np.pi*pc.me**2*pc.clight**3) + rm_units = registry.ds.quan(1.0, "rad/m**2").units/unit_system["length"] + + def _rotation_measure(field, data): + return rm_scale*data[ftype, "magnetic_field_los"]*data[ftype, "El_number_density"] + + registry.add_field((ftype, "rotation_measure"), sampling_type="local", + function=_rotation_measure, units=rm_units, + validators=[ + ValidateParameter("axis", {'axis': [0, 1, 2]})]) + def setup_magnetic_field_aliases(registry, ds_ftype, ds_fields, ftype="gas"): r""" This routine sets up special aliases between dataset-specific magnetic @@ -209,7 +218,7 @@ def setup_magnetic_field_aliases(registry, ds_ftype, ds_fields, ftype="gas"): unit_system = registry.ds.unit_system if isinstance(ds_fields, list): # If ds_fields is a list, we assume a grid dataset - sampling_type = "cell" + sampling_type = "local" ds_fields = [(ds_ftype, fd) for fd in ds_fields] ds_field = ds_fields[0] else: @@ -220,26 +229,18 @@ def setup_magnetic_field_aliases(registry, ds_ftype, ds_fields, ftype="gas"): return # Figure out the unit conversion to use - from_units = Unit(registry[ds_field].units, - registry=registry.ds.unit_registry) - if dimensions.current_mks in 
unit_system.base_units: + if unit_system.base_units[dimensions.current_mks] is not None: to_units = unit_system["magnetic_field_mks"] - equiv = "SI" else: to_units = unit_system["magnetic_field_cgs"] - equiv = "CGS" - if from_units.dimensions == to_units.dimensions: - convert = lambda x: x.in_units(to_units) - else: - convert = lambda x: x.to_equivalent(to_units, equiv) units = unit_system[to_units.dimensions] # Add fields - if sampling_type == "cell": + if sampling_type in ["cell", "local"]: # Grid dataset case def mag_field(fd): def _mag_field(field, data): - return convert(data[fd]) + return data[fd].to(field.units) return _mag_field for ax, fd in zip(registry.ds.coordinates.axis_order, ds_fields): registry.add_field((ftype,"magnetic_field_%s" % ax), @@ -250,10 +251,14 @@ def _mag_field(field, data): # Particle dataset case def mag_field(ax): def _mag_field(field, data): - return convert(data[ds_field][:, 'xyz'.index(ax)]) + return data[ds_field][:, 'xyz'.index(ax)] return _mag_field for ax in registry.ds.coordinates.axis_order: - registry.add_field((ftype, "particle_magnetic_field_%s" % ax), + fname = "particle_magnetic_field_%s" % ax + registry.add_field((ds_ftype, fname), sampling_type=sampling_type, function=mag_field(ax), units=units) + sph_ptypes = getattr(registry.ds, "_sph_ptypes", tuple()) + if ds_ftype in sph_ptypes: + registry.alias((ftype, "magnetic_field_%s" % ax), (ds_ftype, fname)) diff --git a/yt/fields/my_plugin_fields.py b/yt/fields/my_plugin_fields.py index 1a26bf5f219..31e144e12e1 100644 --- a/yt/fields/my_plugin_fields.py +++ b/yt/fields/my_plugin_fields.py @@ -1,18 +1,3 @@ -""" -This is a container for storing fields defined in the my_plugins.py file. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .field_plugin_registry import \ register_field_plugin diff --git a/yt/fields/particle_fields.py b/yt/fields/particle_fields.py index 066cdfe2365..3f731614262 100644 --- a/yt/fields/particle_fields.py +++ b/yt/fields/particle_fields.py @@ -1,19 +1,3 @@ -""" -These are common particle fields. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.fields.derived_field import \ @@ -42,6 +26,10 @@ get_sph_theta, get_sph_phi, \ modify_reference_frame +from yt.utilities.lib.misc_utilities import \ + obtain_relative_velocity_vector, \ + obtain_position_vector + from .vector_operations import \ create_magnitude_field @@ -49,13 +37,11 @@ get_radius sph_whitelist_fields = ( - 'particle_velocity_x', - 'particle_velocity_y', - 'particle_velocity_z', 'density', 'temperature', 'metallicity', 'thermal_energy', + 'smoothing_length', 'H_fraction', 'He_fraction', 'C_fraction', @@ -103,15 +89,14 @@ def particle_deposition_functions(ptype, coord_name, mass_name, registry): def particle_count(field, data): pos = data[ptype, coord_name] d = data.deposit(pos, method = "count") - d = data.ds.arr(d, input_units = "cm**-3") return data.apply_units(d, field.units) registry.add_field(("deposit", "%s_count" % ptype), - sampling_type="cell", - function = particle_count, - validators = [ValidateSpatial()], - units = '', - display_name = r"\mathrm{%s Count}" % ptype_dn) + sampling_type="cell", + function = particle_count, + validators = [ValidateSpatial()], + units = '', + display_name = r"\mathrm{%s Count}" % ptype_dn) def particle_mass(field, data): pos = data[ptype, coord_name] @@ -121,26 +106,28 @@ def particle_mass(field, data): return data.apply_units(d, field.units) registry.add_field(("deposit", "%s_mass" % ptype), - sampling_type="cell", - function = particle_mass, - validators = [ValidateSpatial()], - display_name = r"\mathrm{%s Mass}" % ptype_dn, - units = unit_system["mass"]) + sampling_type="cell", + function = particle_mass, + validators = [ValidateSpatial()], + display_name = r"\mathrm{%s Mass}" % ptype_dn, + units = unit_system["mass"]) def particle_density(field, data): - pos = data[ptype, coord_name].convert_to_units("code_length") - mass = data[ptype, mass_name].convert_to_units("code_mass") + pos = data[ptype, coord_name] + pos.convert_to_units("code_length") + mass = data[ptype, mass_name] + mass.convert_to_units("code_mass") d = data.deposit(pos, [mass], method = "sum") d = data.ds.arr(d, "code_mass") d /= data["index", "cell_volume"] return d registry.add_field(("deposit", "%s_density" % ptype), - sampling_type="cell", - function = particle_density, - validators = [ValidateSpatial()], - display_name = r"\mathrm{%s Density}" % ptype_dn, - units = unit_system["density"]) + sampling_type="cell", + function = particle_density, + validators = [ValidateSpatial()], + display_name = r"\mathrm{%s Density}" % ptype_dn, + units = unit_system["density"]) def particle_cic(field, data): pos = data[ptype, coord_name] @@ -150,11 +137,11 @@ def particle_cic(field, data): return d registry.add_field(("deposit", "%s_cic" % ptype), - sampling_type="cell", - function = particle_cic, - validators = [ValidateSpatial()], - display_name = r"\mathrm{%s CIC Density}" % ptype_dn, - units = unit_system["density"]) + sampling_type="cell", + function = particle_cic, + validators = [ValidateSpatial()], + display_name = r"\mathrm{%s CIC Density}" % ptype_dn, + units = unit_system["density"]) def _get_density_weighted_deposit_field(fname, units, method): def _deposit_field(field, data): @@ -170,7 +157,7 @@ def _deposit_field(field, data): top[bottom == 0] = 0.0 bnz = bottom.nonzero() top[bnz] /= bottom[bnz] - d = data.ds.arr(top, input_units=units) + d = data.ds.arr(top, units=units) return d return _deposit_field @@ -179,16 +166,22 @@ def _deposit_field(field, 
data): function = _get_density_weighted_deposit_field( "particle_velocity_%s" % ax, "code_velocity", method) registry.add_field( - ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)), sampling_type="cell", - function=function, units=unit_system["velocity"], take_log=False, + ("deposit", ("%s_"+name+"_velocity_%s") % (ptype, ax)), + sampling_type="cell", + function=function, + units=unit_system["velocity"], + take_log=False, validators=[ValidateSpatial(0)]) for method, name in zip(("cic", "sum"), ("cic", "nn")): function = _get_density_weighted_deposit_field( "age", "code_time", method) registry.add_field( - ("deposit", ("%s_"+name+"_age") % (ptype)), sampling_type="cell", - function=function, units=unit_system["time"], take_log=False, + ("deposit", ("%s_"+name+"_age") % (ptype)), + sampling_type="cell", + function=function, + units=unit_system["time"], + take_log=False, validators=[ValidateSpatial(0)]) # Now some translation functions. @@ -212,10 +205,10 @@ def particle_mesh_ids(field, data): data.deposit(pos, [ids], method = "mesh_id") return data.apply_units(ids, "") registry.add_field((ptype, "mesh_id"), - sampling_type="particle", - function = particle_mesh_ids, - validators = [ValidateSpatial()], - units = '') + sampling_type="particle", + function = particle_mesh_ids, + validators = [ValidateSpatial()], + units = '') return list(set(registry.keys()).difference(orig)) @@ -236,9 +229,13 @@ def _particle_position(field, data): for axi, ax in enumerate("xyz"): v, p = _get_coord_funcs(axi, ptype) registry.add_field((ptype, "particle_velocity_%s" % ax), - sampling_type="particle", function = v, units = "code_velocity") + sampling_type="particle", + function = v, + units = "code_velocity") registry.add_field((ptype, "particle_position_%s" % ax), - sampling_type="particle", function = p, units = "code_length") + sampling_type="particle", + function = p, + units = "code_length") def particle_vector_functions(ptype, coord_names, vel_names, registry): @@ -250,13 +247,14 @@ def _get_vec_func(_ptype, names): def particle_vectors(field, data): v = [data[_ptype, name].in_units(field.units) for name in names] - c = np.column_stack(v) - return data.apply_units(c, field.units) + return data.ds.arr(np.column_stack(v), v[0].units) return particle_vectors + registry.add_field((ptype, "particle_position"), sampling_type="particle", function=_get_vec_func(ptype, coord_names), units = "code_length") + registry.add_field((ptype, "particle_velocity"), sampling_type="particle", function=_get_vec_func(ptype, vel_names), @@ -284,10 +282,10 @@ def _particle_velocity_magnitude(field, data): data[ptype, 'relative_%s' % (svel % 'z')]**2) registry.add_field((ptype, "particle_velocity_magnitude"), - sampling_type="particle", - function=_particle_velocity_magnitude, - take_log=False, - units=unit_system["velocity"]) + sampling_type="particle", + function=_particle_velocity_magnitude, + take_log=False, + units=unit_system["velocity"]) def _particle_specific_angular_momentum(field, data): """Calculate the angular of a particle velocity. 
@@ -305,10 +303,10 @@ def _particle_specific_angular_momentum(field, data): registry.add_field((ptype, "particle_specific_angular_momentum"), - sampling_type="particle", - function=_particle_specific_angular_momentum, - units=unit_system["specific_angular_momentum"], - validators=[ValidateParameter("center")]) + sampling_type="particle", + function=_particle_specific_angular_momentum, + units=unit_system["specific_angular_momentum"], + validators=[ValidateParameter("center")]) def _get_spec_ang_mom_comp(axi, ax, _ptype): def _particle_specific_angular_momentum_component(field, data): @@ -338,14 +336,15 @@ def _particle_angular_momentum(field, data): return am.T registry.add_field((ptype, "particle_angular_momentum"), - sampling_type="particle", - function=_particle_angular_momentum, - units=unit_system["angular_momentum"], - validators=[ValidateParameter("center")]) + sampling_type="particle", + function=_particle_angular_momentum, + units=unit_system["angular_momentum"], + validators=[ValidateParameter("center")]) create_magnitude_field(registry, "particle_angular_momentum", unit_system["angular_momentum"], - ftype=ptype, particle_type=True) + sampling_type='particle', + ftype=ptype) def _particle_radius(field, data): """The spherical radius component of the particle positions @@ -355,26 +354,21 @@ def _particle_radius(field, data): """ return get_radius(data, "particle_position_", field.name[0]) - registry.add_field( - (ptype, "particle_radius"), - sampling_type="particle", - function=_particle_radius, - units=unit_system["length"], - validators=[ValidateParameter("center")]) + registry.add_field((ptype, "particle_radius"), + sampling_type="particle", + function=_particle_radius, + units=unit_system["length"], + validators=[ValidateParameter("center")]) def _relative_particle_position(field, data): """The cartesian particle positions in a rotated reference frame - Relative to the coordinate system defined by the *normal* vector and - *center* field parameters. + Relative to the coordinate system defined by *center* field parameter. Note that the orientation of the x and y axes are arbitrary. """ - normal = data.get_field_parameter('normal') - center = data.get_field_parameter('center') - pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T - L, pos = modify_reference_frame(center, normal, P=pos) - return pos + field_names = [(ptype, 'particle_position_%s' % ax) for ax in "xyz"] + return obtain_position_vector(data, field_names=field_names).T def _particle_position_relative(field, data): if not isinstance(data, FieldDetector): @@ -400,19 +394,13 @@ def _particle_position_relative(field, data): def _relative_particle_velocity(field, data): """The vector particle velocities in an arbitrary coordinate system - Relative to the coordinate system defined by the *normal* vector, - *bulk_velocity* vector and *center* field parameters. + Relative to the coordinate system defined by the *bulk_velocity* + vector field parameter. Note that the orientation of the x and y axes are arbitrary. 
""" - normal = data.get_field_parameter('normal') - center = data.get_field_parameter('center') - bv = data.get_field_parameter("bulk_velocity") - vel = data.ds.arr( - [data[ptype, svel % ax] - bv[iax] - for iax, ax in enumerate("xyz")]).T - L, vel = modify_reference_frame(center, normal, V=vel) - return vel + field_names = [(ptype, 'particle_velocity_%s' % ax) for ax in "xyz"] + return obtain_relative_velocity_vector(data, field_names=field_names).T def _particle_velocity_relative(field, data): if not isinstance(data, FieldDetector): @@ -435,7 +423,6 @@ def _particle_velocity_relative(field, data): validators=[ValidateParameter("normal"), ValidateParameter("center")]) - def _get_coord_funcs_relative(axi, _ptype): def _particle_pos_rel(field, data): return data[_ptype, "relative_particle_position"][:, axi] @@ -445,7 +432,9 @@ def _particle_vel_rel(field, data): for axi, ax in enumerate("xyz"): v, p = _get_coord_funcs_relative(axi, ptype) registry.add_field((ptype, "particle_velocity_relative_%s" % ax), - sampling_type="particle", function = v, units = "code_velocity") + sampling_type="particle", + function=v, + units="code_velocity") registry.add_field((ptype, "particle_position_relative_%s" % ax), sampling_type="particle", function = p, units = "code_length") registry.add_field((ptype, "relative_particle_velocity_%s" % ax), @@ -456,22 +445,22 @@ def _particle_vel_rel(field, data): # this is just particle radius but we add it with an alias for the sake of # consistent naming registry.add_field((ptype, "particle_position_spherical_radius"), - sampling_type="particle", - function=_particle_radius, - units=unit_system["length"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_radius, + units=unit_system["length"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_spherical_position_radius(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, 'particle_position_spherical_radius'] registry.add_field((ptype, "particle_spherical_position_radius"), - sampling_type="particle", - function=_particle_spherical_position_radius, - units=unit_system["length"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_spherical_position_radius, + units=unit_system["length"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_position_spherical_theta(field, data): """The spherical theta coordinate of the particle positions. 
@@ -483,23 +472,23 @@ def _particle_position_spherical_theta(field, data): pos = data['relative_particle_position'].T return data.ds.arr(get_sph_theta(pos, normal), "") - registry.add_field( - (ptype, "particle_position_spherical_theta"), - sampling_type="particle", - function=_particle_position_spherical_theta, - units="", - validators=[ValidateParameter("center"), ValidateParameter("normal")]) + registry.add_field((ptype, "particle_position_spherical_theta"), + sampling_type="particle", + function=_particle_position_spherical_theta, + units="", + validators=[ValidateParameter("center"), + ValidateParameter("normal")]) def _particle_spherical_position_theta(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, 'particle_position_spherical_theta'] registry.add_field((ptype, "particle_spherical_position_theta"), - sampling_type="particle", - function=_particle_spherical_position_theta, - units="", - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_spherical_position_theta, + units="", + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_position_spherical_phi(field, data): """The spherical phi component of the particle positions @@ -511,23 +500,23 @@ def _particle_position_spherical_phi(field, data): pos = data['relative_particle_position'].T return data.ds.arr(get_sph_phi(pos, normal), "") - registry.add_field( - (ptype, "particle_position_spherical_phi"), - sampling_type="particle", - function=_particle_position_spherical_phi, - units="", - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_position_spherical_phi"), + sampling_type="particle", + function=_particle_position_spherical_phi, + units="", + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_spherical_position_phi(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, 'particle_position_spherical_phi'] registry.add_field((ptype, "particle_spherical_position_phi"), - sampling_type="particle", - function=_particle_spherical_position_phi, - units="", - validators=[ValidateParameter("center"), - ValidateParameter("normal")]) + sampling_type="particle", + function=_particle_spherical_position_phi, + units="", + validators=[ValidateParameter("center"), + ValidateParameter("normal")]) def _particle_velocity_spherical_radius(field, data): """The spherical radius component of the particle velocities in an @@ -545,31 +534,31 @@ def _particle_velocity_spherical_radius(field, data): return sphr registry.add_field((ptype, "particle_velocity_spherical_radius"), - sampling_type="particle", - function=_particle_velocity_spherical_radius, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_velocity_spherical_radius, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_spherical_velocity_radius(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, 'particle_velocity_spherical_radius'] registry.add_field((ptype, "particle_spherical_velocity_radius"), - sampling_type="particle", - function=_particle_spherical_velocity_radius, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) 
+ sampling_type="particle", + function=_particle_spherical_velocity_radius, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) # particel_velocity_spherical_radius is simply aliased to # "particle_radial_velocity" for convenience registry.add_field((ptype, "particle_radial_velocity"), - sampling_type="particle", - function=_particle_spherical_velocity_radius, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_spherical_velocity_radius, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_velocity_spherical_theta(field, data): """The spherical theta component of the particle velocities in an @@ -586,23 +575,23 @@ def _particle_velocity_spherical_theta(field, data): spht = get_sph_theta_component(vel, theta, phi, normal) return spht - registry.add_field( - (ptype, "particle_velocity_spherical_theta"), - sampling_type="particle", - function=_particle_velocity_spherical_theta, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_velocity_spherical_theta"), + sampling_type="particle", + function=_particle_velocity_spherical_theta, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_spherical_velocity_theta(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, 'particle_velocity_spherical_theta'] registry.add_field((ptype, "particle_spherical_velocity_theta"), - sampling_type="particle", - function=_particle_spherical_velocity_theta, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_spherical_velocity_theta, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_velocity_spherical_phi(field, data): """The spherical phi component of the particle velocities @@ -617,23 +606,23 @@ def _particle_velocity_spherical_phi(field, data): sphp = get_sph_phi_component(vel, phi, normal) return sphp - registry.add_field( - (ptype, "particle_velocity_spherical_phi"), - sampling_type="particle", - function=_particle_velocity_spherical_phi, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_velocity_spherical_phi"), + sampling_type="particle", + function=_particle_velocity_spherical_phi, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_spherical_velocity_phi(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, 'particle_spherical_velocity_theta'] registry.add_field((ptype, "particle_spherical_velocity_phi"), - sampling_type="particle", - function=_particle_spherical_velocity_phi, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_spherical_velocity_phi, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_position_cylindrical_radius(field, data): """The cylindrical radius component of the particle positions @@ -643,14 
+632,15 @@ def _particle_position_cylindrical_radius(field, data): """ normal = data.get_field_parameter('normal') pos = data['relative_particle_position'].T - return data.ds.arr(get_cyl_r(pos, normal), 'code_length') + pos.convert_to_units("code_length") + return data.ds.arr(get_cyl_r(pos, normal), "code_length") - registry.add_field( - (ptype, "particle_position_cylindrical_radius"), - sampling_type="particle", - function=_particle_position_cylindrical_radius, - units=unit_system["length"], - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_position_cylindrical_radius"), + sampling_type="particle", + function=_particle_position_cylindrical_radius, + units=unit_system["length"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_position_cylindrical_theta(field,data): """The cylindrical theta component of the particle positions @@ -662,12 +652,12 @@ def _particle_position_cylindrical_theta(field,data): pos = data['relative_particle_position'].T return data.ds.arr(get_cyl_theta(pos, normal), "") - registry.add_field( - (ptype, "particle_position_cylindrical_theta"), - sampling_type="particle", - function=_particle_position_cylindrical_theta, - units="", - validators=[ValidateParameter("center"), ValidateParameter("normal")]) + registry.add_field((ptype, "particle_position_cylindrical_theta"), + sampling_type="particle", + function=_particle_position_cylindrical_theta, + units="", + validators=[ValidateParameter("center"), + ValidateParameter("normal")]) def _particle_position_cylindrical_z(field,data): """The cylindrical z component of the particle positions @@ -677,14 +667,15 @@ def _particle_position_cylindrical_z(field,data): """ normal = data.get_field_parameter('normal') pos = data['relative_particle_position'].T - return data.ds.arr(get_cyl_z(pos, normal), 'code_length') + pos.convert_to_units("code_length") + return data.ds.arr(get_cyl_z(pos, normal), "code_length") - registry.add_field( - (ptype, "particle_position_cylindrical_z"), - sampling_type="particle", - function=_particle_position_cylindrical_z, - units=unit_system["length"], - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_position_cylindrical_z"), + sampling_type="particle", + function=_particle_position_cylindrical_z, + units=unit_system["length"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_velocity_cylindrical_radius(field, data): """The cylindrical radius component of the particle velocities @@ -699,12 +690,12 @@ def _particle_velocity_cylindrical_radius(field, data): cylr = get_cyl_r_component(vel, theta, normal) return cylr - registry.add_field( - (ptype, "particle_velocity_cylindrical_radius"), - sampling_type="particle", - function=_particle_velocity_cylindrical_radius, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_velocity_cylindrical_radius"), + sampling_type="particle", + function=_particle_velocity_cylindrical_radius, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_velocity_cylindrical_theta(field, data): """The cylindrical theta component of the particle velocities @@ -719,23 +710,23 @@ def _particle_velocity_cylindrical_theta(field, data): cylt = get_cyl_theta_component(vel, theta, normal) return cylt - registry.add_field( - (ptype, 
"particle_velocity_cylindrical_theta"), - sampling_type="particle", - function=_particle_velocity_cylindrical_theta, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_velocity_cylindrical_theta"), + sampling_type="particle", + function=_particle_velocity_cylindrical_theta, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_cylindrical_velocity_theta(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, 'particle_velocity_cylindrical_theta'] registry.add_field((ptype, "particle_cylindrical_velocity_theta"), - sampling_type="particle", - function=_particle_cylindrical_velocity_theta, - units="cm/s", - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_cylindrical_velocity_theta, + units="cm/s", + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_velocity_cylindrical_z(field, data): """The cylindrical z component of the particle velocities @@ -748,23 +739,23 @@ def _particle_velocity_cylindrical_z(field, data): cylz = get_cyl_z_component(vel, normal) return cylz - registry.add_field( - (ptype, "particle_velocity_cylindrical_z"), - sampling_type="particle", - function=_particle_velocity_cylindrical_z, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), ValidateParameter("center")]) + registry.add_field((ptype, "particle_velocity_cylindrical_z"), + sampling_type="particle", + function=_particle_velocity_cylindrical_z, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def _particle_cylindrical_velocity_z(field, data): """This field is deprecated and will be removed in a future release""" return data[ptype, "particle_velocity_cylindrical_z"] registry.add_field((ptype, "particle_cylindrical_velocity_z"), - sampling_type="particle", - function=_particle_cylindrical_velocity_z, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), - ValidateParameter("center")]) + sampling_type="particle", + function=_particle_cylindrical_velocity_z, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), + ValidateParameter("center")]) def add_particle_average(registry, ptype, field_name, @@ -790,54 +781,23 @@ def _pfunc_avg(field, data): units = field_units) return fn +DEP_MSG_SMOOTH_FIELD = ( + "Since yt-4.0, it's no longer necessary to add a field specifically for " + "smoothing, because the global octree is removed. The old behavior of " + "interpolating onto a grid structure can be recovered through data objects " + "like ds.arbitrary_grid, ds.covering_grid, and most closely ds.octree. The " + "visualization machinery now treats SPH fields properly by smoothing onto " + "pixel locations. 
See this page to learn more: " + "https://yt-project.org/doc/yt4differences.html" +) + def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name, smoothing_length_name, density_name, smoothed_field, registry, nneighbors = 64, kernel_name = 'cubic'): - unit_system = registry.ds.unit_system - if kernel_name == 'cubic': - field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field)) - else: - field_name = ("deposit", "%s_%s_smoothed_%s" % (ptype, kernel_name, - smoothed_field)) - field_units = registry[ptype, smoothed_field].units - def _vol_weight(field, data): - pos = data[ptype, coord_name] - pos = pos.convert_to_units("code_length") - mass = data[ptype, mass_name].in_base(unit_system.name) - dens = data[ptype, density_name].in_base(unit_system.name) - quan = data[ptype, smoothed_field] - if hasattr(quan, "units"): - quan = quan.convert_to_units(field_units) - - if smoothing_length_name is None: - hsml = np.zeros(quan.shape, dtype='float64') - 1 - hsml = data.apply_units(hsml, "code_length") - else: - hsml = data[ptype, smoothing_length_name] - hsml.convert_to_units("code_length") - # This is for applying cutoffs, similar to in the SPLASH paper. - smooth_cutoff = data["index","cell_volume"]**(1./3) - smooth_cutoff.convert_to_units("code_length") - # volume_weighted smooth operations return lists of length 1. - rv = data.smooth(pos, [mass, hsml, dens, quan], - index_fields=[smooth_cutoff], - method="volume_weighted", - create_octree=True, - nneighbors=nneighbors, - kernel_name=kernel_name)[0] - rv[np.isnan(rv)] = 0.0 - # Now some quick unit conversions. - # This should be used when seeking a non-normalized value: - rv /= hsml.uq**3 / hsml.uq.in_base(unit_system.name).uq**3 - rv = data.apply_units(rv, field_units) - return rv - registry.add_field(field_name, - sampling_type="cell", - function = _vol_weight, - validators = [ValidateSpatial(0)], - units = field_units) - registry.find_dependencies((field_name,)) - return [field_name] + issue_deprecation_warning( + "This function is deprecated. " + DEP_MSG_SMOOTH_FIELD + ) + def add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors = 64): field_name = (ptype, "nearest_neighbor_distance_%s" % (nneighbors)) diff --git a/yt/fields/species_fields.py b/yt/fields/species_fields.py index 06346349b02..5fb5d4dedf7 100644 --- a/yt/fields/species_fields.py +++ b/yt/fields/species_fields.py @@ -1,34 +1,19 @@ -""" -Fields based on species of molecules or atoms. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
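For reference, the replacement workflow that DEP_MSG_SMOOTH_FIELD points at can be sketched roughly as follows; the snapshot path matches one used in the new SPH field tests and the grid dimensions are arbitrary, so treat both as illustrative:

```python
import yt

ds = yt.load("snapshot_033/snap_033.0.hdf5")  # any SPH snapshot; path only illustrative
# Instead of adding a dedicated "smoothed" deposit field, sample SPH fields directly
# on a grid-like data object; the kernel smoothing now happens at sampling time.
ag = ds.arbitrary_grid(ds.domain_left_edge, ds.domain_right_edge, dims=[64, 64, 64])
smoothed_density = ag["gas", "density"]  # a (64, 64, 64) array of smoothed values
```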
-#----------------------------------------------------------------------------- - import numpy as np import re -from yt.utilities.physical_constants import \ - amu_cgs +from yt.fields.field_detector import \ + FieldDetector +from yt.frontends.sph.data_structures import \ + ParticleDataset +from yt.funcs import \ + issue_deprecation_warning from yt.utilities.physical_ratios import \ - primordial_H_mass_fraction - + _primordial_mass_fraction from yt.utilities.chemical_formulas import \ ChemicalFormula from .field_plugin_registry import \ register_field_plugin -_primordial_mass_fraction = \ - {"H": primordial_H_mass_fraction, - "He" : (1 - primordial_H_mass_fraction)} # See YTEP-0003 for details, but we want to ensure these fields are all # populated: @@ -45,17 +30,23 @@ def _frac(field, data): / data[ftype, "density"] return _frac -def _create_mass_func(ftype, species): +def _mass_from_cell_volume_and_density(ftype, species): def _mass(field, data): return data[ftype, "%s_density" % species] \ * data["index", "cell_volume"] return _mass +def _mass_from_particle_mass_and_fraction(ftype, species): + def _mass(field, data): + return data[ftype, "%s_fraction" % species] \ + * data[ftype, 'particle_mass'] + return _mass + def _create_number_density_func(ftype, species): formula = ChemicalFormula(species) - weight = formula.weight # This is in AMU - weight *= amu_cgs def _number_density(field, data): + weight = formula.weight # This is in AMU + weight *= data.ds.units.physical_constants.amu_cgs return data[ftype, "%s_density" % species] \ / weight return _number_density @@ -66,8 +57,7 @@ def _density(field, data): * data[ftype,'density'] return _density -def add_species_field_by_density(registry, ftype, species, - particle_type = False): +def add_species_field_by_density(registry, ftype, species): """ This takes a field registry, a fluid type, and a species name and then adds the other fluids based on that. This assumes that the field @@ -75,27 +65,31 @@ def add_species_field_by_density(registry, ftype, species, """ unit_system = registry.ds.unit_system - registry.add_field((ftype, "%s_fraction" % species), sampling_type="cell", + registry.add_field((ftype, "%s_fraction" % species), + sampling_type="local", function = _create_fraction_func(ftype, species), - particle_type = particle_type, units = "") - registry.add_field((ftype, "%s_mass" % species), sampling_type="cell", + if isinstance(registry.ds, ParticleDataset): + _create_mass_func = _mass_from_particle_mass_and_fraction + else: + _create_mass_func = _mass_from_cell_volume_and_density + registry.add_field((ftype, "%s_mass" % species), + sampling_type="local", function = _create_mass_func(ftype, species), - particle_type = particle_type, units = unit_system["mass"]) - registry.add_field((ftype, "%s_number_density" % species), sampling_type="cell", + registry.add_field((ftype, "%s_number_density" % species), + sampling_type="local", function = _create_number_density_func(ftype, species), - particle_type = particle_type, units = unit_system["number_density"]) return [(ftype, "%s_number_density" % species), (ftype, "%s_density" % species), (ftype, "%s_mass" % species)] -def add_species_field_by_fraction(registry, ftype, species, - particle_type = False): + +def add_species_field_by_fraction(registry, ftype, species): """ This takes a field registry, a fluid type, and a species name and then adds the other fluids based on that. 
This assumes that the field @@ -103,19 +97,23 @@ def add_species_field_by_fraction(registry, ftype, species, """ unit_system = registry.ds.unit_system - registry.add_field((ftype, "%s_density" % species), sampling_type="cell", + registry.add_field((ftype, "%s_density" % species), + sampling_type="local", function = _create_density_func(ftype, species), - particle_type = particle_type, units = unit_system["density"]) - registry.add_field((ftype, "%s_mass" % species), sampling_type="cell", + if isinstance(registry.ds, ParticleDataset): + _create_mass_func = _mass_from_particle_mass_and_fraction + else: + _create_mass_func = _mass_from_cell_volume_and_density + registry.add_field((ftype, "%s_mass" % species), + sampling_type="local", function = _create_mass_func(ftype, species), - particle_type = particle_type, units = unit_system["mass"]) - registry.add_field((ftype, "%s_number_density" % species), sampling_type="cell", + registry.add_field((ftype, "%s_number_density" % species), + sampling_type="local", function = _create_number_density_func(ftype, species), - particle_type = particle_type, units = unit_system["number_density"]) return [(ftype, "%s_number_density" % species), @@ -140,32 +138,89 @@ def add_species_aliases(registry, ftype, alias_species, species): registry.alias((ftype, "%s_mass" % alias_species), (ftype, "%s_mass" % species)) -def add_nuclei_density_fields(registry, ftype, - particle_type = False): +def add_deprecated_species_aliases(registry, ftype, alias_species, species): + """ + Add the species aliases but with deprecation warnings. + """ + + for suffix in ["density", "fraction", "number_density", "mass"]: + add_deprecated_species_alias( + registry, ftype, alias_species, species, suffix) + +def add_deprecated_species_alias(registry, ftype, alias_species, species, + suffix): + """ + Add a deprecated species alias field. + """ + + unit_system = registry.ds.unit_system + if suffix == "fraction": + my_units = "" + else: + my_units = unit_system[suffix] + + def _dep_field(field, data): + if not isinstance(data, FieldDetector): + issue_deprecation_warning( + ("The \"%s_%s\" field is deprecated. " + + "Please use \"%s_%s\" instead.") % + (alias_species, suffix, species, suffix)) + return data[ftype, "%s_%s" % (species, suffix)] + + registry.add_field((ftype, "%s_%s" % (alias_species, suffix)), + sampling_type="local", + function=_dep_field, + units=my_units) + +def add_nuclei_density_fields(registry, ftype): unit_system = registry.ds.unit_system elements = _get_all_elements(registry.species_names) for element in elements: registry.add_field((ftype, "%s_nuclei_density" % element), - sampling_type="cell", - function = _nuclei_density, - particle_type = particle_type, - units = unit_system["number_density"]) + sampling_type="local", + function=_nuclei_density, + units=unit_system["number_density"]) + # Here, we add default nuclei and number density fields for H and + # He if they are not defined above. This assumes full ionization! 
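To make the full-ionization assumption concrete, with yt's primordial mass fractions (roughly X_H = 0.76 and X_He = 0.24) the defaults added in this hunk work out to:

```python
# hedged back-of-the-envelope for the default nuclei / electron number densities
X_H, X_He = 0.76, 0.24                         # primordial mass fractions (approximate)
mue_inv = X_H / 1.008 + 2.0 * X_He / 4.0026    # ~0.87 electrons per amu when fully ionized
# so the default El_number_density is ~0.87 * density / amu, while the H and He
# nuclei densities (aliased to H_p1 / He_p2 number densities) contribute
# X_H / 1.008 and X_He / 4.0026 nuclei per amu, respectively.
```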
for element in ["H", "He"]: if element in elements: continue - registry.add_field((ftype, "%s_nuclei_density" % element), - sampling_type="cell", - function = _default_nuclei_density, - particle_type = particle_type, - units = unit_system["number_density"]) + registry.add_field((ftype, "%s_nuclei_density" % element), + sampling_type="local", + function=_default_nuclei_density, + units=unit_system["number_density"]) + if element == "H": + registry.alias((ftype, "H_p1_number_density"), + (ftype, "H_nuclei_density")) + + if element == "He": + registry.alias((ftype, "He_p2_number_density"), + (ftype, "He_nuclei_density")) + + if (ftype, "El_number_density") not in registry: + registry.add_field((ftype, "El_number_density"), + sampling_type="local", + function=_default_nuclei_density, + units=unit_system["number_density"]) + def _default_nuclei_density(field, data): ftype = field.name[0] element = field.name[1][:field.name[1].find("_")] - return data[ftype, "density"] * _primordial_mass_fraction[element] / \ - ChemicalFormula(element).weight / amu_cgs - + amu_cgs = data.ds.units.physical_constants.amu_cgs + if element == "El": + # This assumes full ionization! + muinv = 1.0*_primordial_mass_fraction["H"] / \ + ChemicalFormula("H").weight + muinv += 2.0*_primordial_mass_fraction["He"] / \ + ChemicalFormula("He").weight + else: + muinv = _primordial_mass_fraction[element] / \ + ChemicalFormula(element).weight + return data[ftype, "density"] * muinv / amu_cgs + + def _nuclei_density(field, data): ftype = field.name[0] element = field.name[1][:field.name[1].find("_")] @@ -173,11 +228,11 @@ def _nuclei_density(field, data): nuclei_mass_field = "%s_nuclei_mass_density" % element if (ftype, nuclei_mass_field) in data.ds.field_info: return data[(ftype, nuclei_mass_field)] / \ - ChemicalFormula(element).weight / amu_cgs + ChemicalFormula(element).weight / data.ds.units.physical_constants.amu_cgs metal_field = "%s_metallicity" % element if (ftype, metal_field) in data.ds.field_info: return data[ftype, "density"] * data[(ftype, metal_field)] / \ - ChemicalFormula(element).weight / amu_cgs + ChemicalFormula(element).weight / data.ds.units.physical_constants.amu_cgs field_data = np.zeros_like(data[ftype, "%s_number_density" % data.ds.field_info.species_names[0]]) @@ -212,11 +267,9 @@ def _get_element_multiple(compound, element): return 1 return int(my_split[loc + 1]) + @register_field_plugin def setup_species_fields(registry, ftype = "gas", slice_info = None): - # We have to check what type of field this is -- if it's particles, then we - # set particle_type to True. - particle_type = ftype not in registry.ds.fluid_types for species in registry.species_names: # These are all the species we should be looking for fractions or # densities of. @@ -227,11 +280,15 @@ def setup_species_fields(registry, ftype = "gas", slice_info = None): else: # Skip it continue - func(registry, ftype, species, particle_type) - # Adds aliases for all neutral species from their raw "MM_" - # species to "MM_p0_" species to be explicit. - # See YTEP-0003 for more details. - if (ChemicalFormula(species).charge == 0): - alias_species = "%s_p0" % species.split('_')[0] - add_species_aliases(registry, "gas", alias_species, species) - add_nuclei_density_fields(registry, ftype, particle_type=particle_type) + func(registry, ftype, species) + + # Add aliases of X_p0_ to X_. + # These are deprecated and will be removed soon. 
+ if ChemicalFormula(species).charge == 0: + alias_species = species.split("_")[0] + if (ftype, "{}_density".format(alias_species)) in registry: + continue + add_deprecated_species_aliases( + registry, "gas", alias_species, species) + + add_nuclei_density_fields(registry, ftype) diff --git a/yt/fields/tests/test_angular_momentum.py b/yt/fields/tests/test_angular_momentum.py index 71d2c44dd42..0d52afbd589 100644 --- a/yt/fields/tests/test_angular_momentum.py +++ b/yt/fields/tests/test_angular_momentum.py @@ -1,11 +1,3 @@ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.testing import \ diff --git a/yt/fields/tests/test_field_name_container.py b/yt/fields/tests/test_field_name_container.py index 771e1ba917c..0eddb7693d2 100644 --- a/yt/fields/tests/test_field_name_container.py +++ b/yt/fields/tests/test_field_name_container.py @@ -4,6 +4,8 @@ requires_file def do_field_type(ft): + assert (dir(ft) == sorted(dir(ft))) + assert (sorted(dir(ft)) == sorted(f.name[1] for f in ft)) for field_name in dir(ft): f = getattr(ft, field_name) assert ((ft.field_type, field_name) == f.name) @@ -19,6 +21,8 @@ def do_field_type(ft): @requires_file(enzotiny) def test_field_name_container(): ds = load(enzotiny) + assert (dir(ds.fields) == sorted(dir(ds.fields))) + assert (sorted(ft.field_type for ft in ds.fields) == sorted(dir(ds.fields))) for field_type in dir(ds.fields): assert (field_type in ds.fields) ft = getattr(ds.fields, field_type) diff --git a/yt/fields/tests/test_fields.py b/yt/fields/tests/test_fields.py index fa8914f844d..b00e7385c26 100644 --- a/yt/fields/tests/test_fields.py +++ b/yt/fields/tests/test_fields.py @@ -105,12 +105,12 @@ def __call__(self): g.clear_data() g.field_parameters.update(sp) r1 = field._function(field, g) - if field.particle_type: + if field.sampling_type == 'particle': assert_equal(v1.shape[0], g.NumberOfParticles) else: assert_array_equal(r1.shape, v1.shape) for ax in 'xyz': - assert_array_equal(g[ax].shape, v1.shape) + assert_array_equal(g['index', ax].shape, v1.shape) with field.unit_registry(g): res = field._function(field, g) assert_array_equal(v1.shape, res.shape) @@ -127,6 +127,9 @@ def get_base_ds(nprocs): pfields, punits = [], [] for fname, (code_units, aliases, dn) in StreamFieldInfo.known_particle_fields: + if fname == 'smoothing_lenth': + # we test SPH fields elsewhere + continue pfields.append(fname) punits.append(code_units) @@ -203,15 +206,6 @@ def test_add_deposited_particle_field(): # The sum should equal the number of cells that have particles assert_equal(ret.sum(), np.count_nonzero(ad[("deposit", "io_count")])) -@requires_file('GadgetDiskGalaxy/snapshot_200.hdf5') -def test_add_smoothed_particle_field(): - ds = load('GadgetDiskGalaxy/snapshot_200.hdf5') - fn = ds.add_smoothed_particle_field(('PartType0', 'particle_ones')) - assert_equal(fn, ('deposit', 'PartType0_smoothed_particle_ones')) - dd = ds.sphere('center', (500, 'code_length')) - ret = dd[fn] - assert_almost_equal(ret.sum(), 638.5652315154682) - def test_add_gradient_fields(): ds = get_base_ds(1) gfields = ds.add_gradient_fields(("gas","density")) @@ -234,10 +228,7 @@ def test_add_gradient_fields(): assert str(ret.units) == "1/cm" def 
test_add_gradient_fields_curvilinear(): - def _dimensionful_density(field, data): - return data.apply_units(data["Density"], "g/cm**3") - ds = fake_amr_ds(geometry="spherical") - ds.add_field(("gas", "density"), _dimensionful_density, units="g/cm**3", sampling_type="cell") + ds = fake_amr_ds(fields = ["density"], geometry="spherical") gfields = ds.add_gradient_fields(("gas", "density")) gfields += ds.add_gradient_fields(("index", "ones")) field_list = [('gas', 'density_gradient_r'), @@ -373,9 +364,8 @@ def density_alias(field, data): def pmass_alias(field, data): return data['particle_mass'] - ds.add_field('particle_mass_alias', function=pmass_alias, - sampling_type='particle', - units='g') + ds.add_field('particle_mass_alias', function=pmass_alias, + units='g', sampling_type='particle') ds.field_info['particle_mass_alias'] ds.field_info['all', 'particle_mass_alias'] diff --git a/yt/fields/tests/test_fields_plugins.py b/yt/fields/tests/test_fields_plugins.py index 7635d2560c4..d59e78ea8b1 100644 --- a/yt/fields/tests/test_fields_plugins.py +++ b/yt/fields/tests/test_fields_plugins.py @@ -1,11 +1,3 @@ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import sys import unittest @@ -19,13 +11,36 @@ def _myfunc(field, data): return np.random.random(data['density'].shape) add_field('random', dimensions='dimensionless', - function=_myfunc, units='auto', sampling_type='cell') + function=_myfunc, units='auto', sampling_type='local') constant = 3 def myfunc(): return constant*4 foobar = 17 ''' +def setUpModule(): + my_plugin_name = ytcfg.get('yt', 'pluginfilename') + # In the following order if pluginfilename is: an absolute path, located in + # the CONFIG_DIR, located in an obsolete config dir. 
+ old_config_dir = os.path.join(os.path.expanduser('~'), '.yt') + for base_prefix in ('', CONFIG_DIR, old_config_dir): + potential_plugin_file = os.path.join(base_prefix, my_plugin_name) + if os.path.isfile(potential_plugin_file): + os.rename(potential_plugin_file, + potential_plugin_file + '.bak_test') + + plugin_file = os.path.join(CONFIG_DIR, my_plugin_name) + with open(plugin_file, 'w') as fh: + fh.write(TEST_PLUGIN_FILE) + + +def tearDownModule(): + from yt.fields.my_plugin_fields import my_plugins_fields + my_plugins_fields.clear() + my_plugin_name = ytcfg.get('yt', 'pluginfilename') + plugin_file = os.path.join(CONFIG_DIR, my_plugin_name) + os.remove(plugin_file) + class TestPluginFile(unittest.TestCase): @classmethod diff --git a/yt/fields/tests/test_magnetic_fields.py b/yt/fields/tests/test_magnetic_fields.py index 9b7e84c4949..56b3dbc66c7 100644 --- a/yt/fields/tests/test_magnetic_fields.py +++ b/yt/fields/tests/test_magnetic_fields.py @@ -30,9 +30,9 @@ def test_magnetic_fields(): dd2 = ds2.all_data() dd3 = ds3.all_data() - assert ds1.fields.gas.magnetic_field_strength.units == "gauss" - assert ds1.fields.gas.magnetic_field_poloidal.units == "gauss" - assert ds1.fields.gas.magnetic_field_toroidal.units == "gauss" + assert ds1.fields.gas.magnetic_field_strength.units == "G" + assert ds1.fields.gas.magnetic_field_poloidal.units == "G" + assert ds1.fields.gas.magnetic_field_toroidal.units == "G" assert ds2.fields.gas.magnetic_field_strength.units == "T" assert ds2.fields.gas.magnetic_field_poloidal.units == "T" assert ds2.fields.gas.magnetic_field_toroidal.units == "T" @@ -52,7 +52,7 @@ def test_magnetic_fields(): emag3 = (dd3["magnetic_field_x"]**2 + dd3["magnetic_field_y"]**2 + - dd3["magnetic_field_z"]**2)/(8.0*np.pi) + dd3["magnetic_field_z"]**2)/(2.0*mu_0) emag3.convert_to_units("code_pressure") assert_almost_equal(emag1, dd1["magnetic_energy"]) diff --git a/yt/fields/tests/test_particle_fields.py b/yt/fields/tests/test_particle_fields.py new file mode 100644 index 00000000000..91885189aad --- /dev/null +++ b/yt/fields/tests/test_particle_fields.py @@ -0,0 +1,19 @@ +from yt.testing import \ + assert_allclose_units, \ + requires_file +from yt.utilities.answer_testing.framework import \ + data_dir_load + +g30 = "IsolatedGalaxy/galaxy0030/galaxy0030" +@requires_file(g30) +def test_relative_particle_fields(): + ds = data_dir_load(g30) + offset = ds.arr([0.1,-0.2,0.3], "code_length") + c = ds.domain_center+offset + sp = ds.sphere(c, (10, "kpc")) + bv = ds.arr([1.,2.,3.], "code_velocity") + sp.set_field_parameter("bulk_velocity", bv) + assert_allclose_units(sp["relative_particle_position"], + sp["particle_position"]-c) + assert_allclose_units(sp["relative_particle_velocity"], + sp["particle_velocity"]-bv) diff --git a/yt/fields/tests/test_species_fields.py b/yt/fields/tests/test_species_fields.py new file mode 100644 index 00000000000..59ad4150c82 --- /dev/null +++ b/yt/fields/tests/test_species_fields.py @@ -0,0 +1,39 @@ +from yt.testing import requires_file, \ + assert_allclose_units, assert_equal +from yt.utilities.answer_testing.framework import \ + data_dir_load +from yt.utilities.physical_ratios import \ + _primordial_mass_fraction +from yt.utilities.chemical_formulas import \ + ChemicalFormula + +sloshing = "GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100" + +@requires_file(sloshing) +def test_default_species_fields(): + ds = data_dir_load(sloshing) + sp = ds.sphere("c", (0.2, "unitary")) + amu_cgs = ds.units.physical_constants.amu_cgs + + mueinv = 
1.0*_primordial_mass_fraction["H"] / \ + ChemicalFormula("H").weight + mueinv *= sp["index","ones"] + mueinv += 2.0*_primordial_mass_fraction["He"] / \ + ChemicalFormula("He").weight + mupinv = _primordial_mass_fraction["H"] / \ + ChemicalFormula("H").weight + mupinv *= sp["index","ones"] + muainv = _primordial_mass_fraction["He"] / \ + ChemicalFormula("He").weight + muainv *= sp["index","ones"] + mueinv2 = sp["gas","El_number_density"]*amu_cgs/sp["gas","density"] + mupinv2 = sp["gas","H_p1_number_density"]*amu_cgs/sp["gas","density"] + muainv2 = sp["gas","He_p2_number_density"]*amu_cgs/sp["gas","density"] + + + assert_allclose_units(mueinv, mueinv2) + assert_allclose_units(mupinv, mupinv2) + assert_allclose_units(muainv, muainv2) + + assert_equal(sp["gas","H_p1_number_density"], sp["gas","H_nuclei_density"]) + assert_equal(sp["gas","He_p2_number_density"], sp["gas","He_nuclei_density"]) diff --git a/yt/fields/tests/test_sph_fields.py b/yt/fields/tests/test_sph_fields.py new file mode 100644 index 00000000000..d039c434cc7 --- /dev/null +++ b/yt/fields/tests/test_sph_fields.py @@ -0,0 +1,67 @@ +import yt + +from collections import defaultdict +from yt.testing import \ + assert_array_almost_equal, \ + assert_equal, \ + requires_file + +isothermal_h5 = "IsothermalCollapse/snap_505.hdf5" +isothermal_bin = "IsothermalCollapse/snap_505" +snap_33 = "snapshot_033/snap_033.0.hdf5" +tipsy_gal = 'TipsyGalaxy/galaxy.00300' +FIRE_m12i = 'FIRE_M12i_ref11/snapshot_600.hdf5' + +iso_kwargs = dict( + bounding_box=[[-3, 3], [-3, 3], [-3, 3]], + unit_base={'UnitLength_in_cm': 5.0e16, + 'UnitMass_in_g': 1.98992e33, + 'UnitVelocity_in_cm_per_s': 46385.190}, +) + +load_kwargs = defaultdict(dict) +load_kwargs.update({ + isothermal_h5: iso_kwargs, + isothermal_bin: iso_kwargs, +}) + +gas_fields_to_particle_fields = { + 'temperature': 'Temperature', + 'density': 'Density', + 'velocity_x': 'particle_velocity_x', + 'velocity_magnitude': 'particle_velocity_magnitude', +} + +@requires_file(isothermal_bin) +@requires_file(isothermal_h5) +@requires_file(snap_33) +@requires_file(tipsy_gal) +@requires_file(FIRE_m12i) +def test_sph_field_semantics(): + for ds_fn in [tipsy_gal, isothermal_h5, isothermal_bin, snap_33, FIRE_m12i]: + yield sph_fields_validate, ds_fn + +def sph_fields_validate(ds_fn): + ds = yt.load(ds_fn, **(load_kwargs[ds_fn])) + ad = ds.all_data() + for gf, pf in gas_fields_to_particle_fields.items(): + gas_field = ad['gas', gf] + part_field = ad[ds._sph_ptypes[0], pf] + + assert_array_almost_equal(gas_field, part_field) + + npart = ds.particle_type_counts[ds._sph_ptypes[0]] + err_msg = "Field %s is not the correct shape" % (gf,) + assert_equal(npart, gas_field.shape[0], err_msg=err_msg) + + dd = ds.r[0.4:0.6, 0.4:0.6, 0.4:0.6] + + for i, ax in enumerate('xyz'): + dd.set_field_parameter( + 'cp_%s_vec' % (ax,), yt.YTArray([1, 1, 1])) + dd.set_field_parameter('axis', i) + dd.set_field_parameter('omega_baryon', 0.3) + + for f in ds.fields.gas: + gas_field = dd[f] + assert f.is_sph_field diff --git a/yt/fields/tests/test_vector_fields.py b/yt/fields/tests/test_vector_fields.py index d208b9d528e..75e41e1bfa7 100644 --- a/yt/fields/tests/test_vector_fields.py +++ b/yt/fields/tests/test_vector_fields.py @@ -1,11 +1,3 @@ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.testing import \ @@ -77,6 +69,26 @@ def compare_vector_conversions(data_source): data_source['velocity_%s' % d] - bulk_velocity[i], data_source['relative_velocity_%s' % d]) + for i, ax in enumerate('xyz'): + data_source.set_field_parameter("axis", i) + data_source.clear_data() + assert_allclose_units(data_source["velocity_los"], + data_source["relative_velocity_%s" % ax]) + + for i, ax in enumerate("xyz"): + prj = data_source.ds.proj("velocity_los", i, weight_field="density") + assert_allclose_units(prj["velocity_los"], prj["velocity_%s" % ax]) + + data_source.clear_data() + ax = [0.1, 0.2, -0.3] + data_source.set_field_parameter("axis", ax) + ax /= np.sqrt(np.dot(ax,ax)) + vlos = data_source["relative_velocity_x"]*ax[0] + vlos += data_source["relative_velocity_y"]*ax[1] + vlos += data_source["relative_velocity_z"]*ax[2] + assert_allclose_units(data_source["velocity_los"], vlos) + + def test_vector_component_conversions_fake(): ds = fake_random_ds(16) ad = ds.all_data() diff --git a/yt/fields/tests/test_xray_fields.py b/yt/fields/tests/test_xray_fields.py index ee9dc7f2475..2401d385efa 100644 --- a/yt/fields/tests/test_xray_fields.py +++ b/yt/fields/tests/test_xray_fields.py @@ -4,31 +4,37 @@ requires_ds, can_run_ds, data_dir_load, \ ProjectionValuesTest, FieldValuesTest + def setup(): from yt.config import ytcfg ytcfg["yt","__withintesting"] = "True" + def check_xray_fields(ds_fn, fields): if not can_run_ds(ds_fn): return dso = [ None, ("sphere", ("m", (0.1, 'unitary')))] for field in fields: for dobj_name in dso: for axis in [0, 1, 2]: - yield ProjectionValuesTest(ds_fn, axis, field, + yield ProjectionValuesTest(ds_fn, axis, field, None, dobj_name) yield FieldValuesTest(ds_fn, field, dobj_name) + sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300" +d9p = "D9p_500/10MpcBox_HartGal_csf_a0.500.d" + + @requires_ds(sloshing, big_data=True) def test_sloshing_apec(): ds = data_dir_load(sloshing) - fields = add_xray_emissivity_field(ds, 0.5, 7.0, table_type="apec", + fields = add_xray_emissivity_field(ds, 0.5, 7.0, table_type="apec", metallicity=0.3) for test in check_xray_fields(ds, fields): test_sloshing_apec.__name__ = test.description yield test -d9p = "D9p_500/10MpcBox_HartGal_csf_a0.500.d" + @requires_ds(d9p, big_data=True) def test_d9p_cloudy(): ds = data_dir_load(d9p) @@ -36,5 +42,17 @@ def test_d9p_cloudy(): table_type="cloudy", cosmology=ds.cosmology, metallicity=("gas", "metallicity")) for test in check_xray_fields(ds, fields): - test_d9p_cloudy.__name__ = test.description + test.suffix = "current_redshift" + test_d9p_cloudy.__name__ = test.description + test.suffix + yield test + +@requires_ds(d9p, big_data=True) +def test_d9p_cloudy_local(): + ds = data_dir_load(d9p) + fields = add_xray_emissivity_field(ds, 0.5, 2.0, dist=(1.0, "Mpc"), + table_type="cloudy", + metallicity=("gas", "metallicity")) + for test in check_xray_fields(ds, fields): + test.suffix = "dist_1Mpc" + test_d9p_cloudy_local.__name__ = test.description + test.suffix yield test diff --git a/yt/fields/vector_operations.py b/yt/fields/vector_operations.py index ac681e9b539..98b2fdf0531 100644 --- a/yt/fields/vector_operations.py +++ b/yt/fields/vector_operations.py @@ -1,21 +1,9 @@ -""" -This is where we define a handful of vector operations for fields. - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. 
-# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from .derived_field import \ ValidateParameter, \ - ValidateSpatial + ValidateSpatial, \ + NeedsParameter from yt.utilities.math_utils import \ get_sph_r_component, \ @@ -25,9 +13,7 @@ get_cyl_z_component, \ get_cyl_theta_component -from yt.funcs import \ - just_one, \ - handle_mks_cgs +from yt.funcs import just_one, iterable from yt.geometry.geometry_handler import \ is_curvilinear @@ -44,12 +30,15 @@ def get_bulk(data, basename, unit): def create_magnitude_field(registry, basename, field_units, ftype="gas", slice_info=None, - validators=None, particle_type=False): + validators=None, sampling_type=None): axis_order = registry.ds.coordinates.axis_order field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in axis_order] + if sampling_type is None: + sampling_type = 'local' + def _magnitude(field, data): fn = field_components[0] if data.has_field_parameter('bulk_%s' % basename): @@ -63,11 +52,6 @@ def _magnitude(field, data): mag += (data[fn])**2 return np.sqrt(mag) - if particle_type: - sampling_type = 'particle' - else: - sampling_type = 'cell' - registry.add_field((ftype, "%s_magnitude" % basename), sampling_type=sampling_type, function=_magnitude, @@ -75,8 +59,7 @@ def _magnitude(field, data): validators=validators) def create_relative_field(registry, basename, field_units, ftype='gas', - slice_info=None, validators=None, - particle_type=False): + slice_info=None, validators=None): axis_order = registry.ds.coordinates.axis_order @@ -85,28 +68,54 @@ def create_relative_field(registry, basename, field_units, ftype='gas', def relative_vector(ax): def _relative_vector(field, data): iax = axis_order.index(ax) - d = handle_mks_cgs(data[field_components[iax]], field.units) - bulk = handle_mks_cgs(get_bulk(data, basename, d.unit_quantity), - field.units) + d = data[field_components[iax]] + bulk = get_bulk(data, basename, d.unit_quantity) return d - bulk[iax] return _relative_vector - if particle_type: - sampling_type = 'particle' - else: - sampling_type = 'cell' - for d in axis_order: registry.add_field((ftype, "relative_%s_%s" % (basename, d)), - sampling_type=sampling_type, + sampling_type='local', function=relative_vector(d), units=field_units, validators=validators) +def create_los_field(registry, basename, field_units, + ftype='gas', slice_info=None): + axis_order = registry.ds.coordinates.axis_order + + validators = [ValidateParameter('bulk_%s' % basename), + ValidateParameter("axis", {'axis': [0, 1, 2]})] + + field_comps = [(ftype, "%s_%s" % (basename, ax)) for ax in axis_order] + + def _los_field(field, data): + if data.has_field_parameter('bulk_%s' % basename): + fns = [(fc[0], "relative_%s" % fc[1]) for fc in field_comps] + else: + fns = field_comps + ax = data.get_field_parameter("axis") + if iterable(ax): + # Make sure this is a unit vector + ax /= np.sqrt(np.dot(ax, ax)) + ret = data[fns[0]]*ax[0] + \ + data[fns[1]]*ax[1] + \ + data[fns[2]]*ax[2] + elif ax in [0, 1, 2]: + ret = data[fns[ax]] + else: + raise NeedsParameter(["axis"]) + return ret + + registry.add_field((ftype, "%s_los" % basename), + sampling_type='local', + function=_los_field, + units=field_units, + validators=validators) def create_squared_field(registry, basename, field_units, ftype="gas", slice_info=None, - validators=None, particle_type=False): + 
validators=None): axis_order = registry.ds.coordinates.axis_order @@ -122,9 +131,9 @@ def _squared(field, data): squared += data[fn] * data[fn] return squared - registry.add_field((ftype, "%s_squared" % basename), sampling_type="cell", + registry.add_field((ftype, "%s_squared" % basename), sampling_type="local", function=_squared, units=field_units, - validators=validators, particle_type=particle_type) + validators=validators) def create_vector_fields(registry, basename, field_units, ftype="gas", slice_info=None): @@ -179,12 +188,14 @@ def _spherical_radius_component(field, data): return rv registry.add_field((ftype, "%s_spherical_radius" % basename), - sampling_type="cell", + sampling_type="local", function=_spherical_radius_component, units=field_units, validators=[ValidateParameter("normal"), ValidateParameter("center"), ValidateParameter("bulk_%s" % basename)]) + create_los_field( + registry, basename, field_units, ftype=ftype, slice_info=slice_info) def _radial(field, data): return data[ftype, "%s_spherical_radius" % basename] @@ -197,19 +208,19 @@ def _tangential(field, data): data[ftype, "%s_spherical_phi" % basename]**2.0) registry.add_field((ftype, "radial_%s" % basename), - sampling_type="cell", + sampling_type="local", function=_radial, units=field_units, validators=[ValidateParameter("normal"), ValidateParameter("center")]) registry.add_field((ftype, "radial_%s_absolute" % basename), - sampling_type="cell", + sampling_type="local", function=_radial_absolute, units=field_units) registry.add_field((ftype, "tangential_%s" % basename), - sampling_type="cell", + sampling_type="local", function=_tangential, units=field_units) @@ -227,7 +238,7 @@ def _spherical_theta_component(field, data): return get_sph_theta_component(vectors, theta, phi, normal) registry.add_field((ftype, "%s_spherical_theta" % basename), - sampling_type="cell", + sampling_type="local", function=_spherical_theta_component, units=field_units, validators=[ValidateParameter("normal"), @@ -247,7 +258,7 @@ def _spherical_phi_component(field, data): return get_sph_phi_component(vectors, phi, normal) registry.add_field((ftype, "%s_spherical_phi" % basename), - sampling_type="cell", + sampling_type="local", function=_spherical_phi_component, units=field_units, validators=[ValidateParameter("normal"), @@ -265,7 +276,7 @@ def _cp_val(field, data): for ax in 'xyz': registry.add_field((ftype, "cutting_plane_%s_%s" % (basename, ax)), - sampling_type="cell", + sampling_type="local", function=_cp_vectors(ax), units=field_units) @@ -291,14 +302,14 @@ def _divergence_abs(field, data): div_units = field_units / registry.ds.unit_system["length"] registry.add_field((ftype, "%s_divergence" % basename), - sampling_type="cell", + sampling_type="local", function=_divergence, units=div_units, validators=[ValidateSpatial(1), ValidateParameter('bulk_%s' % basename)]) registry.add_field((ftype, "%s_divergence_absolute" % basename), - sampling_type="cell", + sampling_type="local", function=_divergence_abs, units=div_units) @@ -307,7 +318,7 @@ def _tangential_over_magnitude(field, data): data[ftype, '%s_magnitude' % basename]) return np.abs(tr) registry.add_field((ftype, "tangential_over_%s_magnitude" % basename), - sampling_type="cell", + sampling_type="local", function=_tangential_over_magnitude, take_log=False) @@ -324,7 +335,7 @@ def _cylindrical_radius_component(field, data): return get_cyl_r_component(vectors, theta, normal) registry.add_field((ftype, "%s_cylindrical_radius" % basename), - sampling_type="cell", + sampling_type="local", 
function=_cylindrical_radius_component, units=field_units, validators=[ValidateParameter("normal")]) @@ -334,7 +345,7 @@ def _cylindrical_radial(field, data): return data[ftype, '%s_cylindrical_radius' % basename] registry.add_field((ftype, "cylindrical_radial_%s" % basename), - sampling_type="cell", + sampling_type="local", function=_cylindrical_radial, units=field_units) @@ -343,7 +354,7 @@ def _cylindrical_radial_absolute(field, data): return np.abs(data[ftype, '%s_cylindrical_radius' % basename]) registry.add_field((ftype, "cylindrical_radial_%s_absolute" % basename), - sampling_type="cell", + sampling_type="local", function=_cylindrical_radial_absolute, units=field_units, validators=[ValidateParameter("normal")]) @@ -362,7 +373,7 @@ def _cylindrical_theta_component(field, data): return get_cyl_theta_component(vectors, theta, normal) registry.add_field((ftype, "%s_cylindrical_theta" % basename), - sampling_type="cell", + sampling_type="local", function=_cylindrical_theta_component, units=field_units, validators=[ValidateParameter("normal"), @@ -371,20 +382,20 @@ def _cylindrical_theta_component(field, data): def _cylindrical_tangential(field, data): """This field is deprecated and will be removed in a future release""" - return data["%s_cylindrical_theta" % basename] + return data[ftype, "%s_cylindrical_theta" % basename] def _cylindrical_tangential_absolute(field, data): """This field is deprecated and will be removed in a future release""" - return np.abs(data['cylindrical_tangential_%s' % basename]) + return np.abs(data[ftype, 'cylindrical_tangential_%s' % basename]) registry.add_field((ftype, "cylindrical_tangential_%s" % basename), - sampling_type="cell", + sampling_type="local", function=_cylindrical_tangential, units=field_units) registry.add_field( (ftype, "cylindrical_tangential_%s_absolute" % basename), - sampling_type="cell", + sampling_type="local", function=_cylindrical_tangential_absolute, units=field_units) @@ -400,7 +411,7 @@ def _cylindrical_z_component(field, data): return get_cyl_z_component(vectors, normal) registry.add_field((ftype, "%s_cylindrical_z" % basename), - sampling_type="cell", + sampling_type="local", function=_cylindrical_z_component, units=field_units, validators=[ValidateParameter("normal"), @@ -434,7 +445,7 @@ def _cartesian_x(field,data): # it's redundant to define a cartesian x field for 1D data if registry.ds.dimensionality > 1: - registry.add_field((ftype, "%s_cartesian_x" % basename), sampling_type="cell", + registry.add_field((ftype, "%s_cartesian_x" % basename), sampling_type="local", function=_cartesian_x, units=field_units, display_field=True) @@ -464,7 +475,7 @@ def _cartesian_y(field,data): data["%s_phi" % basename] * np.cos(data["phi"])) if registry.ds.dimensionality >= 2: - registry.add_field((ftype, "%s_cartesian_y" % basename), sampling_type="cell", + registry.add_field((ftype, "%s_cartesian_y" % basename), sampling_type="local", function=_cartesian_y, units=field_units, display_field=True) @@ -478,7 +489,7 @@ def _cartesian_z(field,data): data["%s_theta" % basename] * np.sin(data["theta"])) if registry.ds.dimensionality == 3: - registry.add_field((ftype, "%s_cartesian_z" % basename), sampling_type="cell", + registry.add_field((ftype, "%s_cartesian_z" % basename), sampling_type="local", function=_cartesian_z, units=field_units, display_field=True) @@ -486,7 +497,7 @@ def _cartesian_z(field,data): def create_averaged_field(registry, basename, field_units, ftype="gas", slice_info=None, validators=None, - weight="cell_mass"): + 
weight="mass"): if validators is None: validators = [] diff --git a/yt/fields/xray_emission_fields.py b/yt/fields/xray_emission_fields.py index 63873163c38..37342f0de8c 100644 --- a/yt/fields/xray_emission_fields.py +++ b/yt/fields/xray_emission_fields.py @@ -1,19 +1,3 @@ -""" -Integrator classes to deal with interpolation and integration of input spectral -bins. Currently only supports Cloudy and APEC-style data. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np import os @@ -23,7 +7,6 @@ from yt.funcs import \ mylog, \ only_on_root, \ - issue_deprecation_warning, \ parse_h5_attr from yt.utilities.exceptions import YTFieldNotFound from yt.utilities.exceptions import YTException @@ -33,10 +16,11 @@ from yt.utilities.cosmology import Cosmology data_version = {"cloudy": 2, - "apec": 2} + "apec": 3} data_url = "http://yt-project.org/data" + def _get_data_file(table_type, data_dir=None): data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type]) if data_dir is None: @@ -45,11 +29,12 @@ def _get_data_file(table_type, data_dir=None): data_path = os.path.join(data_dir, data_file) if not os.path.exists(data_path): msg = "Failed to find emissivity data file %s! " % data_file + \ - "Please download from http://yt-project.org/data!" + "Please download from %s!" % data_url mylog.error(msg) raise IOError(msg) return data_path + class EnergyBoundsException(YTException): def __init__(self, lower, upper): self.lower = lower @@ -59,6 +44,7 @@ def __str__(self): return "Energy bounds are %e to %e keV." % \ (self.lower, self.upper) + class ObsoleteDataException(YTException): def __init__(self, table_type): data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type]) @@ -68,6 +54,7 @@ def __init__(self, table_type): def __str__(self): return self.msg + class XrayEmissivityIntegrator(object): r"""Class for making X-ray emissivity fields. Uses hdf5 data tables generated from Cloudy and AtomDB/APEC. @@ -149,10 +136,11 @@ def get_interpolator(self, data_type, e_min, e_max, energy=True): return emiss + def add_xray_emissivity_field(ds, e_min, e_max, redshift=0.0, - metallicity=("gas", "metallicity"), + metallicity=("gas", "metallicity"), table_type="cloudy", data_dir=None, - cosmology=None, **kwargs): + cosmology=None, dist=None, ftype="gas"): r"""Create X-ray emissivity fields for a given energy range. Parameters @@ -179,35 +167,35 @@ def add_xray_emissivity_field(ds, e_min, e_max, redshift=0.0, If set and redshift > 0.0, this cosmology will be used when computing the cosmological dependence of the emission fields. If not set, yt's default LCDM cosmology will be used. + dist : (value, unit) tuple or :class:`~yt.units.yt_array.YTQuantity`, optional + The distance to the source, used for making intensity fields. You should + only use this if your source is nearby (not cosmological). 
Default: None + ftype : string, optional + The field type to use when creating the fields, default "gas" - This will create three fields: + This will create at least three fields: "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3) "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1) "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3) + and if a redshift or distance is specified it will create two others: + + "xray_intensity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3 arcsec^-2) + "xray_photon_intensity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3 arcsec^-2) + + These latter two are really only useful when making projections. + Examples -------- >>> import yt >>> ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100") >>> yt.add_xray_emissivity_field(ds, 0.5, 2) - >>> p = yt.ProjectionPlot(ds, 'x', "xray_emissivity_0.5_2_keV") + >>> p = yt.ProjectionPlot(ds, 'x', ("gas","xray_emissivity_0.5_2_keV"), + ... table_type='apec') >>> p.save() """ - # The next several if constructs are for backwards-compatibility - if "constant_metallicity" in kwargs: - issue_deprecation_warning("The \"constant_metallicity\" parameter is deprecated. Set " - "the \"metallicity\" parameter to a constant float value instead.") - metallicity = kwargs["constant_metallicity"] - if "with_metals" in kwargs: - issue_deprecation_warning("The \"with_metals\" parameter is deprecated. Use the " - "\"metallicity\" parameter to choose a constant or " - "spatially varying metallicity.") - if kwargs["with_metals"] and isinstance(metallicity, float): - raise RuntimeError("\"with_metals=True\", but you specified a constant metallicity!") - if not kwargs["with_metals"] and not isinstance(metallicity, float): - raise RuntimeError("\"with_metals=False\", but you didn't specify a constant metallicity!") if not isinstance(metallicity, float) and metallicity is not None: try: metallicity = ds._get_field_info(*metallicity) @@ -215,7 +203,20 @@ def add_xray_emissivity_field(ds, e_min, e_max, redshift=0.0, raise RuntimeError("Your dataset does not have a {} field! 
".format(metallicity) + "Perhaps you should specify a constant metallicity instead?") - my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir, redshift=redshift) + if table_type == "cloudy": + # Cloudy wants to scale by nH**2 + other_n = "H_nuclei_density" + else: + # APEC wants to scale by nH*ne + other_n = "El_number_density" + + def _norm_field(field, data): + return data[ftype, "H_nuclei_density"]*data[ftype, other_n] + ds.add_field((ftype, "norm_field"), _norm_field, units="cm**-6", + sampling_type='local') + + my_si = XrayEmissivityIntegrator(table_type, data_dir=data_dir, + redshift=redshift) em_0 = my_si.get_interpolator("primordial", e_min, e_max) emp_0 = my_si.get_interpolator("primordial", e_min, e_max, energy=False) @@ -225,87 +226,99 @@ def add_xray_emissivity_field(ds, e_min, e_max, redshift=0.0, def _emissivity_field(field, data): with np.errstate(all='ignore'): - dd = {"log_nH": np.log10(data["gas", "H_nuclei_density"]), - "log_T": np.log10(data["gas", "temperature"])} + dd = {"log_nH": np.log10(data[ftype, "H_nuclei_density"]), + "log_T": np.log10(data[ftype, "temperature"])} my_emissivity = np.power(10, em_0(dd)) if metallicity is not None: if isinstance(metallicity, DerivedField): - my_Z = data[metallicity.name] + my_Z = data[metallicity.name].to("Zsun") else: my_Z = metallicity my_emissivity += my_Z * np.power(10, em_Z(dd)) my_emissivity[np.isnan(my_emissivity)] = 0 - return data["gas","H_nuclei_density"]**2 * \ + return data[ftype, "norm_field"] * \ YTArray(my_emissivity, "erg*cm**3/s") - emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max) - ds.add_field(("gas", emiss_name), function=_emissivity_field, + emiss_name = (ftype, "xray_emissivity_%s_%s_keV" % (e_min, e_max)) + ds.add_field(emiss_name, function=_emissivity_field, display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max), - sampling_type="cell", units="erg/cm**3/s") + sampling_type="local", units="erg/cm**3/s") def _luminosity_field(field, data): - return data[emiss_name] * data["cell_volume"] + return data[emiss_name]*data[ftype, "mass"]/data[ftype, "density"] - lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max) - ds.add_field(("gas", lum_name), function=_luminosity_field, + lum_name = (ftype, "xray_luminosity_%s_%s_keV" % (e_min, e_max)) + ds.add_field(lum_name, function=_luminosity_field, display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max), - sampling_type="cell", units="erg/s") + sampling_type="local", units="erg/s") def _photon_emissivity_field(field, data): - dd = {"log_nH": np.log10(data["gas", "H_nuclei_density"]), - "log_T": np.log10(data["gas", "temperature"])} + dd = {"log_nH": np.log10(data[ftype, "H_nuclei_density"]), + "log_T": np.log10(data[ftype, "temperature"])} my_emissivity = np.power(10, emp_0(dd)) if metallicity is not None: if isinstance(metallicity, DerivedField): - my_Z = data[metallicity.name] + my_Z = data[metallicity.name].to("Zsun") else: my_Z = metallicity my_emissivity += my_Z * np.power(10, emp_Z(dd)) - return data["gas", "H_nuclei_density"]**2 * \ + return data[ftype, "norm_field"] * \ YTArray(my_emissivity, "photons*cm**3/s") - phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max) - ds.add_field(("gas", phot_name), function=_photon_emissivity_field, + phot_name = (ftype, "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)) + ds.add_field(phot_name, function=_photon_emissivity_field, display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max), - sampling_type="cell", units="photons/cm**3/s") + sampling_type="local", units="photons/cm**3/s") 
fields = [emiss_name, lum_name, phot_name] - if redshift > 0.0: - - if cosmology is None: - if hasattr(ds, "cosmology"): - cosmology = ds.cosmology + if redshift > 0.0 or dist is not None: + + if dist is None: + if cosmology is None: + if hasattr(ds, "cosmology"): + cosmology = ds.cosmology + else: + cosmology = Cosmology() + D_L = cosmology.luminosity_distance(0.0, redshift) + angular_scale = 1.0/cosmology.angular_scale(0.0, redshift) + dist_fac = ds.quan(1.0/(4.0*np.pi*D_L*D_L*angular_scale*angular_scale).v, "rad**-2") + else: + redshift = 0.0 # Only for local sources! + if not isinstance(dist, YTQuantity): + try: + dist = ds.quan(dist[0], dist[1]) + except TypeError: + raise RuntimeError("Please specifiy 'dist' as a YTQuantity " + "or a (value, unit) tuple!") else: - cosmology = Cosmology() - - D_L = cosmology.luminosity_distance(0.0, redshift) - angular_scale = 1.0/cosmology.angular_scale(0.0, redshift) - dist_fac = 1.0/(4.0*np.pi*D_L*D_L*angular_scale*angular_scale) + dist = ds.quan(dist.value, dist.units) + angular_scale = dist/ds.quan(1.0, "radian") + dist_fac = ds.quan(1.0/(4.0*np.pi*dist*dist*angular_scale*angular_scale).v, "rad**-2") - ei_name = "xray_intensity_%s_%s_keV" % (e_min, e_max) + ei_name = (ftype, "xray_intensity_%s_%s_keV" % (e_min, e_max)) def _intensity_field(field, data): I = dist_fac*data[emiss_name] return I.in_units("erg/cm**3/s/arcsec**2") - ds.add_field(("gas", ei_name), function=_intensity_field, + ds.add_field(ei_name, function=_intensity_field, display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max), - sampling_type="cell", units="erg/cm**3/s/arcsec**2") + sampling_type="local", units="erg/cm**3/s/arcsec**2") - i_name = "xray_photon_intensity_%s_%s_keV" % (e_min, e_max) + i_name = (ftype, "xray_photon_intensity_%s_%s_keV" % (e_min, e_max)) def _photon_intensity_field(field, data): I = (1.0+redshift)*dist_fac*data[phot_name] return I.in_units("photons/cm**3/s/arcsec**2") - ds.add_field(("gas", i_name), function=_photon_intensity_field, + ds.add_field(i_name, function=_photon_intensity_field, display_name=r"I_{X} (%s-%s keV)" % (e_min, e_max), - sampling_type="cell", units="photons/cm**3/s/arcsec**2") + sampling_type="local", units="photons/cm**3/s/arcsec**2") fields += [ei_name, i_name] - [mylog.info("Adding %s field." % field) for field in fields] + [mylog.info("Adding ('%s','%s') field." % field) for field in fields] return fields diff --git a/yt/frontends/_skeleton/__init__.py b/yt/frontends/_skeleton/__init__.py index eed8e082592..c8a618b340d 100644 --- a/yt/frontends/_skeleton/__init__.py +++ b/yt/frontends/_skeleton/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/_skeleton/api.py b/yt/frontends/_skeleton/api.py index 25cd50ed230..fb890486926 100644 --- a/yt/frontends/_skeleton/api.py +++ b/yt/frontends/_skeleton/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends._skeleton - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .data_structures import \ SkeletonGrid, \ SkeletonHierarchy, \ diff --git a/yt/frontends/_skeleton/data_structures.py b/yt/frontends/_skeleton/data_structures.py index 7aa7d81ecb5..a01de813529 100644 --- a/yt/frontends/_skeleton/data_structures.py +++ b/yt/frontends/_skeleton/data_structures.py @@ -1,18 +1,3 @@ -""" -Skeleton data structures - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import numpy as np import weakref diff --git a/yt/frontends/_skeleton/fields.py b/yt/frontends/_skeleton/fields.py index b466b93889a..684bebc0dd6 100644 --- a/yt/frontends/_skeleton/fields.py +++ b/yt/frontends/_skeleton/fields.py @@ -1,18 +1,3 @@ -""" -Skeleton-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/_skeleton/io.py b/yt/frontends/_skeleton/io.py index c0fdd2136b0..22a91c6b813 100644 --- a/yt/frontends/_skeleton/io.py +++ b/yt/frontends/_skeleton/io.py @@ -1,18 +1,3 @@ -""" -Skeleton-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.io_handler import \ BaseIOHandler diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index a39c1d4243d..3f29649737f 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -50,7 +50,7 @@ class AdaptaHOPDataset(Dataset): _field_info_class = AdaptaHOPFieldInfo # AdaptaHOP internally assumes 1Mpc == 3.0824cm - _code_length_to_Mpc = Mpc.to('cm').value / 3.08e24 + _code_length_to_Mpc = (1.0 * Mpc).to('cm').value / 3.08e24 def __init__(self, filename, dataset_type="adaptahop_binary", n_ref = 16, over_refine_factor = 1, @@ -299,4 +299,4 @@ def _set_halo_properties(self): for attr_name in ('mass', 'position', 'velocity'): setattr(self, attr_name, ds.r['halos', 'particle_%s' % attr_name][ihalo]) # Add members - self.member_ids = self.halo_ds.index.io.members(ihalo).astype(np.int64) \ No newline at end of file + self.member_ids = self.halo_ds.index.io.members(ihalo).astype(np.int64) diff --git a/yt/frontends/ahf/__init__.py b/yt/frontends/ahf/__init__.py index 56ae88e1e13..45cf0d77321 100644 --- a/yt/frontends/ahf/__init__.py +++ b/yt/frontends/ahf/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/ahf/api.py b/yt/frontends/ahf/api.py index 3069b9115f2..2326c273921 100644 --- a/yt/frontends/ahf/api.py +++ b/yt/frontends/ahf/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.ahf - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ AHFHalosDataset diff --git a/yt/frontends/ahf/data_structures.py b/yt/frontends/ahf/data_structures.py index ac5b591f9bb..6ec37ef25bd 100644 --- a/yt/frontends/ahf/data_structures.py +++ b/yt/frontends/ahf/data_structures.py @@ -1,18 +1,3 @@ -""" -AHF data structures - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import glob import os import stat @@ -34,7 +19,7 @@ class AHFHalosFile(HaloCatalogFile): - def __init__(self, ds, io, filename, file_id): + def __init__(self, ds, io, filename, file_id, range=None): root, _ = os.path.splitext(filename) candidates = glob.glob(root + '*.AHF_halos') if len(candidates) == 1: @@ -42,7 +27,7 @@ def __init__(self, ds, io, filename, file_id): else: raise ValueError('Too many AHF_halos files.') self.col_names = self._read_column_names(filename) - super(AHFHalosFile, self).__init__(ds, io, filename, file_id) + super(AHFHalosFile, self).__init__(ds, io, filename, file_id, range) def read_data(self, usecols=None): return np.genfromtxt(self.filename, names=self.col_names, diff --git a/yt/frontends/ahf/fields.py b/yt/frontends/ahf/fields.py index 17c12f0d2ed..7277f140586 100644 --- a/yt/frontends/ahf/fields.py +++ b/yt/frontends/ahf/fields.py @@ -1,18 +1,3 @@ -''' -AHF-specific fields - - - -''' - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/ahf/io.py b/yt/frontends/ahf/io.py index 1725b248162..098ad52f76f 100644 --- a/yt/frontends/ahf/io.py +++ b/yt/frontends/ahf/io.py @@ -1,18 +1,3 @@ -""" -AHF-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from operator import attrgetter import numpy as np @@ -43,6 +28,13 @@ def _read_particle_coords(self, chunks, ptf): x, y, z = (pos[:, i] for i in range(3)) yield 'halos', (x, y, z) + def _yield_coordinates(self, data_file): + halos = data_file.read_data(usecols=['Xc', 'Yc', 'Zc']) + x = halos['Xc'].astype('float64') + y = halos['Yc'].astype('float64') + z = halos['Zc'].astype('float64') + yield 'halos', np.asarray((x, y, z)).T + def _read_particle_fields(self, chunks, ptf, selector): # This gets called after the arrays have been allocated. It needs to # yield ((ptype, field), data) where data is the masked results of @@ -50,6 +42,7 @@ def _read_particle_fields(self, chunks, ptf, selector): # Selector objects have a .select_points(x,y,z) that returns a mask, so # you need to do your masking here. for data_file in self._get_data_files(chunks, ptf): + si, ei = data_file.start, data_file.end cols = [] for field_list in ptf.values(): cols.extend(field_list) @@ -63,7 +56,7 @@ def _read_particle_fields(self, chunks, ptf, selector): if mask is None: continue for ptype, field_list in sorted(ptf.items()): for field in field_list: - data = halos[field][mask].astype('float64') + data = halos[field][si:ei][mask].astype('float64') yield (ptype, field), data def _initialize_index(self, data_file, regions): @@ -90,7 +83,11 @@ def _initialize_index(self, data_file, regions): def _count_particles(self, data_file): halos = data_file.read_data(usecols=['ID']) - return {'halos': len(halos['ID'])} + nhalos = len(halos['ID']) + si, ei = data_file.start, data_file.end + if None not in (si, ei): + nhalos = np.clip(nhalos - si, 0, ei - si) + return {'halos': nhalos} def _identify_fields(self, data_file): fields = [('halos', f) for f in data_file.col_names] diff --git a/yt/frontends/ahf/tests/test_outputs.py b/yt/frontends/ahf/tests/test_outputs.py index 972d6d8f7ba..7d3cdee61e5 100644 --- a/yt/frontends/ahf/tests/test_outputs.py +++ b/yt/frontends/ahf/tests/test_outputs.py @@ -1,22 +1,8 @@ -""" -AHF frontend tests using ahf_halos dataset - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os.path from yt.testing import \ assert_equal, \ - requires_file + requires_file, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ FieldValuesTest, \ requires_ds, \ @@ -43,4 +29,9 @@ def test_fields_ahf_halos(): @requires_file(ahf_halos) def test_AHFHalosDataset(): - assert isinstance(load(ahf_halos), AHFHalosDataset) + ds = load(ahf_halos) + assert isinstance(ds, AHFHalosDataset) + ad = ds.all_data() + ad['particle_mass'] + psc = ParticleSelectionComparison(ds) + psc.run_defaults() diff --git a/yt/frontends/amrvac/__init__.py b/yt/frontends/amrvac/__init__.py index 08e25504338..1c69b8b9233 100644 --- a/yt/frontends/amrvac/__init__.py +++ b/yt/frontends/amrvac/__init__.py @@ -14,7 +14,6 @@ #----------------------------------------------------------------------------- from yt.utilities.on_demand_imports import _f90nml as f90nml -from yt.extern.six import string_types def read_amrvac_namelist(parfiles): """Read one or more parfiles, and return a unified f90nml.Namelist object. 
@@ -35,9 +34,9 @@ def read_amrvac_namelist(parfiles): """ # typechecking - if isinstance(parfiles, string_types): + if isinstance(parfiles, str): parfiles = [parfiles] - assert all([isinstance(pf, string_types) for pf in parfiles]) + assert all([isinstance(pf, str) for pf in parfiles]) # first merge the namelists namelists = [f90nml.read(parfile) for parfile in parfiles] diff --git a/yt/frontends/api.py b/yt/frontends/api.py index 38b4c069f88..46d8967b5b2 100644 --- a/yt/frontends/api.py +++ b/yt/frontends/api.py @@ -1,26 +1,11 @@ -""" -API for yt.frontends - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import sys, types, os, glob, time, importlib -from yt.extern.six.moves import cPickle as pickle _frontends = [ 'adaptahop', 'ahf', 'amrvac', 'art', + 'arepo', 'artio', 'athena', 'athena_pp', @@ -47,6 +32,7 @@ 'rockstar', 'sdf', 'stream', + 'swift', 'tipsy', 'ytdata', ] diff --git a/yt/analysis_modules/absorption_spectrum/tests/__init__.py b/yt/frontends/arepo/__init__.py similarity index 100% rename from yt/analysis_modules/absorption_spectrum/tests/__init__.py rename to yt/frontends/arepo/__init__.py diff --git a/yt/frontends/arepo/api.py b/yt/frontends/arepo/api.py new file mode 100644 index 00000000000..fdd4405e77f --- /dev/null +++ b/yt/frontends/arepo/api.py @@ -0,0 +1,8 @@ +from .data_structures import \ + ArepoHDF5Dataset, \ + ArepoFieldInfo + +from .io import \ + IOHandlerArepoHDF5 + +from . import tests diff --git a/yt/frontends/arepo/data_structures.py b/yt/frontends/arepo/data_structures.py new file mode 100644 index 00000000000..a9ea881a853 --- /dev/null +++ b/yt/frontends/arepo/data_structures.py @@ -0,0 +1,75 @@ +from yt.frontends.gadget.api import GadgetHDF5Dataset +from yt.funcs import mylog +from yt.utilities.on_demand_imports import _h5py as h5py + +from .fields import \ + ArepoFieldInfo + +import numpy as np + + +class ArepoHDF5Dataset(GadgetHDF5Dataset): + _field_info_class = ArepoFieldInfo + + def __init__(self, filename, dataset_type="arepo_hdf5", + unit_base=None, + smoothing_factor=2.0, + index_order=None, + index_filename=None, + kernel_name=None, + bounding_box=None, + units_override=None, + unit_system="cgs"): + super(ArepoHDF5Dataset, self).__init__( + filename, dataset_type=dataset_type, unit_base=unit_base, + index_order=index_order, index_filename=index_filename, + kernel_name=kernel_name, bounding_box=bounding_box, + units_override=units_override, unit_system=unit_system) + # The "smoothing_factor" is a user-configurable parameter which + # is multiplied by the radius of the sphere with a volume equal + # to that of the Voronoi cell to create smoothing lengths. + self.smoothing_factor = smoothing_factor + self.gamma = 5./3. 
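Given the comment in ``__init__`` above and the IO handler below, the smoothing length assigned to each Voronoi cell is h = smoothing_factor * (3 * V_cell / (4 * pi))**(1/3), with V_cell = mass / density. A minimal loading sketch that sets the factor explicitly (assuming the ``ArepoBullet`` sample data is available locally):

>>> import yt
>>> ds = yt.load("ArepoBullet/snapshot_150.hdf5", smoothing_factor=2.0)
>>> ad = ds.all_data()
>>> ad["PartType0", "smoothing_length"]   # h = 2.0 * (3 V_cell / 4 pi)**(1/3)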
+ + @classmethod + def _is_valid(self, *args, **kwargs): + need_groups = ['Header', 'Config'] + veto_groups = ['FOF', 'Group', 'Subhalo'] + valid = True + try: + fh = h5py.File(args[0], mode='r') + valid = all(ng in fh["/"] for ng in need_groups) and \ + not any(vg in fh["/"] for vg in veto_groups) and \ + ("VORONOI" in fh["/Config"].attrs.keys() or + "AMR" in fh["/Config"].attrs.keys()) + fh.close() + except: + valid = False + pass + return valid + + def _get_uvals(self): + handle = h5py.File(self.parameter_filename, mode="r") + uvals = {} + missing = False + for unit in ["UnitLength_in_cm", "UnitMass_in_g", + "UnitVelocity_in_cm_per_s"]: + if unit in handle["/Header"].attrs: + uvals[unit] = handle["/Header"].attrs[unit] + else: + mylog.warning("Arepo header is missing %s!" % unit) + missing = True + handle.close() + if missing: + uvals = None + return uvals + + def _set_code_unit_attributes(self): + self._unit_base = self._get_uvals() + super(ArepoHDF5Dataset, self)._set_code_unit_attributes() + munit = np.sqrt(self.mass_unit / + (self.time_unit**2 * self.length_unit)).to("gauss") + if self.cosmological_simulation: + self.magnetic_unit = self.quan(munit.value, "%s/a**2" % munit.units) + else: + self.magnetic_unit = munit diff --git a/yt/frontends/arepo/fields.py b/yt/frontends/arepo/fields.py new file mode 100644 index 00000000000..fd43b69f57b --- /dev/null +++ b/yt/frontends/arepo/fields.py @@ -0,0 +1,107 @@ +from yt.frontends.gadget.api import GadgetFieldInfo +from yt.fields.magnetic_field import \ + setup_magnetic_field_aliases +from yt.fields.species_fields import \ + add_species_field_by_fraction, \ + setup_species_fields +from yt.fields.field_info_container import \ + FieldInfoContainer + +metal_elements = ["He", "C", "N", "O", "Ne", + "Mg", "Si", "Fe"] + + +class ArepoFieldInfo(GadgetFieldInfo): + known_particle_fields = GadgetFieldInfo.known_particle_fields + \ + (("smoothing_length", ("code_length", [], None)), + ("MagneticField", + ("code_magnetic", ["particle_magnetic_field"], None)), + ("MagneticFieldDivergence", + ("code_magnetic/code_length", ["magnetic_field_divergence"], None)), + ("GFM_Metallicity", ("", ["metallicity"], None)), + ("GFM_Metals_00", ("", ["H_fraction"], None)), + ("GFM_Metals_01", ("", ["He_fraction"], None)), + ("GFM_Metals_02", ("", ["C_fraction"], None)), + ("GFM_Metals_03", ("", ["N_fraction"], None)), + ("GFM_Metals_04", ("", ["O_fraction"], None)), + ("GFM_Metals_05", ("", ["Ne_fraction"], None)), + ("GFM_Metals_06", ("", ["Mg_fraction"], None)), + ("GFM_Metals_07", ("", ["Si_fraction"], None)), + ("GFM_Metals_08", ("", ["Fe_fraction"], None)), + ) + + def __init__(self, ds, field_list, slice_info=None): + if ds.cosmological_simulation: + GFM_SFT_units = "dimensionless" + else: + GFM_SFT_units = "code_length/code_velocity" + self.known_particle_fields += (("GFM_StellarFormationTime", (GFM_SFT_units, ["stellar_age"], None)), ) + super(ArepoFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) + + def setup_particle_fields(self, ptype): + FieldInfoContainer.setup_particle_fields(self, ptype) + if ptype == "PartType0": + self.setup_gas_particle_fields(ptype) + setup_species_fields(self, ptype) + + def setup_gas_particle_fields(self, ptype): + super(ArepoFieldInfo, self).setup_gas_particle_fields(ptype) + + if (ptype, 'InternalEnergy') in self.field_list: + def _pressure(field, data): + return (data.ds.gamma-1.0)*data[ptype, "density"] * \ + data[ptype, "InternalEnergy"] + self.add_field((ptype, "pressure"), function=_pressure, + 
sampling_type="particle", + units=self.ds.unit_system['pressure']) + + if (ptype, "GFM_Metals_00") in self.field_list: + self.nuclei_names = metal_elements + self.species_names = ["H"] + if (ptype, "NeutralHydrogenAbundance") in self.field_list: + self.species_names += ["H_p0", "H_p1"] + self.species_names += metal_elements + + if (ptype, "MagneticField") in self.field_list: + setup_magnetic_field_aliases( + self, ptype, "MagneticField" + ) + + if (ptype, "NeutralHydrogenAbundance") in self.field_list: + def _h_p0_fraction(field, data): + return data[ptype, "GFM_Metals_00"] * \ + data[ptype, "NeutralHydrogenAbundance"] + + self.add_field((ptype, "H_p0_fraction"), + sampling_type="particle", + function=_h_p0_fraction, + units="") + + def _h_p1_fraction(field, data): + return data[ptype, "GFM_Metals_00"] * \ + (1.0-data[ptype, "NeutralHydrogenAbundance"]) + + self.add_field((ptype, "H_p1_fraction"), + sampling_type="particle", + function=_h_p1_fraction, + units="") + + add_species_field_by_fraction(self, ptype, "H_p0") + add_species_field_by_fraction(self, ptype, "H_p1") + + for species in ['H', 'H_p0', 'H_p1']: + for suf in ["_density", "_number_density"]: + field = "%s%s" % (species, suf) + self.alias(("gas", field), (ptype, field)) + + self.alias(("gas", "H_nuclei_density"), ("gas", "H_number_density")) + + if (ptype, "ElectronAbundance") in self.field_list: + def _el_number_density(field, data): + return data[ptype, "ElectronAbundance"] * \ + data[ptype, "H_number_density"] + self.add_field((ptype, "El_number_density"), + sampling_type="particle", + function=_el_number_density, + units=self.ds.unit_system["number_density"]) + self.alias(("gas", "El_number_density"), (ptype, "El_number_density")) diff --git a/yt/frontends/arepo/io.py b/yt/frontends/arepo/io.py new file mode 100644 index 00000000000..b0af0f90b83 --- /dev/null +++ b/yt/frontends/arepo/io.py @@ -0,0 +1,33 @@ +from yt.frontends.gadget.api import IOHandlerGadgetHDF5 +import numpy as np +from yt.utilities.on_demand_imports import _h5py as h5py + +class IOHandlerArepoHDF5(IOHandlerGadgetHDF5): + _dataset_type = "arepo_hdf5" + + def _get_smoothing_length(self, data_file, position_dtype, position_shape): + ptype = self.ds._sph_ptypes[0] + ind = int(ptype[-1]) + si, ei = data_file.start, data_file.end + with h5py.File(data_file.filename, "r") as f: + pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") + pcount = np.clip(pcount - si, 0, ei - si) + # Arepo cells do not have "smoothing lengths" by definition, so + # we compute one here by finding the radius of the sphere + # corresponding to the volume of the Voroni cell and multiplying + # by a user-configurable smoothing factor. + hsml = f[ptype]["Masses"][si:ei,...]/f[ptype]["Density"][si:ei,...] + hsml *= 3.0/(4.0*np.pi) + hsml **= (1./3.) 
+ hsml *= self.ds.smoothing_factor + dt = hsml.dtype.newbyteorder("N") # Native + if position_dtype is not None and dt < position_dtype: + dt = position_dtype + return hsml.astype(dt) + + def _identify_fields(self, data_file): + fields, _units = super(IOHandlerArepoHDF5, + self)._identify_fields(data_file) + fields.append(("PartType0", "smoothing_length")) + return fields, _units + diff --git a/yt/analysis_modules/cosmological_observation/light_cone/tests/__init__.py b/yt/frontends/arepo/tests/__init__.py similarity index 100% rename from yt/analysis_modules/cosmological_observation/light_cone/tests/__init__.py rename to yt/frontends/arepo/tests/__init__.py diff --git a/yt/frontends/arepo/tests/test_outputs.py b/yt/frontends/arepo/tests/test_outputs.py new file mode 100644 index 00000000000..0c1e36dc705 --- /dev/null +++ b/yt/frontends/arepo/tests/test_outputs.py @@ -0,0 +1,90 @@ +import os +import tempfile +from collections import OrderedDict +from yt.testing import requires_file, ParticleSelectionComparison +from yt.utilities.answer_testing.framework import \ + data_dir_load, \ + requires_ds, \ + sph_answer +from yt.frontends.arepo.api import ArepoHDF5Dataset + +bullet_h5 = "ArepoBullet/snapshot_150.hdf5" +tng59_h5 = "TNGHalo/halo_59.hdf5" +_tng59_bbox = [[45135.0, 51343.0], [51844.0, 56184.0], [60555.0, 63451.0]] + + +@requires_file(bullet_h5) +def test_arepo_hdf5(): + assert isinstance(data_dir_load(bullet_h5), + ArepoHDF5Dataset) + +@requires_file(bullet_h5) +def test_arepo_hdf5_selection(): + ds = data_dir_load(bullet_h5) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() + + +bullet_fields = OrderedDict( + [ + (("gas", "density"), None), + (("gas", "temperature"), None), + (("gas", "temperature"), ('gas', 'density')), + (('gas', 'velocity_magnitude'), None) + ] +) + + +@requires_ds(bullet_h5) +def test_arepo_bullet(): + ds = data_dir_load(bullet_h5) + for test in sph_answer(ds, 'snapshot_150', 26529600, + bullet_fields): + test_arepo_bullet.__name__ = test.description + yield test + + +@requires_file(tng59_h5) +def test_tng_hdf5(): + assert isinstance(data_dir_load(tng59_h5), + ArepoHDF5Dataset) + +tng59_fields = OrderedDict( + [ + (("gas", "density"), None), + (("gas", "temperature"), None), + (("gas", "temperature"), ('gas', 'density')), + (("gas", "H_number_density"), None), + (("gas", "H_p0_number_density"), None), + (("gas", "H_p1_number_density"), None), + (("gas", "El_number_density"), None), + (("gas", "C_number_density"), None), + (('gas', 'velocity_magnitude'), None), + (('gas', 'magnetic_field_strength'), None) + ] +) + +@requires_ds(tng59_h5) +def test_arepo_tng59(): + ds = data_dir_load(tng59_h5, kwargs = {'bounding_box': _tng59_bbox}) + for test in sph_answer(ds, 'halo_59', 10107142, + tng59_fields): + test_arepo_tng59.__name__ = test.description + yield test + +@requires_ds(tng59_h5) +def test_index_override(): + # This tests that we can supply an index_filename, and that when we do, it + # doesn't get written if our bounding_box is overwritten. 
+ tmpfd, tmpname = tempfile.mkstemp(suffix=".ewah") + os.close(tmpfd) + ds = data_dir_load(tng59_h5, kwargs = {'index_filename': tmpname, + 'bounding_box': _tng59_bbox}) + ds.index + assert len(open(tmpname, "r").read()) == 0 + +@requires_file(tng59_h5) +def test_arepo_tng59_selection(): + ds = data_dir_load(tng59_h5) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() diff --git a/yt/frontends/art/__init__.py b/yt/frontends/art/__init__.py index 71b1f7d35fa..8481d7b4d5f 100644 --- a/yt/frontends/art/__init__.py +++ b/yt/frontends/art/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/art/api.py b/yt/frontends/art/api.py index a828053aca2..fe112a4e86e 100644 --- a/yt/frontends/art/api.py +++ b/yt/frontends/art/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.art - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ ARTDomainFile,\ ARTDomainSubset,\ diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index 4e743ec9185..6ce45b693cc 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -1,14 +1,3 @@ -""" -ART-specific data structures -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import glob import numpy as np import os @@ -798,11 +787,11 @@ def _count_art_octs(self, f, offset, MinLev, MaxLevelNow): level_oct_offsets.append(f.tell()) # Get the info for this level, skip the rest - # print "Reading oct tree data for level", Lev - # print 'offset:',f.tell() + # print("Reading oct tree data for level", Lev) + # print('offset:',f.tell()) Level[Lev], iNOLL[Lev], iHOLL[Lev] = fpu.read_vector(f, 'i', '>') - # print 'Level %i : '%Lev, iNOLL - # print 'offset after level record:',f.tell() + # print('Level %i : '%Lev, iNOLL) + # print('offset after level record:',f.tell()) nLevel = iNOLL[Lev] ntot = ntot + nLevel diff --git a/yt/frontends/art/definitions.py b/yt/frontends/art/definitions.py index 363827aaa55..ca1dc870746 100644 --- a/yt/frontends/art/definitions.py +++ b/yt/frontends/art/definitions.py @@ -1,18 +1,3 @@ -""" -Definitions specific to ART - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
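The TNG tests above restrict the selection with a ``bounding_box`` and can point the particle index at a specific ``.ewah`` file through ``index_filename``, both of which are ordinary ``ArepoHDF5Dataset`` keywords. A hedged sketch of the equivalent direct load (the index file name is illustrative):

>>> import yt
>>> bbox = [[45135.0, 51343.0], [51844.0, 56184.0], [60555.0, 63451.0]]
>>> ds = yt.load("TNGHalo/halo_59.hdf5", bounding_box=bbox,
...              index_filename="halo_59.ewah")
>>> ds.index   # builds (or reuses) the particle index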
-#----------------------------------------------------------------------------- - # If not otherwise specified, we are big endian endian = '>' diff --git a/yt/frontends/art/fields.py b/yt/frontends/art/fields.py index 70e8d1b6a64..19712ab9493 100644 --- a/yt/frontends/art/fields.py +++ b/yt/frontends/art/fields.py @@ -1,21 +1,5 @@ -""" -ART-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer -from yt.utilities.physical_constants import mh b_units = "code_magnetic" ra_units = "code_length / code_time**2" @@ -66,7 +50,8 @@ def _temperature(field, data): tr *= (data.ds.parameters['gamma'] - 1.) tr /= data.ds.parameters['aexpn']**2 return tr * data['art', 'GasEnergy'] / data['art', 'Density'] - self.add_field(('gas', 'temperature'), sampling_type="cell", + self.add_field(('gas', 'temperature'), + sampling_type="cell", function=_temperature, units=unit_system["temperature"]) @@ -76,7 +61,8 @@ def velocity(field, data): data[('gas','density')]) return velocity for ax in 'xyz': - self.add_field(('gas','velocity_%s' % ax), sampling_type="cell", + self.add_field(('gas','velocity_%s' % ax), + sampling_type="cell", function = _get_vel(ax), units=unit_system["velocity"]) @@ -86,7 +72,8 @@ def _momentum_magnitude(field, data): data['gas','momentum_z']**2)**0.5 tr *= data['index','cell_volume'].in_units('cm**3') return tr - self.add_field(('gas', 'momentum_magnitude'), sampling_type="cell", + self.add_field(('gas', 'momentum_magnitude'), + sampling_type="cell", function=_momentum_magnitude, units=unit_system["momentum"]) @@ -94,7 +81,8 @@ def _velocity_magnitude(field, data): tr = data['gas','momentum_magnitude'] tr /= data['gas','cell_mass'] return tr - self.add_field(('gas', 'velocity_magnitude'), sampling_type="cell", + self.add_field(('gas', 'velocity_magnitude'), + sampling_type="cell", function=_velocity_magnitude, units=unit_system["velocity"]) @@ -102,7 +90,8 @@ def _metal_density(field, data): tr = data['gas','metal_ia_density'] tr += data['gas','metal_ii_density'] return tr - self.add_field(('gas','metal_density'), sampling_type="cell", + self.add_field(('gas','metal_density'), + sampling_type="cell", function=_metal_density, units=unit_system["density"]) @@ -110,7 +99,8 @@ def _metal_mass_fraction(field, data): tr = data['gas','metal_density'] tr /= data['gas','density'] return tr - self.add_field(('gas', 'metal_mass_fraction'), sampling_type="cell", + self.add_field(('gas', 'metal_mass_fraction'), + sampling_type="cell", function=_metal_mass_fraction, units='') @@ -118,7 +108,8 @@ def _H_mass_fraction(field, data): tr = (1. 
- data.ds.parameters['Y_p'] - data['gas', 'metal_mass_fraction']) return tr - self.add_field(('gas', 'H_mass_fraction'), sampling_type="cell", + self.add_field(('gas', 'H_mass_fraction'), + sampling_type="cell", function=_H_mass_fraction, units='') @@ -126,60 +117,50 @@ def _metallicity(field, data): tr = data['gas','metal_mass_fraction'] tr /= data['gas','H_mass_fraction'] return tr - self.add_field(('gas', 'metallicity'), sampling_type="cell", + self.add_field(('gas', 'metallicity'), + sampling_type="cell", function=_metallicity, units='') atoms = ['C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', \ 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', \ 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn'] - for atom in atoms: + def _specific_metal_density_function(atom): def _specific_metal_density(field, data): - nucleus_densityIa = data[('gas','metal_ia_density')].in_units("g / cm**3")*\ - data.ds.quan(SNIa_abundance[atom],"1 / g")*atomic_mass[atom]*mh - nucleus_densityII = data[('gas','metal_ii_density')].in_units("g / cm**3")*\ - data.ds.quan(SNIa_abundance[atom],"1 / g")*atomic_mass[atom]*mh + nucleus_densityIa = data['gas','metal_ia_density']*\ + SNIa_abundance[atom] + nucleus_densityII = data['gas','metal_ii_density']*\ + SNII_abundance[atom] return nucleus_densityIa + nucleus_densityII - self.add_field(('gas','%s_nuclei_mass_density'%atom),sampling_type="cell", - function=_specific_metal_density, + return _specific_metal_density + for atom in atoms: + self.add_field(('gas','%s_nuclei_mass_density'%atom), + sampling_type="cell", + function=_specific_metal_density_function(atom), units=unit_system["density"]) - # based on Iwamoto et al 1999 -# number of atoms per gram of SNIa metal +# mass fraction of each atom in SNIa metal SNIa_abundance = { - 'H' : 0.00E+00, 'He' : 0.00E+00, 'C' : 1.75E+21, - 'N' : 3.61E+16, 'O' : 3.90E+21, 'F' : 1.30E+13, - 'Ne' : 9.76E+19, 'Na' : 1.20E+18, 'Mg' : 1.54E+20, - 'Al' : 1.59E+19, 'Si' : 2.43E+21, 'P' : 5.02E+18, - 'S' : 1.18E+21, 'Cl' : 2.14E+18, 'Ar' : 1.71E+20, - 'K' : 8.74E+17, 'Ca' : 1.30E+20, 'Sc' : 2.14E+15, - 'Ti' : 3.12E+18, 'V' : 6.41E+17, 'Cr' : 7.11E+19, - 'Mn' : 7.04E+19, 'Fe' : 5.85E+21, 'Co' : 7.69E+18, - 'Ni' : 9.34E+20, 'Cu' : 2.06E+16, 'Zn' : 1.88E+17} - -# number of atoms per gram of SNII metal + 'H' : 0.00E+00, 'He' : 0.00E+00, 'C' : 3.52E-02, + 'N' : 8.47E-07, 'O' : 1.04E-01, 'F' : 4.14E-10, + 'Ne' : 3.30E-03, 'Na' : 4.61E-05, 'Mg' : 6.25E-03, + 'Al' : 7.19E-04, 'Si' : 1.14E-01, 'P' : 2.60E-04, + 'S' : 6.35E-02, 'Cl' : 1.27E-04, 'Ar' : 1.14E-02, + 'K' : 5.72E-05, 'Ca' : 8.71E-03, 'Sc' : 1.61E-07, + 'Ti' : 2.50E-04, 'V' : 5.46E-05, 'Cr' : 6.19E-03, + 'Mn' : 6.47E-03, 'Fe' : 5.46E-01, 'Co' : 7.59E-04, + 'Ni' : 9.17E-02, 'Cu' : 2.19E-06, 'Zn' : 2.06E-05} + +# mass fraction of each atom in SNII metal SNII_abundance = { - 'H' : 0.00E+00, 'He' : 0.00E+00, 'C' : 1.55E+21, - 'N' : 2.62E+19, 'O' : 2.66E+22, 'F' : 1.44E+13, - 'Ne' : 2.70E+21, 'Na' : 6.67E+19, 'Mg' : 1.19E+21, - 'Al' : 1.29E+20, 'Si' : 1.02E+21, 'P' : 9.20E+18, - 'S' : 3.02E+20, 'Cl' : 7.95E+17, 'Ar' : 4.71E+19, - 'K' : 4.06E+17, 'Ca' : 3.45E+19, 'Sc' : 1.20E+15, - 'Ti' : 6.47E+17, 'V' : 4.62E+16, 'Cr' : 5.96E+18, - 'Mn' : 1.65E+18, 'Fe' : 3.82E+20, 'Co' : 2.90E+17, - 'Ni' : 2.40E+19, 'Cu' : 4.61E+15, 'Zn' : 6.81E+16} - -# taken from TRIDENT -atomic_mass = { - 'H' : 1.00794, 'He': 4.002602, 'Li': 6.941, - 'Be': 9.012182, 'B' : 10.811, 'C' : 12.0107, - 'N' : 14.0067, 'O' : 15.9994, 'F' : 18.9984032, - 'Ne': 20.1797, 'Na': 22.989770, 'Mg': 24.3050, - 'Al': 26.981538, 'Si': 28.0855, 'P' : 
30.973761, - 'S' : 32.065, 'Cl': 35.453, 'Ar': 39.948, - 'K' : 39.0983, 'Ca': 40.078, 'Sc': 44.955910, - 'Ti': 47.867, 'V' : 50.9415, 'Cr': 51.9961, - 'Mn': 54.938049, 'Fe': 55.845, 'Co': 58.933200, - 'Ni': 58.6934, 'Cu': 63.546, 'Zn': 65.409} + 'H' : 0.00E+00, 'He' : 0.00E+00, 'C' : 3.12E-02, + 'N' : 6.15E-04, 'O' : 7.11E-01, 'F' : 4.57E-10, + 'Ne' : 9.12E-02, 'Na' : 2.56E-03, 'Mg' : 4.84E-02, + 'Al' : 5.83E-03, 'Si' : 4.81E-02, 'P' : 4.77E-04, + 'S' : 1.62E-02, 'Cl' : 4.72E-05, 'Ar' : 3.15E-03, + 'K' : 2.65E-05, 'Ca' : 2.31E-03, 'Sc' : 9.02E-08, + 'Ti' : 5.18E-05, 'V' : 3.94E-06, 'Cr' : 5.18E-04, + 'Mn' : 1.52E-04, 'Fe' : 3.58E-02, 'Co' : 2.86E-05, + 'Ni' : 2.35E-03, 'Cu' : 4.90E-07, 'Zn' : 7.46E-06} diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index cdd364ab68a..afdcef80662 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -1,18 +1,3 @@ -""" -ART-specific IO - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os diff --git a/yt/frontends/art/tests/test_outputs.py b/yt/frontends/art/tests/test_outputs.py index 37b91fac0d6..d147637caa3 100644 --- a/yt/frontends/art/tests/test_outputs.py +++ b/yt/frontends/art/tests/test_outputs.py @@ -1,24 +1,9 @@ -""" -ART frontend tests using D9p a=0.500 - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ requires_file, \ assert_equal, \ units_override_check, \ - assert_almost_equal + assert_almost_equal, \ + ParticleSelectionComparison from yt.units.yt_array import \ YTQuantity from yt.utilities.answer_testing.framework import \ @@ -117,3 +102,9 @@ def test_ARTDataset(): @requires_file(d9p) def test_units_override(): units_override_check(d9p) + +@requires_file(d9p) +def test_particle_selection(): + ds = data_dir_load(d9p) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() diff --git a/yt/frontends/artio/__init__.py b/yt/frontends/artio/__init__.py index 5b2013d3837..17195075cf4 100644 --- a/yt/frontends/artio/__init__.py +++ b/yt/frontends/artio/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/artio/api.py b/yt/frontends/artio/api.py index 11144e5a0ad..2b0cf57443f 100644 --- a/yt/frontends/artio/api.py +++ b/yt/frontends/artio/api.py @@ -1,19 +1,3 @@ -""" -API for yt.frontends.artio - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
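The ``_specific_metal_density_function`` factory in the ART fields above is needed because closures defined inside a loop capture the loop variable, not its value at definition time; without the factory, every ``<atom>_nuclei_mass_density`` field would silently use the last atom in the list. A minimal, self-contained illustration of the pitfall (names are illustrative only):

>>> funcs = [lambda: atom for atom in ("C", "Fe")]
>>> [f() for f in funcs]        # late binding: both closures see "Fe"
['Fe', 'Fe']
>>> def make(atom):
...     return lambda: atom     # the factory argument freezes the current value
>>> funcs = [make(atom) for atom in ("C", "Fe")]
>>> [f() for f in funcs]
['C', 'Fe']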
-#----------------------------------------------------------------------------- - from .data_structures import \ ARTIODataset diff --git a/yt/frontends/artio/data_structures.py b/yt/frontends/artio/data_structures.py index 8a1015ed2a3..afac9e87b6e 100644 --- a/yt/frontends/artio/data_structures.py +++ b/yt/frontends/artio/data_structures.py @@ -1,19 +1,3 @@ -""" -ARTIO-specific data structures - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os import stat diff --git a/yt/frontends/artio/definitions.py b/yt/frontends/artio/definitions.py index 9eb69f869b2..b7442bfa324 100644 --- a/yt/frontends/artio/definitions.py +++ b/yt/frontends/artio/definitions.py @@ -1,19 +1,3 @@ -""" -Definitions specific to ART - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - yt_to_art = { 'Density': 'HVAR_GAS_DENSITY', 'TotalEnergy': 'HVAR_GAS_ENERGY', diff --git a/yt/frontends/artio/fields.py b/yt/frontends/artio/fields.py index bd3fb6a858e..ac09d389812 100644 --- a/yt/frontends/artio/fields.py +++ b/yt/frontends/artio/fields.py @@ -1,19 +1,3 @@ -""" -ARTIO-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer from yt.units.yt_array import \ @@ -77,7 +61,8 @@ def velocity(field, data): return data["momentum_%s" % axis]/data["density"] return velocity for ax in 'xyz': - self.add_field(("gas", "velocity_%s" % ax), sampling_type="cell", + self.add_field(("gas", "velocity_%s" % ax), + sampling_type="cell", function = _get_vel(ax), units = unit_system["velocity"]) @@ -101,7 +86,9 @@ def _temperature(field, data): # TODO: The conversion factor here needs to be addressed, as previously # it was set as: # unit_T = unit_v**2.0*mb / constants.k - self.add_field(("gas", "temperature"), sampling_type="cell", function = _temperature, + self.add_field(("gas", "temperature"), + sampling_type="cell", + function = _temperature, units = unit_system["temperature"]) # Create a metal_density field as sum of existing metal fields. 
@@ -121,7 +108,8 @@ def _metal_density(field, data): def _metal_density(field, data): tr = data["metal_ii_density"] return tr - self.add_field(("gas","metal_density"), sampling_type="cell", + self.add_field(("gas","metal_density"), + sampling_type="cell", function=_metal_density, units=unit_system["density"], take_log=True) @@ -134,13 +122,21 @@ def _creation_time(field,data): def _age(field, data): return data.ds.current_time - data["STAR","creation_time"] - self.add_field((ptype, "creation_time"), sampling_type="particle", function=_creation_time, units="yr") - self.add_field((ptype, "age"), sampling_type="particle", function=_age, units="yr") + self.add_field((ptype, "creation_time"), + sampling_type="particle", + function=_creation_time, + units="yr") + self.add_field((ptype, "age"), + sampling_type="particle", + function=_age, + units="yr") if self.ds.cosmological_simulation: def _creation_redshift(field,data): return 1.0/data.ds._handle.auni_from_tcode_array(data["STAR","BIRTH_TIME"]) - 1.0 - self.add_field((ptype, "creation_redshift"), sampling_type="particle", function=_creation_redshift) + self.add_field((ptype, "creation_redshift"), + sampling_type="particle", + function=_creation_redshift) super(ARTIOFieldInfo, self).setup_particle_fields(ptype) diff --git a/yt/frontends/artio/io.py b/yt/frontends/artio/io.py index 4abccb3d232..95cc6f9fead 100644 --- a/yt/frontends/artio/io.py +++ b/yt/frontends/artio/io.py @@ -1,18 +1,3 @@ -""" -ARTIO-specific IO - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np from yt.utilities.io_handler import \ diff --git a/yt/frontends/artio/tests/test_outputs.py b/yt/frontends/artio/tests/test_outputs.py index 894e62782d1..12a68c141bb 100644 --- a/yt/frontends/artio/tests/test_outputs.py +++ b/yt/frontends/artio/tests/test_outputs.py @@ -1,19 +1,3 @@ -""" -ARTIO frontend tests - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.convenience import load from yt.testing import \ assert_equal, \ diff --git a/yt/frontends/athena/api.py b/yt/frontends/athena/api.py index 40995e5df55..d6809d21dd2 100644 --- a/yt/frontends/athena/api.py +++ b/yt/frontends/athena/api.py @@ -1,17 +1,3 @@ -""" -API for yt.frontends.athena - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from .data_structures import \ AthenaGrid, \ AthenaHierarchy, \ diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index 26acc9b109a..e1ac69646e7 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -1,18 +1,3 @@ -""" -Data structures for Athena. 
- - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os import weakref @@ -27,30 +12,25 @@ GridIndex from yt.data_objects.static_output import \ Dataset +from yt.utilities.chemical_formulas import \ + default_mu from yt.utilities.lib.misc_utilities import \ get_box_grids_level from yt.geometry.geometry_handler import \ YTDataChunk -from yt.extern.six import PY2 from .fields import AthenaFieldInfo from yt.utilities.decompose import \ decompose_array, get_psize def chk23(strin): - if PY2: - return strin - else: - return strin.encode('utf-8') + return strin.encode('utf-8') def str23(strin): - if PY2: - return strin + if isinstance(strin, list): + return [s.decode('utf-8') for s in strin] else: - if isinstance(strin, list): - return [s.decode('utf-8') for s in strin] - else: - return strin.decode('utf-8') + return strin.decode('utf-8') def check_readline(fl): line = fl.readline() @@ -571,6 +551,7 @@ def _parse_parameter_file(self): self.parameters["Gamma"] = 5./3. self.geometry = self.specified_parameters.get("geometry", "cartesian") self._handle.close() + self.mu = self.specified_parameters.get("mu", default_mu) @classmethod def _is_valid(self, *args, **kwargs): diff --git a/yt/frontends/athena/definitions.py b/yt/frontends/athena/definitions.py index eed306ed5b7..307b63a9616 100644 --- a/yt/frontends/athena/definitions.py +++ b/yt/frontends/athena/definitions.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/athena/fields.py b/yt/frontends/athena/fields.py index 47fc26f4133..a35c7b3d1a9 100644 --- a/yt/frontends/athena/fields.py +++ b/yt/frontends/athena/fields.py @@ -1,18 +1,3 @@ -""" -Athena-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
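With Python 2 support removed, the Athena reader's string helpers above reduce to plain encode/decode wrappers; a small sketch of the resulting behaviour:

```python
# Python-3-only forms of the Athena reader helpers shown in the hunk above.
def chk23(strin):
    # values compared against raw bytes read from the binary file
    return strin.encode("utf-8")

def str23(strin):
    # decode bytes (or a list of bytes) from the file back into str
    if isinstance(strin, list):
        return [s.decode("utf-8") for s in strin]
    return strin.decode("utf-8")

assert chk23("time") == b"time"
assert str23(b"time") == "time"
assert str23([b"nx1", b"nx2"]) == ["nx1", "nx2"]
```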
-#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer from yt.utilities.physical_constants import \ @@ -51,14 +36,19 @@ def setup_fluid_fields(self): vel_field = ("athena", "velocity_%s" % comp) mom_field = ("athena", "momentum_%s" % comp) if vel_field in self.field_list: - self.add_output_field(vel_field, sampling_type="cell", units="code_length/code_time") + self.add_output_field(vel_field, + sampling_type="cell", + units="code_length/code_time") self.alias(("gas","velocity_%s" % comp), vel_field, units=unit_system["velocity"]) elif mom_field in self.field_list: - self.add_output_field(mom_field, sampling_type="cell", + self.add_output_field(mom_field, + sampling_type="cell", units="code_mass/code_time/code_length**2") - self.add_field(("gas","velocity_%s" % comp), sampling_type="cell", - function=velocity_field(comp), units = unit_system["velocity"]) + self.add_field(("gas","velocity_%s" % comp), + sampling_type="cell", + function=velocity_field(comp), + units = unit_system["velocity"]) # Add pressure, energy, and temperature fields def eint_from_etot(data): eint = data["athena","total_energy"].copy() @@ -74,42 +64,47 @@ def etot_from_pres(data): etot += data["gas", "magnetic_energy"] return etot if ("athena","pressure") in self.field_list: - self.add_output_field(("athena","pressure"), sampling_type="cell", + self.add_output_field(("athena","pressure"), + sampling_type="cell", units=pres_units) self.alias(("gas","pressure"),("athena","pressure"), units=unit_system["pressure"]) def _thermal_energy(field, data): return data["athena","pressure"] / \ (data.ds.gamma-1.)/data["athena","density"] - self.add_field(("gas","thermal_energy"), sampling_type="cell", + self.add_field(("gas","thermal_energy"), + sampling_type="cell", function=_thermal_energy, units=unit_system["specific_energy"]) def _total_energy(field, data): return etot_from_pres(data)/data["athena","density"] - self.add_field(("gas","total_energy"), sampling_type="cell", + self.add_field(("gas","total_energy"), + sampling_type="cell", function=_total_energy, units=unit_system["specific_energy"]) elif ("athena","total_energy") in self.field_list: - self.add_output_field(("athena","total_energy"), sampling_type="cell", + self.add_output_field(("athena","total_energy"), + sampling_type="cell", units=pres_units) def _thermal_energy(field, data): return eint_from_etot(data)/data["athena","density"] - self.add_field(("gas","thermal_energy"), sampling_type="cell", + self.add_field(("gas","thermal_energy"), + sampling_type="cell", function=_thermal_energy, units=unit_system["specific_energy"]) def _total_energy(field, data): return data["athena","total_energy"]/data["athena","density"] - self.add_field(("gas","total_energy"), sampling_type="cell", + self.add_field(("gas","total_energy"), + sampling_type="cell", function=_total_energy, units=unit_system["specific_energy"]) # Add temperature field def _temperature(field, data): - if data.has_field_parameter("mu"): - mu = data.get_field_parameter("mu") - else: - mu = 0.6 - return mu*mh*data["gas","pressure"]/data["gas","density"]/kboltz - self.add_field(("gas","temperature"), sampling_type="cell", function=_temperature, + return data.ds.mu*data["gas","pressure"]/data["gas","density"]*mh/kboltz + self.add_field(("gas","temperature"), + sampling_type="cell", + function=_temperature, units=unit_system["temperature"]) - setup_magnetic_field_aliases(self, "athena", ["cell_centered_B_%s" % ax for ax in 
"xyz"]) + setup_magnetic_field_aliases( + self, "athena", ["cell_centered_B_%s" % ax for ax in "xyz"]) diff --git a/yt/frontends/athena/io.py b/yt/frontends/athena/io.py index 971fa2b66f5..6a510bf6839 100644 --- a/yt/frontends/athena/io.py +++ b/yt/frontends/athena/io.py @@ -1,18 +1,3 @@ -""" -The data-file handling functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.io_handler import \ BaseIOHandler import numpy as np diff --git a/yt/frontends/athena/tests/test_outputs.py b/yt/frontends/athena/tests/test_outputs.py index b8f5a8bb038..18371883c99 100644 --- a/yt/frontends/athena/tests/test_outputs.py +++ b/yt/frontends/athena/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Athena frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ assert_equal, \ requires_file, \ diff --git a/yt/frontends/athena_pp/api.py b/yt/frontends/athena_pp/api.py index bb52145a3e6..497dd4e2984 100644 --- a/yt/frontends/athena_pp/api.py +++ b/yt/frontends/athena_pp/api.py @@ -1,17 +1,3 @@ -""" -API for yt.frontends.athena++ - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from .data_structures import \ AthenaPPGrid, \ AthenaPPHierarchy, \ diff --git a/yt/frontends/athena_pp/data_structures.py b/yt/frontends/athena_pp/data_structures.py index 55bc877924d..03501b729ac 100644 --- a/yt/frontends/athena_pp/data_structures.py +++ b/yt/frontends/athena_pp/data_structures.py @@ -1,18 +1,3 @@ -""" -Data structures for Athena. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os import weakref @@ -36,6 +21,8 @@ SemiStructuredMesh from itertools import chain, product from .fields import AthenaPPFieldInfo +from yt.utilities.chemical_formulas import \ + default_mu geom_map = {"cartesian": "cartesian", "cylindrical": "cylindrical", @@ -335,6 +322,7 @@ def _parse_parameter_file(self): self.parameters["Gamma"] = self.specified_parameters["gamma"] else: self.parameters["Gamma"] = 5./3. 
+ self.mu = self.specified_parameters.get("mu", default_mu) @classmethod def _is_valid(self, *args, **kwargs): diff --git a/yt/frontends/athena_pp/definitions.py b/yt/frontends/athena_pp/definitions.py index eed306ed5b7..307b63a9616 100644 --- a/yt/frontends/athena_pp/definitions.py +++ b/yt/frontends/athena_pp/definitions.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/athena_pp/fields.py b/yt/frontends/athena_pp/fields.py index ff79e34f6bc..3d4003b676d 100644 --- a/yt/frontends/athena_pp/fields.py +++ b/yt/frontends/athena_pp/fields.py @@ -1,18 +1,3 @@ -""" -Athena++-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer from yt.utilities.physical_constants import \ @@ -51,7 +36,8 @@ def setup_fluid_fields(self): self.alias(("gas","%s_%s" % (vel_prefix, comp)), vel_field, units=unit_system["velocity"]) elif mom_field in self.field_list: - self.add_output_field(mom_field, sampling_type="cell", + self.add_output_field(mom_field, + sampling_type="cell", units="code_mass/code_time/code_length**2") self.add_field(("gas","%s_%s" % (vel_prefix, comp)), sampling_type="cell", function=velocity_field(i+1), units=unit_system["velocity"]) @@ -80,11 +66,7 @@ def _thermal_energy(field, data): units=unit_system["specific_energy"]) # Add temperature field def _temperature(field, data): - if data.has_field_parameter("mu"): - mu = data.get_field_parameter("mu") - else: - mu = 0.6 - return (data["gas","pressure"]/data["gas","density"])*mu*mh/kboltz + return (data["gas","pressure"]/data["gas","density"])*data.ds.mu*mh/kboltz self.add_field(("gas", "temperature"), sampling_type="cell", function=_temperature, units=unit_system["temperature"]) diff --git a/yt/frontends/athena_pp/io.py b/yt/frontends/athena_pp/io.py index 4e0e856b5bc..88763e26421 100644 --- a/yt/frontends/athena_pp/io.py +++ b/yt/frontends/athena_pp/io.py @@ -1,18 +1,3 @@ -""" -Athena++-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from itertools import groupby diff --git a/yt/frontends/athena_pp/tests/test_outputs.py b/yt/frontends/athena_pp/tests/test_outputs.py index 86bd03dd832..d176cedde67 100644 --- a/yt/frontends/athena_pp/tests/test_outputs.py +++ b/yt/frontends/athena_pp/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Athena++ frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.testing import \ assert_equal, \ diff --git a/yt/frontends/boxlib/__init__.py b/yt/frontends/boxlib/__init__.py index cab6e807be5..07fa1336903 100644 --- a/yt/frontends/boxlib/__init__.py +++ b/yt/frontends/boxlib/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/boxlib/api.py b/yt/frontends/boxlib/api.py index a311660db53..0ccf409e52c 100644 --- a/yt/frontends/boxlib/api.py +++ b/yt/frontends/boxlib/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.orion - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ BoxlibGrid, \ BoxlibHierarchy, \ diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 502962ccc75..618f1aee107 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1,18 +1,3 @@ -""" -Data structures for BoxLib Codes - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import inspect import os import re diff --git a/yt/frontends/boxlib/definitions.py b/yt/frontends/boxlib/definitions.py index 53ddeab9b58..aaceef492f9 100644 --- a/yt/frontends/boxlib/definitions.py +++ b/yt/frontends/boxlib/definitions.py @@ -1,18 +1,3 @@ -""" -Various definitions for various other modules and routines - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - # TODO: get rid of enzo parameters we do not need parameterDict = {"CosmologyCurrentRedshift": float, diff --git a/yt/frontends/boxlib/fields.py b/yt/frontends/boxlib/fields.py index fcf9840ead7..9ad5446708f 100644 --- a/yt/frontends/boxlib/fields.py +++ b/yt/frontends/boxlib/fields.py @@ -1,16 +1,3 @@ -""" -BoxLib code fields - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import string import re import numpy as np @@ -211,7 +198,8 @@ def velocity(field, data): return velocity for ax in 'xyz': - self.add_field((ptype, "particle_velocity_%s" % ax), sampling_type="particle", + self.add_field((ptype, "particle_velocity_%s" % ax), + sampling_type="particle", function=_get_vel(ax), units="code_length/code_time") @@ -224,14 +212,17 @@ def setup_fluid_fields(self): self.setup_momentum_to_velocity() elif any(f[1] == "xvel" for f in self.field_list): self.setup_velocity_to_momentum() - self.add_field(("gas", "thermal_energy"), sampling_type="cell", + self.add_field(("gas", "thermal_energy"), + sampling_type="cell", function=_thermal_energy, units=unit_system["specific_energy"]) - self.add_field(("gas", "thermal_energy_density"), sampling_type="cell", + self.add_field(("gas", "thermal_energy_density"), + sampling_type="cell", function=_thermal_energy_density, units=unit_system["pressure"]) if ("gas", "temperature") not in self.field_aliases: - self.add_field(("gas", "temperature"), sampling_type="cell", + self.add_field(("gas", "temperature"), + sampling_type="cell", function=_temperature, units=unit_system["temperature"]) @@ -241,7 +232,8 @@ def velocity(field, data): return data["%smom" % axis]/data["density"] return velocity for ax in 'xyz': - self.add_field(("gas", "velocity_%s" % ax), sampling_type="cell", + self.add_field(("gas", "velocity_%s" % ax), + sampling_type="cell", function=_get_vel(ax), units=self.ds.unit_system["velocity"]) @@ -251,7 +243,8 @@ def momentum(field, data): return data["%svel" % axis]*data["density"] return momentum for ax in 'xyz': - self.add_field(("gas", "momentum_%s" % ax), sampling_type="cell", + self.add_field(("gas", "momentum_%s" % ax), + sampling_type="cell", function=_get_mom(ax), units=mom_units) @@ -406,7 +399,8 @@ def setup_fluid_fields(self): # We have a mass fraction nice_name, tex_label = _nice_species_name(field) # Overwrite field to use nicer tex_label display_name - self.add_output_field(("boxlib", field), sampling_type="cell", + self.add_output_field(("boxlib", field), + sampling_type="cell", units="", display_name=tex_label) self.alias(("gas", "%s_fraction" % nice_name), @@ -437,7 +431,9 @@ def setup_fluid_fields(self): nice_name, tex_label = _nice_species_name(field) display_name = r'\dot{\omega}\left[%s\right]' % tex_label # Overwrite field to use nicer tex_label'ed display_name - self.add_output_field(("boxlib", field), sampling_type="cell", units=unit_system["frequency"], + self.add_output_field(("boxlib", field), + sampling_type="cell", + units=unit_system["frequency"], display_name=display_name) self.alias(("gas", "%s_creation_rate" % nice_name), ("boxlib", field), units=unit_system["frequency"]) diff --git a/yt/frontends/boxlib/io.py b/yt/frontends/boxlib/io.py index 6cf5d428969..d733bb259d1 100644 --- a/yt/frontends/boxlib/io.py +++ b/yt/frontends/boxlib/io.py @@ -1,18 +1,3 @@ -""" -AMReX/Boxlib data-file handling functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import os import numpy as np from collections import defaultdict diff --git a/yt/frontends/boxlib/tests/test_outputs.py b/yt/frontends/boxlib/tests/test_outputs.py index f764a62dc4a..d1fdcef1cff 100644 --- a/yt/frontends/boxlib/tests/test_outputs.py +++ b/yt/frontends/boxlib/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Boxlib frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ assert_equal, \ requires_file, \ diff --git a/yt/frontends/chombo/api.py b/yt/frontends/chombo/api.py index e08b6623d1a..0edc141c133 100644 --- a/yt/frontends/chombo/api.py +++ b/yt/frontends/chombo/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.chombo - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ ChomboGrid, \ ChomboHierarchy, \ diff --git a/yt/frontends/chombo/data_structures.py b/yt/frontends/chombo/data_structures.py index fa21ce99dae..935ac7b0ab5 100644 --- a/yt/frontends/chombo/data_structures.py +++ b/yt/frontends/chombo/data_structures.py @@ -1,25 +1,9 @@ -""" -Data structures for Chombo. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import re import os import weakref import numpy as np -from six import string_types from stat import \ ST_CTIME @@ -28,7 +12,6 @@ setdefaultattr from yt.data_objects.grid_patch import \ AMRGridPatch -from yt.extern import six from yt.geometry.grid_geometry_handler import \ GridIndex from yt.data_objects.static_output import \ @@ -374,7 +357,7 @@ def _is_valid(self, *args, **kwargs): pluto_ini_file_exists = False orion2_ini_file_exists = False - if isinstance(args[0], six.string_types): + if isinstance(args[0], str): dir_name = os.path.dirname(os.path.abspath(args[0])) pluto_ini_filename = os.path.join(dir_name, "pluto.ini") orion2_ini_filename = os.path.join(dir_name, "orion2.ini") @@ -533,7 +516,7 @@ def _is_valid(self, *args, **kwargs): pluto_ini_file_exists = False - if isinstance(args[0], six.string_types): + if isinstance(args[0], str): dir_name = os.path.dirname(os.path.abspath(args[0])) pluto_ini_filename = os.path.join(dir_name, "pluto.ini") pluto_ini_file_exists = os.path.isfile(pluto_ini_filename) @@ -679,7 +662,7 @@ def _is_valid(self, *args, **kwargs): pluto_ini_file_exists = False orion2_ini_file_exists = False - if isinstance(args[0], string_types): + if isinstance(args[0], str): dir_name = os.path.dirname(os.path.abspath(args[0])) pluto_ini_filename = os.path.join(dir_name, "pluto.ini") orion2_ini_filename = os.path.join(dir_name, "orion2.ini") @@ -737,7 +720,7 @@ def _is_valid(self, *args, **kwargs): pluto_ini_file_exists = False orion2_ini_file_exists = False - if isinstance(args[0], six.string_types): + if isinstance(args[0], str): dir_name = os.path.dirname(os.path.abspath(args[0])) pluto_ini_filename = os.path.join(dir_name, "pluto.ini") orion2_ini_filename = os.path.join(dir_name, "orion2.ini") diff --git a/yt/frontends/chombo/definitions.py b/yt/frontends/chombo/definitions.py index 626b4ba16b7..8b137891791 100644 --- a/yt/frontends/chombo/definitions.py +++ b/yt/frontends/chombo/definitions.py @@ -1,16 +1 @@ -""" -Various definitions for various other modules and routines - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - diff --git a/yt/frontends/chombo/fields.py b/yt/frontends/chombo/fields.py index e4097f2240f..fadf3b72e96 100644 --- a/yt/frontends/chombo/fields.py +++ b/yt/frontends/chombo/fields.py @@ -1,18 +1,3 @@ -""" -Chombo-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
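With `six` gone, the Chombo `_is_valid` variants above simply test `isinstance(args[0], str)` before probing for a sibling `pluto.ini` or `orion2.ini`. A compact sketch of that detection step (the helper name is ours, not from the diff):

```python
import os

def _sibling_ini_exists(output_path):
    # Mirrors the _is_valid logic above: only string paths are inspected, and
    # the ini files are looked up in the same directory as the output file.
    pluto_ini_file_exists = False
    orion2_ini_file_exists = False
    if isinstance(output_path, str):
        dir_name = os.path.dirname(os.path.abspath(output_path))
        pluto_ini_file_exists = os.path.isfile(os.path.join(dir_name, "pluto.ini"))
        orion2_ini_file_exists = os.path.isfile(os.path.join(dir_name, "orion2.ini"))
    return pluto_ini_file_exists, orion2_ini_file_exists
```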
-#----------------------------------------------------------------------------- - import numpy as np from yt.units.unit_object import Unit from yt.fields.field_info_container import \ @@ -140,28 +125,37 @@ def velocity(field, data): return velocity for ax in 'xyz': - self.add_field(("gas", "velocity_%s" % ax), sampling_type="cell", + self.add_field(("gas", "velocity_%s" % ax), + sampling_type="cell", function = _get_vel(ax), units = unit_system["velocity"]) - self.add_field(("gas", "thermal_energy"), sampling_type="cell", + self.add_field(("gas", "thermal_energy"), + sampling_type="cell", function = _thermal_energy, units = unit_system["specific_energy"]) - self.add_field(("gas", "thermal_energy_density"), sampling_type="cell", + self.add_field(("gas", "thermal_energy_density"), + sampling_type="cell", function = _thermal_energy_density, units = unit_system["pressure"]) - self.add_field(("gas", "kinetic_energy"), sampling_type="cell", + self.add_field(("gas", "kinetic_energy"), + sampling_type="cell", function = _kinetic_energy, units = unit_system["pressure"]) - self.add_field(("gas", "specific_kinetic_energy"), sampling_type="cell", + self.add_field(("gas", "specific_kinetic_energy"), + sampling_type="cell", function = _specific_kinetic_energy, units = unit_system["specific_energy"]) - self.add_field(("gas", "magnetic_energy"), sampling_type="cell", + self.add_field(("gas", "magnetic_energy"), + sampling_type="cell", function = _magnetic_energy, units = unit_system["pressure"]) - self.add_field(("gas", "specific_magnetic_energy"), sampling_type="cell", + self.add_field(("gas", "specific_magnetic_energy"), + sampling_type="cell", function = _specific_magnetic_energy, units = unit_system["specific_energy"]) - self.add_field(("gas", "temperature"), sampling_type="cell", function=_temperature, + self.add_field(("gas", "temperature"), + sampling_type="cell", + function=_temperature, units=unit_system["temperature"]) setup_magnetic_field_aliases(self, "chombo", ["%s-magnfield" % ax for ax in "XYZ"]) @@ -200,9 +194,12 @@ def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ): output_units = units if (ptype, f) not in self.field_list: continue - self.add_output_field((ptype, f), sampling_type="particle", - units = units, - display_name = dn, output_units = output_units, take_log=False) + self.add_output_field((ptype, f), + sampling_type="particle", + units=units, + display_name=dn, + output_units=output_units, + take_log=False) for alias in aliases: self.alias((ptype, alias), (ptype, f), units = output_units) @@ -220,8 +217,9 @@ def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ): raise RuntimeError if field[0] not in self.ds.particle_types: continue - self.add_output_field(field, sampling_type="particle", - units = self.ds.field_units.get(field, "")) + self.add_output_field(field, + sampling_type="particle", + units=self.ds.field_units.get(field, "")) self.setup_smoothed_fields(ptype, num_neighbors=num_neighbors, ftype=ftype) @@ -261,16 +259,19 @@ def __init__(self, ds, field_list): super(ChomboPICFieldInfo2D, self).__init__(ds, field_list) for ftype in fluid_field_types: - self.add_field((ftype, 'gravitational_field_z'), sampling_type="cell", - function = _dummy_field, - units = "code_length / code_time**2") + self.add_field((ftype, 'gravitational_field_z'), + sampling_type="cell", + function = _dummy_field, + units = "code_length / code_time**2") for ptype in particle_field_types: - self.add_field((ptype, "particle_position_z"), sampling_type="particle", + 
self.add_field((ptype, "particle_position_z"), + sampling_type="particle", function = _dummy_position, units = "code_length") - self.add_field((ptype, "particle_velocity_z"), sampling_type="particle", + self.add_field((ptype, "particle_velocity_z"), + sampling_type="particle", function = _dummy_velocity, units = "code_length / code_time") @@ -291,25 +292,31 @@ def __init__(self, ds, field_list): super(ChomboPICFieldInfo1D, self).__init__(ds, field_list) for ftype in fluid_field_types: - self.add_field((ftype, 'gravitational_field_y'), sampling_type="cell", - function = _dummy_field, - units = "code_length / code_time**2") + self.add_field((ftype, 'gravitational_field_y'), + sampling_type="cell", + function = _dummy_field, + units = "code_length / code_time**2") - self.add_field((ftype, 'gravitational_field_z'), sampling_type="cell", - function = _dummy_field, - units = "code_length / code_time**2") + self.add_field((ftype, 'gravitational_field_z'), + sampling_type="cell", + function = _dummy_field, + units = "code_length / code_time**2") for ptype in particle_field_types: - self.add_field((ptype, "particle_position_y"), sampling_type="particle", + self.add_field((ptype, "particle_position_y"), + sampling_type="particle", function = _dummy_position, units = "code_length") - self.add_field((ptype, "particle_position_z"), sampling_type="particle", + self.add_field((ptype, "particle_position_z"), + sampling_type="particle", function = _dummy_position, units = "code_length") - self.add_field((ptype, "particle_velocity_y"), sampling_type="particle", + self.add_field((ptype, "particle_velocity_y"), + sampling_type="particle", function = _dummy_velocity, units = "code_length / code_time") - self.add_field((ptype, "particle_velocity_z"), sampling_type="particle", + self.add_field((ptype, "particle_velocity_z"), + sampling_type="particle", function = _dummy_velocity, units = "code_length / code_time") diff --git a/yt/frontends/chombo/io.py b/yt/frontends/chombo/io.py index 9e7a5caea2e..aa300765b90 100644 --- a/yt/frontends/chombo/io.py +++ b/yt/frontends/chombo/io.py @@ -1,18 +1,3 @@ -""" -The data-file handling functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import re import numpy as np from yt.geometry.selection_routines import \ diff --git a/yt/frontends/chombo/tests/test_outputs.py b/yt/frontends/chombo/tests/test_outputs.py index b80d9853906..da8e4353b85 100644 --- a/yt/frontends/chombo/tests/test_outputs.py +++ b/yt/frontends/chombo/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Chombo frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.testing import \ requires_file, \ assert_equal, \ diff --git a/yt/frontends/eagle/api.py b/yt/frontends/eagle/api.py index e16dec0bc67..e9ee31bb242 100644 --- a/yt/frontends/eagle/api.py +++ b/yt/frontends/eagle/api.py @@ -1,19 +1,3 @@ -""" -API for EAGLE frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ EagleDataset, \ EagleNetworkDataset diff --git a/yt/frontends/eagle/data_structures.py b/yt/frontends/eagle/data_structures.py index f1e88c9c3a3..2577bdd636f 100644 --- a/yt/frontends/eagle/data_structures.py +++ b/yt/frontends/eagle/data_structures.py @@ -1,20 +1,3 @@ -""" -Data structures for EAGLE frontend. - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np diff --git a/yt/frontends/eagle/definitions.py b/yt/frontends/eagle/definitions.py index 4f1db0c2c36..1ab8b51d979 100644 --- a/yt/frontends/eagle/definitions.py +++ b/yt/frontends/eagle/definitions.py @@ -1,19 +1,3 @@ -""" -EAGLE definitions - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - eaglenetwork_ions = \ ('electron', 'H1', 'H2', 'H_m', 'He1', 'He2','He3', 'C1',\ 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C_m', 'N1', 'N2', \ diff --git a/yt/frontends/eagle/fields.py b/yt/frontends/eagle/fields.py index 8bbbb86ee24..0b11a4fad5d 100644 --- a/yt/frontends/eagle/fields.py +++ b/yt/frontends/eagle/fields.py @@ -1,20 +1,3 @@ -""" -EAGLE fields - - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.frontends.owls.fields import \ OWLSFieldInfo from yt.units.yt_array import YTQuantity diff --git a/yt/frontends/eagle/io.py b/yt/frontends/eagle/io.py index 84128d7c1e8..1ed10ddcb63 100644 --- a/yt/frontends/eagle/io.py +++ b/yt/frontends/eagle/io.py @@ -1,20 +1,3 @@ -""" -EAGLE data-file handling function - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.frontends.owls.io import \ IOHandlerOWLS diff --git a/yt/frontends/eagle/tests/test_outputs.py b/yt/frontends/eagle/tests/test_outputs.py index 51a6e46e169..bdf1992cb15 100644 --- a/yt/frontends/eagle/tests/test_outputs.py +++ b/yt/frontends/eagle/tests/test_outputs.py @@ -1,21 +1,6 @@ -""" -Eagle frontend tests using the snapshot_028_z000p000 dataset - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ - requires_file + requires_file, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ data_dir_load from yt.frontends.eagle.api import EagleDataset @@ -23,11 +8,15 @@ s28 = "snapshot_028_z000p000/snap_028_z000p000.0.hdf5" @requires_file(s28) def test_EagleDataset(): - assert isinstance(data_dir_load(s28), EagleDataset) + ds = data_dir_load(s28) + assert isinstance(ds, EagleDataset) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() s399 = "snipshot_399_z000p000/snip_399_z000p000.0.hdf5" @requires_file(s399) def test_Snipshot(): ds = data_dir_load(s399) - ds.index assert isinstance(ds, EagleDataset) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() diff --git a/yt/frontends/enzo/answer_testing_support.py b/yt/frontends/enzo/answer_testing_support.py index e73d001a6be..1c907f38a24 100644 --- a/yt/frontends/enzo/answer_testing_support.py +++ b/yt/frontends/enzo/answer_testing_support.py @@ -1,18 +1,3 @@ -""" -Answer Testing support for Enzo. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os diff --git a/yt/frontends/enzo/api.py b/yt/frontends/enzo/api.py index ef02373d38c..b1349d43faa 100644 --- a/yt/frontends/enzo/api.py +++ b/yt/frontends/enzo/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.enzo - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ EnzoGrid, \ EnzoGridInMemory, \ diff --git a/yt/frontends/enzo/data_structures.py b/yt/frontends/enzo/data_structures.py index 8b30cc57d46..addf196dd6b 100644 --- a/yt/frontends/enzo/data_structures.py +++ b/yt/frontends/enzo/data_structures.py @@ -1,18 +1,3 @@ -""" -Data structures for Enzo - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from __future__ import absolute_import from yt.utilities.on_demand_imports import _h5py as h5py @@ -28,7 +13,6 @@ from yt.utilities.on_demand_imports import \ _libconf as libconf from collections import defaultdict -from yt.extern.six.moves import zip as izip from yt.frontends.enzo.misc import \ cosmology_get_units @@ -159,7 +143,8 @@ def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, if field in self.ds.field_info: conv_factor = self.ds.field_info[field]._convert_function( self) - if self.ds.field_info[field].particle_type: continue + if self.ds.field_info[field].sampling_type == "particle": + continue temp = self.index.io._read_raw_data_set(self, field) temp = temp.swapaxes(0, 2) cube.field_data[field] = np.multiply( @@ -363,7 +348,7 @@ def _rebuild_top_grids(self, level = 0): mylog.info("Finished rebuilding") def _populate_grid_objects(self): - for g,f in izip(self.grids, self.filenames): + for g,f in zip(self.grids, self.filenames): g._prepare_grid() g._setup_dx() g.set_filename(f[0]) @@ -402,13 +387,15 @@ def _setup_derived_fields(self): aps = self.dataset.parameters.get( "AppendActiveParticleType", []) for fname, field in self.ds.field_info.items(): - if not field.particle_type: continue + if not field.sampling_type == "particle": continue if isinstance(fname, tuple): continue if field._function is NullFunc: continue for apt in aps: dd = field._copy_def() dd.pop("name") - self.ds.field_info.add_field((apt, fname), sampling_type="cell", **dd) + self.ds.field_info.add_field((apt, fname), + sampling_type="cell", + **dd) def _detect_output_fields(self): self.field_list = [] diff --git a/yt/frontends/enzo/definitions.py b/yt/frontends/enzo/definitions.py index 62b829b61f2..e69de29bb2d 100644 --- a/yt/frontends/enzo/definitions.py +++ b/yt/frontends/enzo/definitions.py @@ -1,15 +0,0 @@ -""" -Definitions specific to Enzo - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - diff --git a/yt/frontends/enzo/fields.py b/yt/frontends/enzo/fields.py index b7cb7192de8..1fc35263864 100644 --- a/yt/frontends/enzo/fields.py +++ b/yt/frontends/enzo/fields.py @@ -1,18 +1,3 @@ -""" -Fields specific to Enzo - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
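The Enzo hierarchy code above now distinguishes particle fields via `field.sampling_type == "particle"` rather than the removed `particle_type` attribute. A hedged sketch of the same check applied to a loaded dataset:

```python
# Hedged sketch: list the particle fields of a dataset using the
# sampling_type attribute this diff switches to.
import yt

ds = yt.load("path/to/enzo_output")  # placeholder path
particle_fields = [
    name for name, field in ds.field_info.items()
    if field.sampling_type == "particle"
]
print(particle_fields)
```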
-#----------------------------------------------------------------------------- - import numpy as np from yt.fields.field_info_container import \ FieldInfoContainer @@ -27,20 +12,20 @@ vel_units = "code_velocity" known_species_names = { - 'HI' : 'H', + 'HI' : 'H_p0', 'HII' : 'H_p1', - 'HeI' : 'He', + 'HeI' : 'He_p0', 'HeII' : 'He_p1', 'HeIII' : 'He_p2', - 'H2I' : 'H2', + 'H2I' : 'H2_p0', 'H2II' : 'H2_p1', 'HM' : 'H_m1', - 'HeH' : 'HeH', - 'DI' : 'D', + 'HeH' : 'HeH_p0', + 'DI' : 'D_p0', 'DII' : 'D_p1', - 'HDI' : 'HD', + 'HDI' : 'HD_p0', 'Electron': 'El', - 'OI' : 'O', + 'OI' : 'O_p0', 'OII' : 'O_p1', 'OIII' : 'O_p2', 'OIV' : 'O_p3', @@ -154,9 +139,10 @@ def add_species_field(self, species): # off, we add the species field itself. Then we'll add a few more # items... # - self.add_output_field(("enzo", "%s_Density" % species), sampling_type="cell", - take_log=True, - units="code_mass/code_length**3") + self.add_output_field(("enzo", "%s_Density" % species), + sampling_type="cell", + take_log=True, + units="code_mass/code_length**3") yt_name = known_species_names[species] # don't alias electron density since mass is wrong if species != "Electron": @@ -170,7 +156,8 @@ def setup_species_fields(self): if sp in known_species_names] def _electron_density(field, data): return data["Electron_Density"] * (me/mp) - self.add_field(("gas", "El_density"), sampling_type="cell", + self.add_field(("gas", "El_density"), + sampling_type="cell", function = _electron_density, units = self.ds.unit_system["density"]) for sp in species_names: @@ -217,8 +204,9 @@ def setup_energy_field(self): te_name = "TotalEnergy" if hydro_method == 2: - self.add_output_field(("enzo", te_name), sampling_type="cell", - units="code_velocity**2") + self.add_output_field(("enzo", te_name), + sampling_type="cell", + units="code_velocity**2") self.alias(("gas", "thermal_energy"), ("enzo", te_name)) def _ge_plus_kin(field, data): ret = data[te_name] + 0.5*data["velocity_x"]**2.0 @@ -233,7 +221,8 @@ def _ge_plus_kin(field, data): units = unit_system["specific_energy"]) elif dual_energy == 1: self.add_output_field( - ("enzo", te_name), sampling_type="cell", + ("enzo", te_name), + sampling_type="cell", units = "code_velocity**2") self.alias( ("gas", "total_energy"), @@ -248,7 +237,8 @@ def _ge_plus_kin(field, data): units = unit_system["specific_energy"]) elif hydro_method in (4, 6): self.add_output_field( - ("enzo", te_name), sampling_type="cell", + ("enzo", te_name), + sampling_type="cell", units="code_velocity**2") # Subtract off B-field energy def _sub_b(field, data): @@ -260,11 +250,14 @@ def _sub_b(field, data): ret -= data["magnetic_energy"]/data["density"] return ret self.add_field( - ("gas", "thermal_energy"), sampling_type="cell", - function=_sub_b, units = unit_system["specific_energy"]) + ("gas", "thermal_energy"), + sampling_type="cell", + function=_sub_b, + units=unit_system["specific_energy"]) else: # Otherwise, we assume TotalEnergy is kinetic+thermal self.add_output_field( - ("enzo", te_name), sampling_type="cell", + ("enzo", te_name), + sampling_type="cell", units = "code_velocity**2") self.alias( ("gas", "total_energy"), @@ -278,22 +271,36 @@ def _tot_minus_kin(field, data): ret -= 0.5*data["velocity_z"]**2.0 return ret self.add_field( - ("gas", "thermal_energy"), sampling_type="cell", - function = _tot_minus_kin, - units = unit_system["specific_energy"]) + ("gas", "thermal_energy"), + sampling_type="cell", + function=_tot_minus_kin, + units=unit_system["specific_energy"]) if multi_species == 0 and 'Mu' in params: + def 
_mean_molecular_weight(field, data): + return params["Mu"]*data['index', 'ones'] + + self.add_field( + ("gas", "mean_molecular_weight"), + sampling_type="cell", + function=_mean_molecular_weight, + units="") + def _number_density(field, data): return data['gas', 'density']/(mp*params['Mu']) + self.add_field( - ("gas", "number_density"), sampling_type="cell", - function = _number_density, + ("gas", "number_density"), + sampling_type="cell", + function=_number_density, units=unit_system["number_density"]) def setup_particle_fields(self, ptype): def _age(field, data): return data.ds.current_time - data["creation_time"] - self.add_field((ptype, "age"), sampling_type="particle", function = _age, - units = "yr") + self.add_field((ptype, "age"), + sampling_type="particle", + function=_age, + units = "yr") super(EnzoFieldInfo, self).setup_particle_fields(ptype) diff --git a/yt/frontends/enzo/io.py b/yt/frontends/enzo/io.py index 8989bfafca2..04230028f6b 100644 --- a/yt/frontends/enzo/io.py +++ b/yt/frontends/enzo/io.py @@ -1,22 +1,6 @@ -""" -Enzo-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.io_handler import \ BaseIOHandler from yt.utilities.logger import ytLogger as mylog -from yt.extern.six import b, iteritems from yt.utilities.on_demand_imports import _h5py as h5py from yt.geometry.selection_routines import GridSelector import numpy as np @@ -42,7 +26,7 @@ def _read_field_names(self, grid): fields = [] dtypes = set([]) add_io = "io" in grid.ds.particle_types - for name, v in iteritems(group): + for name, v in group.items(): # NOTE: This won't work with 1D datasets or references. # For all versions of Enzo I know about, we can assume all floats # are of the same size. So, let's grab one. @@ -83,7 +67,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for g in chunk.objs: if g.filename is None: continue if f is None: - #print "Opening (read) %s" % g.filename + #print("Opening (read) %s" % g.filename) f = h5py.File(g.filename, mode="r") nap = sum(g.NumberOfActiveParticles.values()) if g.NumberOfParticles == 0 and nap == 0: @@ -127,7 +111,7 @@ def io_iter(self, chunks, fields): # problem, but one we can return to. 
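For non-multispecies Enzo runs that carry a `Mu` parameter, the fields hunk above adds constant `mean_molecular_weight` and `number_density` fields. The underlying relation is n = rho / (mu * m_p); a small sketch with a stand-in value for the parameter:

```python
# Sketch of the fields added above; mu stands in for params["Mu"].
from yt.utilities.physical_constants import mp

mu = 0.6  # stand-in value, normally read from the Enzo parameter file

def _mean_molecular_weight(field, data):
    # spatially constant, broadcast over the mesh via the 'ones' field
    return mu * data["index", "ones"]

def _number_density(field, data):
    return data["gas", "density"] / (mp * mu)
```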
if fid is not None: fid.close() - fid = h5py.h5f.open(b(obj.filename), h5py.h5f.ACC_RDONLY) + fid = h5py.h5f.open(obj.filename.encode('latin-1'), h5py.h5f.ACC_RDONLY) filename = obj.filename for field in fields: nodal_flag = self.ds.field_info[field].nodal_flag @@ -143,7 +127,7 @@ def _read_obj_field(self, obj, field, fid_data): fid, data = fid_data if fid is None: close = True - fid = h5py.h5f.open(b(obj.filename), h5py.h5f.ACC_RDONLY) + fid = h5py.h5f.open(obj.filename.encode("latin-1"), h5py.h5f.ACC_RDONLY) else: close = False if data is None: @@ -152,7 +136,7 @@ def _read_obj_field(self, obj, field, fid_data): ftype, fname = field try: node = "/Grid%08i/%s" % (obj.id, fname) - dg = h5py.h5d.open(fid, b(node)) + dg = h5py.h5d.open(fid, node.encode("latin-1")) except KeyError: if fname == "Dark_Matter_Density": data[:] = 0 @@ -323,7 +307,7 @@ def _read_fluid_selection(self, chunks, selector, fields, size): f = None for g in chunk.objs: if f is None: - #print "Opening (count) %s" % g.filename + #print("Opening (count) %s" % g.filename) f = h5py.File(g.filename, mode="r") gds = f.get("/Grid%08i" % g.id) if gds is None: diff --git a/yt/frontends/enzo/misc.py b/yt/frontends/enzo/misc.py index 43a9ee98787..9d45fdd98f2 100644 --- a/yt/frontends/enzo/misc.py +++ b/yt/frontends/enzo/misc.py @@ -1,18 +1,3 @@ -""" -Miscellaneous functions that are Enzo-specific - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.physical_ratios import \ diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 749040319f7..3e4f4d2c43d 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -1,23 +1,12 @@ -""" -EnzoSimulation class and member functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
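The Enzo io hunks above now encode filenames and HDF5 node paths themselves (latin-1) before handing them to the h5py low-level API, instead of relying on six's `b()`. A hedged sketch of reading a single grid field that way (paths, grid id, and field name are placeholders; the `shape`/`dtype` shortcut on the low-level dataset id is assumed):

```python
# Low-level h5py pattern from the hunks above: names must be bytes, so they
# are encoded explicitly before h5f.open / h5d.open.
import numpy as np
from yt.utilities.on_demand_imports import _h5py as h5py

filename = "DD0046/DD0046.cpu0000"          # placeholder grid file
node = "/Grid%08i/%s" % (1, "Density")      # placeholder grid id / field name

fid = h5py.h5f.open(filename.encode("latin-1"), h5py.h5f.ACC_RDONLY)
dg = h5py.h5d.open(fid, node.encode("latin-1"))
data = np.empty(dg.shape, dtype=dg.dtype)   # buffer matching the on-disk dataset
dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)   # fill the buffer
fid.close()
```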
-#----------------------------------------------------------------------------- - import numpy as np import glob import os -from math import ceil +from unyt import \ + dimensions, \ + unyt_array +from unyt.unit_registry import \ + UnitRegistry from yt.convenience import \ load @@ -25,11 +14,6 @@ only_on_root from yt.data_objects.time_series import \ SimulationTimeSeries, DatasetSeries -from yt.units import dimensions -from yt.units.unit_registry import \ - UnitRegistry -from yt.units.yt_array import \ - YTArray from yt.utilities.cosmology import \ Cosmology from yt.utilities.exceptions import \ @@ -94,13 +78,18 @@ def _set_units(self): unit_registry=self.unit_registry) self.time_unit = self.cosmology.time_unit.in_units("s") - self.unit_registry.modify("h", self.hubble_constant) + if 'h' in self.unit_registry: + self.unit_registry.modify('h', self.hubble_constant) + else: + self.unit_registry.add('h', self.hubble_constant, + dimensions.dimensionless) # Comoving lengths - for my_unit in ["m", "pc", "AU", "au"]: + for my_unit in ["m", "pc", "AU"]: new_unit = "%scm" % my_unit # technically not true, but should be ok self.unit_registry.add(new_unit, self.unit_registry.lut[my_unit][0], - dimensions.length, "\\rm{%s}/(1+z)" % my_unit) + dimensions.length, "\\rm{%s}/(1+z)" % my_unit, + prefixable=True) self.length_unit = self.quan(self.box_size, "Mpccm / h", registry=self.unit_registry) else: @@ -256,8 +245,8 @@ def get_time_series(self, time_data=True, redshift_data=True, else: final_cycle = min(final_cycle, self.parameters['StopCycle']) - my_outputs = my_all_outputs[int(ceil(float(initial_cycle) / - self.parameters['CycleSkipDataDump'])): + my_outputs = my_all_outputs[int(np.ceil(float(initial_cycle) / + self.parameters['CycleSkipDataDump'])): (final_cycle / self.parameters['CycleSkipDataDump'])+1] else: @@ -266,7 +255,7 @@ def get_time_series(self, time_data=True, redshift_data=True, my_initial_time = self.quan(initial_time, "code_time") elif isinstance(initial_time, tuple) and len(initial_time) == 2: my_initial_time = self.quan(*initial_time) - elif not isinstance(initial_time, YTArray): + elif not isinstance(initial_time, unyt_array): raise RuntimeError( "Error: initial_time must be given as a float or " + "tuple of (value, units).") @@ -280,7 +269,7 @@ def get_time_series(self, time_data=True, redshift_data=True, my_final_time = self.quan(final_time, "code_time") elif isinstance(final_time, tuple) and len(final_time) == 2: my_final_time = self.quan(*final_time) - elif not isinstance(final_time, YTArray): + elif not isinstance(final_time, unyt_array): raise RuntimeError( "Error: final_time must be given as a float or " + "tuple of (value, units).") diff --git a/yt/frontends/enzo/tests/test_outputs.py b/yt/frontends/enzo/tests/test_outputs.py index 96b3ab33d60..8c5de7c2543 100644 --- a/yt/frontends/enzo/tests/test_outputs.py +++ b/yt/frontends/enzo/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Enzo frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
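The Enzo simulation handler above now builds its unit registry directly on `unyt`: the dimensionless 'h' symbol is added when missing, and each comoving length unit is registered with `prefixable=True` so prefixed spellings like "Mpccm" parse. A hedged sketch of that registration (the Hubble constant and box size are stand-in values):

```python
# Sketch of the unit-registry setup in the hunk above.
from unyt import dimensions, unyt_quantity
from unyt.unit_registry import UnitRegistry

registry = UnitRegistry()
hubble_constant = 0.7  # stand-in value

# add (or update) the dimensionless 'h' symbol
if "h" in registry:
    registry.modify("h", hubble_constant)
else:
    registry.add("h", hubble_constant, dimensions.dimensionless)

# comoving counterparts of the base length units; prefixable=True lets
# prefixed forms such as "Mpccm" be parsed
for my_unit in ["m", "pc", "AU"]:
    new_unit = "%scm" % my_unit  # technically not true, but should be ok
    registry.add(new_unit, registry.lut[my_unit][0],
                 dimensions.length, "\\rm{%s}/(1+z)" % my_unit,
                 prefixable=True)

box_size = unyt_quantity(25.0, "Mpccm/h", registry=registry)
print(box_size.to("Mpc"))
```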
-#----------------------------------------------------------------------------- - import numpy as np from yt.testing import \ @@ -26,9 +11,7 @@ requires_ds, \ small_patch_amr, \ big_patch_amr, \ - data_dir_load, \ - AnalyticHaloMassFunctionTest, \ - SimulatedHaloMassFunctionTest + data_dir_load from yt.visualization.plot_window import \ SlicePlot from yt.frontends.enzo.api import EnzoDataset @@ -136,18 +119,6 @@ def test_kh2d(): test_kh2d.__name__ = test.description yield test -@requires_ds(enzotiny) -def test_simulated_halo_mass_function(): - ds = data_dir_load(enzotiny) - for finder in ["fof", "hop"]: - yield SimulatedHaloMassFunctionTest(ds, finder) - -@requires_ds(enzotiny) -def test_analytic_halo_mass_function(): - ds = data_dir_load(enzotiny) - for fit in range(1, 6): - yield AnalyticHaloMassFunctionTest(ds, fit) - @requires_ds(ecp, big_data=True) def test_ecp(): ds = data_dir_load(ecp) @@ -163,9 +134,9 @@ def test_nuclei_density_fields(): ds = data_dir_load(ecp) ad = ds.all_data() assert_array_equal(ad["H_nuclei_density"], - (ad["H_number_density"] + ad["H_p1_number_density"])) + (ad["H_p0_number_density"] + ad["H_p1_number_density"])) assert_array_equal(ad["He_nuclei_density"], - (ad["He_number_density"] + + (ad["He_p0_number_density"] + ad["He_p1_number_density"] + ad["He_p2_number_density"])) @requires_file(enzotiny) diff --git a/yt/frontends/enzo_p/api.py b/yt/frontends/enzo_p/api.py index d7bb07bd565..76010d668d2 100644 --- a/yt/frontends/enzo_p/api.py +++ b/yt/frontends/enzo_p/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.enzo_p - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ EnzoPGrid, \ EnzoPHierarchy, \ diff --git a/yt/frontends/enzo_p/data_structures.py b/yt/frontends/enzo_p/data_structures.py index 39da2ae0efd..5e6aa498b33 100644 --- a/yt/frontends/enzo_p/data_structures.py +++ b/yt/frontends/enzo_p/data_structures.py @@ -1,18 +1,3 @@ -""" -Data structures for Enzo-P - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from __future__ import absolute_import from yt.utilities.on_demand_imports import \ diff --git a/yt/frontends/enzo_p/definitions.py b/yt/frontends/enzo_p/definitions.py index 949d5197e1b..e69de29bb2d 100644 --- a/yt/frontends/enzo_p/definitions.py +++ b/yt/frontends/enzo_p/definitions.py @@ -1,15 +0,0 @@ -""" -Definitions specific to Enzo-P - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
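Because the neutral species are now spelled `H_p0` / `He_p0`, the nuclei-density test above sums the explicitly ionization-tagged number densities. A tiny hedged check mirroring those assertions (the dataset path is a placeholder for a multispecies Enzo output):

```python
# Mirrors the updated assertions above; requires a multispecies Enzo dataset.
import yt
from yt.testing import assert_array_equal

ds = yt.load("path/to/multispecies_enzo_output")  # placeholder path
ad = ds.all_data()
assert_array_equal(
    ad["H_nuclei_density"],
    ad["H_p0_number_density"] + ad["H_p1_number_density"])
```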
-#----------------------------------------------------------------------------- - diff --git a/yt/frontends/enzo_p/fields.py b/yt/frontends/enzo_p/fields.py index c96a579c214..904c705752a 100644 --- a/yt/frontends/enzo_p/fields.py +++ b/yt/frontends/enzo_p/fields.py @@ -1,18 +1,3 @@ -""" -Fields specific to Enzo-P - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer from yt.fields.particle_fields import \ diff --git a/yt/frontends/enzo_p/io.py b/yt/frontends/enzo_p/io.py index f3643ba3c8c..ec190190bb2 100644 --- a/yt/frontends/enzo_p/io.py +++ b/yt/frontends/enzo_p/io.py @@ -1,23 +1,7 @@ -""" -Enzo-P-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.exceptions import \ YTException from yt.utilities.io_handler import \ BaseIOHandler -from yt.extern.six import b, iteritems from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np @@ -50,7 +34,7 @@ def _read_field_names(self, grid): dtypes = set() # keep one field for each particle type so we can count later sample_pfields = {} - for name, v in iteritems(group): + for name, v in group.items(): if not hasattr(v, "shape") or v.dtype == "O": continue # mesh fields are "field " @@ -142,7 +126,7 @@ def io_iter(self, chunks, fields): # problem, but one we can return to. if fid is not None: fid.close() - fid = h5py.h5f.open(b(obj.filename), h5py.h5f.ACC_RDONLY) + fid = h5py.h5f.open(obj.filename.encode('latin-1'), h5py.h5f.ACC_RDONLY) filename = obj.filename for field in fields: data = None @@ -156,12 +140,12 @@ def _read_obj_field(self, obj, field, fid_data): fid, data = fid_data if fid is None: close = True - fid = h5py.h5f.open(b(obj.filename), h5py.h5f.ACC_RDONLY) + fid = h5py.h5f.open(obj.filename.encode('latin-1'), h5py.h5f.ACC_RDONLY) else: close = False ftype, fname = field node = "/%s/field%s%s" % (obj.block_name, self._sep, fname) - dg = h5py.h5d.open(fid, b(node)) + dg = h5py.h5d.open(fid, node.encode('latin-1')) rdata = np.empty(self.ds.grid_dimensions[:self.ds.dimensionality][::-1], dtype=self._field_dtype) dg.read(h5py.h5s.ALL, h5py.h5s.ALL, rdata) diff --git a/yt/frontends/enzo_p/misc.py b/yt/frontends/enzo_p/misc.py index 3887a85f4e8..3392f4520ab 100644 --- a/yt/frontends/enzo_p/misc.py +++ b/yt/frontends/enzo_p/misc.py @@ -1,18 +1,3 @@ -""" -Miscellaneous functions that are Enzo-P-specific - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import \ diff --git a/yt/frontends/enzo_p/tests/test_misc.py b/yt/frontends/enzo_p/tests/test_misc.py index 95fb0fe3d4f..da6ec704f52 100644 --- a/yt/frontends/enzo_p/tests/test_misc.py +++ b/yt/frontends/enzo_p/tests/test_misc.py @@ -1,18 +1,3 @@ -""" -Enzo-P misc tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) yt Development Team. All rights reserved. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.frontends.enzo_p.misc import \ diff --git a/yt/frontends/enzo_p/tests/test_outputs.py b/yt/frontends/enzo_p/tests/test_outputs.py index 4b16d2b1ad7..f9210c4fef3 100644 --- a/yt/frontends/enzo_p/tests/test_outputs.py +++ b/yt/frontends/enzo_p/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Enzo-P frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.on_demand_imports import \ diff --git a/yt/frontends/exodus_ii/__init__.py b/yt/frontends/exodus_ii/__init__.py index 7b8e0f1ebe1..aabf7b36d63 100644 --- a/yt/frontends/exodus_ii/__init__.py +++ b/yt/frontends/exodus_ii/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/exodus_ii/api.py b/yt/frontends/exodus_ii/api.py index 657b689b8b0..ae0693f14ee 100644 --- a/yt/frontends/exodus_ii/api.py +++ b/yt/frontends/exodus_ii/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.exodus_ii - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ ExodusIIUnstructuredMesh, \ ExodusIIUnstructuredIndex, \ diff --git a/yt/frontends/exodus_ii/data_structures.py b/yt/frontends/exodus_ii/data_structures.py index 79eddd47cf6..d7118fec595 100644 --- a/yt/frontends/exodus_ii/data_structures.py +++ b/yt/frontends/exodus_ii/data_structures.py @@ -1,17 +1,3 @@ -""" -Exodus II data structures - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np from yt.funcs import \ diff --git a/yt/frontends/exodus_ii/fields.py b/yt/frontends/exodus_ii/fields.py index bedd32cbe36..2797db48d06 100644 --- a/yt/frontends/exodus_ii/fields.py +++ b/yt/frontends/exodus_ii/fields.py @@ -1,18 +1,3 @@ -""" -ExodusII-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/exodus_ii/io.py b/yt/frontends/exodus_ii/io.py index d7cd0636792..ffda8d31aa5 100644 --- a/yt/frontends/exodus_ii/io.py +++ b/yt/frontends/exodus_ii/io.py @@ -1,18 +1,3 @@ -""" -ExodusII-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.io_handler import \ BaseIOHandler diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index 314417eb933..f8668290eb8 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -1,6 +1,5 @@ import glob import os -from yt.extern.six import add_metaclass from yt.convenience import \ load from yt.funcs import \ @@ -16,8 +15,8 @@ RegisteredSimulationTimeSeries -@add_metaclass(RegisteredSimulationTimeSeries) -class ExodusIISimulation(DatasetSeries): + +class ExodusIISimulation(DatasetSeries, metaclass = RegisteredSimulationTimeSeries): r""" Initialize an ExodusII Simulation object. @@ -33,10 +32,10 @@ class ExodusIISimulation(DatasetSeries): >>> sim = yt.simulation("demo_second", "ExodusII") >>> sim.get_time_series() >>> for ds in sim: - ... print ds.current_time + ... print(ds.current_time) """ - + def __init__(self, simulation_directory, find_outputs=False): self.simulation_directory = simulation_directory fn_pattern = "%s/*" % self.simulation_directory diff --git a/yt/frontends/exodus_ii/tests/test_outputs.py b/yt/frontends/exodus_ii/tests/test_outputs.py index ba4834ca8ac..08504c217cb 100644 --- a/yt/frontends/exodus_ii/tests/test_outputs.py +++ b/yt/frontends/exodus_ii/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Exodus II frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.testing import \ assert_equal, \ assert_array_equal, \ diff --git a/yt/frontends/fits/api.py b/yt/frontends/fits/api.py index 84876953685..89013272a13 100644 --- a/yt/frontends/fits/api.py +++ b/yt/frontends/fits/api.py @@ -1,15 +1,3 @@ -""" -API for yt.frontends.fits -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ FITSGrid, \ FITSHierarchy, \ diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index f31a9bf230e..9455df15713 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -1,20 +1,7 @@ -""" -FITS-specific data structures -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import stat import numpy as np import numpy.core.defchararray as np_char import os -import re import time import uuid import weakref @@ -35,28 +22,26 @@ YTDataChunk from yt.data_objects.static_output import \ Dataset +from yt.units.unit_object import UnitParseError +from yt.units.yt_array import YTQuantity from yt.utilities.file_handler import \ FITSFileHandler from yt.utilities.io_handler import \ io_registry from .fields import FITSFieldInfo, \ - WCSFITSFieldInfo + WCSFITSFieldInfo, YTFITSFieldInfo from yt.utilities.decompose import \ decompose_array, get_psize from yt.funcs import issue_deprecation_warning +from yt.units import dimensions from yt.units.unit_lookup_table import \ default_unit_symbol_lut, \ - prefixable_units, \ unit_prefixes -from yt.units import dimensions from yt.utilities.on_demand_imports import \ _astropy, NotAModule lon_prefixes = ["X","RA","GLON","LINEAR"] lat_prefixes = ["Y","DEC","GLAT","LINEAR"] -delimiters = ["*", "/", "-", "^", "(", ")"] -delimiters += [str(i) for i in range(10)] -regex_pattern = '|'.join(re.escape(_) for _ in delimiters) spec_names = {"V": "Velocity", "F": "Frequency", @@ -69,11 +54,10 @@ sky_prefixes = list(sky_prefixes) spec_prefixes = list(spec_names.keys()) -field_from_unit = {"Jy":"intensity", - "K":"temperature"} class FITSGrid(AMRGridPatch): _id_offset = 0 + def __init__(self, id, index, level): AMRGridPatch.__init__(self, id, filename=index.index_filename, index=index) @@ -84,6 +68,7 @@ def __init__(self, id, index, level): def __repr__(self): return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions) + class FITSHierarchy(GridIndex): grid = FITSGrid @@ -92,7 +77,7 @@ def __init__(self,ds,dataset_type='fits'): self.dataset_type = dataset_type self.field_indexes = {} self.dataset = weakref.proxy(ds) - # for now, the index file is the dataset! 
+ # for now, the index file is the dataset self.index_filename = self.dataset.parameter_filename self.directory = os.path.dirname(self.index_filename) self._handle = ds._handle @@ -103,31 +88,27 @@ def _initialize_data_storage(self): pass def _guess_name_from_units(self, units): + field_from_unit = {"Jy": "intensity", + "K": "temperature"} for k,v in field_from_unit.items(): if k in units: mylog.warning("Guessing this is a %s field based on its units of %s." % (v,k)) return v return None - def _determine_image_units(self, header, known_units): + def _determine_image_units(self, bunit): try: - field_units = header["bunit"].lower().strip(" ").replace(" ", "") - # FITS units always return upper-case, so we need to get - # the right case by comparing against known units. This - # only really works for common units. - units = set(re.split(regex_pattern, field_units)) - if '' in units: - units.remove('') - n = int(0) - for unit in units: - if unit in known_units: - field_units = field_units.replace(unit, known_units[unit]) - n += 1 - if n != len(units) or n == 0: - field_units = "dimensionless" - if field_units[0] == "/": - field_units = "1%s" % field_units - return field_units + try: + # First let AstroPy attempt to figure the unit out + u = 1.0*_astropy.units.Unit(bunit, format="fits") + u = YTQuantity.from_astropy(u).units + except ValueError: + try: + # Let yt try it by itself + u = self.ds.quan(1.0, bunit).units + except UnitParseError: + return "dimensionless" + return str(u) except KeyError: return "dimensionless" @@ -155,7 +136,7 @@ def _detect_output_fields(self): [(unit.lower(), unit) for unit in self.ds.unit_registry.lut] ) for unit in list(known_units.values()): - if unit in prefixable_units: + if unit in self.ds.unit_registry.prefixable_units: for p in ["n","u","m","c","k"]: known_units[(p+unit).lower()] = p+unit # We create a field from each slice on the 4th axis @@ -168,7 +149,7 @@ def _detect_output_fields(self): if isinstance(hdu, _astropy.pyfits.BinTableHDU) or hdu.header["naxis"] == 0: continue if self._ensure_same_dims(hdu): - units = self._determine_image_units(hdu.header, known_units) + units = self._determine_image_units(hdu.header["bunit"]) try: # Grab field name from btype fname = hdu.header["btype"] @@ -275,6 +256,7 @@ def _chunk_io(self, dobj, cache = True, local_only = False): yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs), cache = cache) + def find_primary_header(fileh): # Sometimes the primary hdu doesn't have an image if len(fileh) > 1 and fileh[0].header["naxis"] == 0: @@ -284,6 +266,7 @@ def find_primary_header(fileh): header = fileh[first_image].header return header, first_image + def check_fits_valid(args): ext = args[0].rsplit(".", 1)[-1] if ext.upper() in ("GZ", "FZ"): @@ -305,12 +288,12 @@ def check_fits_valid(args): except Exception: pass return None + def check_sky_coords(args, ndim): fileh = check_fits_valid(args) if fileh is not None: try: - if (len(fileh) > 1 and - fileh[1].name == "EVENTS" and ndim == 2): + if len(fileh) > 1 and fileh[1].name == "EVENTS" and ndim == 2: fileh.close() return True else: @@ -319,12 +302,15 @@ def check_sky_coords(args, ndim): return False axis_names = [header.get("ctype%d" % (i + 1), "") for i in range(header["naxis"])] + if len(axis_names) == 3 and axis_names.count("LINEAR") == 2: + return any(a[0] in spec_prefixes for a in axis_names) x = find_axes(axis_names, sky_prefixes + spec_prefixes) fileh.close() return x >= ndim except Exception: pass return False + class FITSDataset(Dataset): _index_class = 
FITSHierarchy _field_info_class = FITSFieldInfo @@ -390,39 +376,44 @@ def __init__(self, filename, def _set_code_unit_attributes(self): """ - Generates the conversion to various physical _units based on the parameter file + Generates the conversion to various physical _units based on the + parameter file """ - default_length_units = [u for u,v in default_unit_symbol_lut.items() - if str(v[1]) == "(length)"] - more_length_units = [] - for unit in default_length_units: - if unit in prefixable_units: - more_length_units += [prefix+unit for prefix in unit_prefixes] - default_length_units += more_length_units - file_units = [] - cunits = [self.wcs.wcs.cunit[i] for i in range(self.dimensionality)] - for unit in (_.to_string() for _ in cunits): - if unit in default_length_units: - file_units.append(unit) - if len(set(file_units)) == 1: - length_factor = self.wcs.wcs.cdelt[0] - length_unit = str(file_units[0]) - mylog.info("Found length units of %s." % (length_unit)) - else: - self.no_cgs_equiv_length = True - mylog.warning("No length conversion provided. Assuming 1 = 1 cm.") - length_factor = 1.0 - length_unit = "cm" - setdefaultattr(self, 'length_unit', self.quan(length_factor,length_unit)) - setdefaultattr(self, 'mass_unit', self.quan(1.0, "g")) - setdefaultattr(self, 'time_unit', self.quan(1.0, "s")) - setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s")) - if "beam_size" in self.specified_parameters: - beam_size = self.specified_parameters["beam_size"] - beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value - else: - beam_size = 1.0 - self.unit_registry.add("beam", beam_size, dimensions=dimensions.solid_angle) + if getattr(self, 'length_unit', None) is None: + default_length_units = [u for u,v in default_unit_symbol_lut.items() + if str(v[1]) == "(length)"] + more_length_units = [] + for unit in default_length_units: + if unit in self.unit_registry.prefixable_units: + more_length_units += [prefix+unit for prefix in unit_prefixes] + default_length_units += more_length_units + file_units = [] + cunits = [self.wcs.wcs.cunit[i] for i in range(self.dimensionality)] + for unit in (_.to_string() for _ in cunits): + if unit in default_length_units: + file_units.append(unit) + if len(set(file_units)) == 1: + length_factor = self.wcs.wcs.cdelt[0] + length_unit = str(file_units[0]) + mylog.info("Found length units of %s." % length_unit) + else: + self.no_cgs_equiv_length = True + mylog.warning("No length conversion provided. 
Assuming 1 = 1 cm.") + length_factor = 1.0 + length_unit = "cm" + setdefaultattr(self, 'length_unit', + self.quan(length_factor, length_unit)) + for unit, cgs in [("time", "s"), ("mass", "g")]: + # We set these to cgs for now, but they may have been overridden + if getattr(self, unit+'_unit', None) is not None: + continue + mylog.warning("Assuming 1.0 = 1.0 %s", cgs) + setdefaultattr(self, "%s_unit" % unit, self.quan(1.0, cgs)) + self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit / + (self.time_unit**2 * self.length_unit)) + self.magnetic_unit.convert_to_units("gauss") + self.velocity_unit = self.length_unit / self.time_unit + def _parse_parameter_file(self): @@ -447,20 +438,13 @@ def _parse_parameter_file(self): self._determine_wcs() + self.current_time = 0.0 + self.domain_dimensions = np.array(self.dims)[:self.dimensionality] if self.dimensionality == 2: self.domain_dimensions = np.append(self.domain_dimensions, [int(1)]) - - domain_left_edge = np.array([0.5]*3) - domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions]) - - if self.dimensionality == 2: - domain_left_edge[-1] = 0.5 - domain_right_edge[-1] = 1.5 - - self.domain_left_edge = domain_left_edge - self.domain_right_edge = domain_right_edge + self._determine_bbox() # Get the simulation time try: @@ -511,6 +495,17 @@ def _determine_wcs(self): else: self.wcs = wcs + def _determine_bbox(self): + domain_left_edge = np.array([0.5]*3) + domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions]) + + if self.dimensionality == 2: + domain_left_edge[-1] = 0.5 + domain_right_edge[-1] = 1.5 + + self.domain_left_edge = domain_left_edge + self.domain_right_edge = domain_right_edge + def _determine_axes(self): self.lat_axis = 1 self.lon_axis = 0 @@ -540,6 +535,7 @@ def _guess_candidates(cls, base, directories, files): def close(self): self._handle.close() + def find_axes(axis_names, prefixes): x = 0 for p in prefixes: @@ -547,6 +543,71 @@ def find_axes(axis_names, prefixes): x += np.any(y) return x + +class YTFITSDataset(FITSDataset): + _field_info_class = YTFITSFieldInfo + + def _parse_parameter_file(self): + super(YTFITSDataset, self)._parse_parameter_file() + # Get the current time + if "time" in self.primary_header: + self.current_time = self.primary_header["time"] + + def _set_code_unit_attributes(self): + """ + Generates the conversion to various physical _units based on the parameter file + """ + for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"), + ("velocity", "cm/s"), ("magnetic", "gauss")]: + if unit == "magnetic": + short_unit = "bfunit" + else: + short_unit = "%sunit" % unit[0] + if short_unit in self.primary_header: + # units should now be in header + u = self.quan(self.primary_header[short_unit], + self.primary_header.comments[short_unit].strip("[]")) + mylog.info("Found %s units of %s." % (unit, u)) + else: + if unit == "length": + # Falling back to old way of getting units for length + # in old files + u = self.quan(1.0, str(self.wcs.wcs.cunit[0])) + mylog.info("Found %s units of %s." % (unit, u)) + else: + # Give up otherwise + u = self.quan(1.0, cgs) + mylog.warning("No unit for %s found. 
Assuming 1.0 code_%s = 1.0 %s" % (unit, unit, cgs)) + setdefaultattr(self, '%s_unit' % unit, u) + + def _determine_bbox(self): + dx = np.zeros(3) + dx[:self.dimensionality] = self.wcs.wcs.cdelt + domain_left_edge = np.zeros(3) + domain_left_edge[:self.dimensionality] = self.wcs.wcs.crval-dx[:self.dimensionality]*(self.wcs.wcs.crpix-0.5) + domain_right_edge = domain_left_edge + dx*self.domain_dimensions + + if self.dimensionality == 2: + domain_left_edge[-1] = 0.0 + domain_right_edge[-1] = dx[0] + + self.domain_left_edge = domain_left_edge + self.domain_right_edge = domain_right_edge + + @classmethod + def _is_valid(cls, *args, **kwargs): + fileh = check_fits_valid(args) + if fileh is None: + return False + else: + if "WCSNAME" in fileh[0].header: + isyt = fileh[0].header["WCSNAME"].strip() == "yt" + else: + isyt = False + fileh.close() + return isyt + + class SkyDataFITSDataset(FITSDataset): _field_info_class = WCSFITSFieldInfo @@ -600,11 +661,16 @@ def _set_code_unit_attributes(self): pixel_area = self.quan(pixel_area, "%s**2" % (units)).in_cgs() pixel_dims = pixel_area.units.dimensions self.unit_registry.add("pixel", float(pixel_area.value), dimensions=pixel_dims) + if "beam_size" in self.specified_parameters: + beam_size = self.specified_parameters["beam_size"] + beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value + self.unit_registry.add("beam", beam_size, dimensions=dimensions.solid_angle) @classmethod def _is_valid(cls, *args, **kwargs): return check_sky_coords(args, 2) + class SpectralCubeFITSHierarchy(FITSHierarchy): def _domain_decomp(self): @@ -701,6 +767,7 @@ def pixel2spec(self, pixel_value): def _is_valid(cls, *args, **kwargs): return check_sky_coords(args, 3) + class EventsFITSHierarchy(FITSHierarchy): def _detect_output_fields(self): @@ -726,6 +793,7 @@ def _parse_index(self): self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64') self._particle_indices[1] = self.grid_particle_count.squeeze() + class EventsFITSDataset(SkyDataFITSDataset): _index_class = EventsFITSHierarchy def __init__(self, filename, diff --git a/yt/frontends/fits/fields.py b/yt/frontends/fits/fields.py index c57cff62335..4e2e0980d2c 100644 --- a/yt/frontends/fits/fields.py +++ b/yt/frontends/fits/fields.py @@ -1,27 +1,52 @@ -""" -FITS-specific fields -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer + class FITSFieldInfo(FieldInfoContainer): known_other_fields = () def __init__(self, ds, field_list, slice_info=None): super(FITSFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) for field in ds.field_list: - if field[0] == "fits": + if field[0] == "fits": self[field].take_log = False + +class YTFITSFieldInfo(FieldInfoContainer): + known_other_fields = ( + ("density", ("code_mass/code_length**3", ["density"], None)), + ("dark_matter_density", ("code_mass/code_length**3", ["dark_matter_density"], None)), + ("number_density", ("1/code_length**3", ["number_density"], None)), + ("pressure", ("dyne/code_length**2", ["pressure"], None)), + ("thermal_energy", ("erg / g", ["thermal_energy"], None)), + ("temperature", ("K", ["temperature"], None)), + ("velocity_x", ("code_length/code_time", ["velocity_x"], None)), + ("velocity_y", ("code_length/code_time", ["velocity_y"], None)), + ("velocity_z", ("code_length/code_time", ["velocity_z"], None)), + ("magnetic_field_x", ("gauss", [], None)), + ("magnetic_field_y", ("gauss", [], None)), + ("magnetic_field_z", ("gauss", [], None)), + ("metallicity", ("Zsun", ["metallicity"], None)), + + # We need to have a bunch of species fields here, too + ("metal_density", ("code_mass/code_length**3", ["metal_density"], None)), + ("hi_density", ("code_mass/code_length**3", ["hi_density"], None)), + ("hii_density", ("code_mass/code_length**3", ["hii_density"], None)), + ("h2i_density", ("code_mass/code_length**3", ["h2i_density"], None)), + ("h2ii_density", ("code_mass/code_length**3", ["h2ii_density"], None)), + ("h2m_density", ("code_mass/code_length**3", ["h2m_density"], None)), + ("hei_density", ("code_mass/code_length**3", ["hei_density"], None)), + ("heii_density", ("code_mass/code_length**3", ["heii_density"], None)), + ("heiii_density", ("code_mass/code_length**3", ["heiii_density"], None)), + ("hdi_density", ("code_mass/code_length**3", ["hdi_density"], None)), + ("di_density", ("code_mass/code_length**3", ["di_density"], None)), + ("dii_density", ("code_mass/code_length**3", ["dii_density"], None)), + ) + + def __init__(self, ds, field_list, slice_info=None): + super(YTFITSFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) + + class WCSFITSFieldInfo(FITSFieldInfo): def setup_fluid_fields(self): @@ -47,7 +72,7 @@ def _world_f(field, data): unit = "degree" if unit.lower() == "rad": unit = "radian" - self.add_field(("fits",name), sampling_type="cell", + self.add_field(("fits", name), sampling_type="cell", function=world_f(axis, unit), units=unit) if self.ds.dimensionality == 3: @@ -55,5 +80,5 @@ def _spec(field, data): axis = "xyz"[data.ds.spec_axis] sp = (data[axis].ndarray_view()-self.ds._p0)*self.ds._dz + self.ds._z0 return data.ds.arr(sp, data.ds.spec_unit) - self.add_field(("fits","spectral"), sampling_type="cell", function=_spec, + self.add_field(("fits", "spectral"), sampling_type="cell", function=_spec, units=self.ds.spec_unit, display_name=self.ds.spec_name) diff --git a/yt/frontends/fits/io.py b/yt/frontends/fits/io.py index 05c101cc50e..d93fe1ae3f9 100644 --- a/yt/frontends/fits/io.py +++ b/yt/frontends/fits/io.py @@ -1,15 +1,3 @@ -""" -FITS-specific IO functions -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.io_handler import \ @@ -79,19 +67,19 @@ def _read_fluid_selection(self, chunks, selector, fields, size): ind = 0 for chunk in chunks: for g in chunk.objs: - start = ((g.LeftEdge-self.ds.domain_left_edge)/dx).to_ndarray().astype("int") + start = ((g.LeftEdge-self.ds.domain_left_edge)/dx).d.astype("int") end = start + g.ActiveDimensions - slices = [slice(start[i],end[i]) for i in range(3)] + slices = [slice(start[i], end[i]) for i in range(3)] if self.ds.dimensionality == 2: nx, ny = g.ActiveDimensions[:2] nz = 1 data = np.zeros((nx,ny,nz)) - data[:,:,0] = ds.data[slices[1],slices[0]].transpose() + data[:,:,0] = ds.data[slices[1], slices[0]].T elif self.ds.naxis == 4: idx = self.ds.index._axis_map[fname] - data = ds.data[idx,slices[2],slices[1],slices[0]].transpose() + data = ds.data[idx, slices[2], slices[1], slices[0]].T else: - data = ds.data[slices[2],slices[1],slices[0]].transpose() + data = ds.data[slices[2], slices[1], slices[0]].T if fname in self.ds.nan_mask: data[np.isnan(data)] = self.ds.nan_mask[fname] elif "all" in self.ds.nan_mask: diff --git a/yt/frontends/fits/misc.py b/yt/frontends/fits/misc.py index 7a140788080..5a2084dfab6 100644 --- a/yt/frontends/fits/misc.py +++ b/yt/frontends/fits/misc.py @@ -1,26 +1,10 @@ -""" -Miscellaneous FITS routines -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import base64 -from yt.extern.six import PY3 from yt.fields.derived_field import ValidateSpatial from yt.funcs import mylog, issue_deprecation_warning from yt.utilities.on_demand_imports import _astropy from yt.units.yt_array import YTQuantity, YTArray -if PY3: - from io import BytesIO as IO -else: - from yt.extern.six.moves import StringIO as IO +from io import BytesIO import os @@ -67,7 +51,9 @@ def setup_counts_fields(ds, ebounds, ftype="gas"): cfunc = _make_counts(emin, emax) fname = "counts_%s-%s" % (emin, emax) mylog.info("Creating counts field %s." 
% fname) - ds.add_field((ftype,fname), sampling_type="cell", function=cfunc, + ds.add_field((ftype,fname), + sampling_type="cell", + function=cfunc, units="counts/pixel", validators = [ValidateSpatial()], display_name="Counts (%s-%s keV)" % (emin, emax)) @@ -159,7 +145,7 @@ def ds9_region(ds, reg, obj=None, field_parameters=None): >>> ds = yt.load("m33_hi.fits") >>> circle_region = ds9_region(ds, "circle.reg") - >>> print circle_region.quantities.extrema("flux") + >>> print(circle_region.quantities.extrema("flux")) """ import pyregion from yt.frontends.fits.api import EventsFITSDataset @@ -284,7 +270,7 @@ def _repr_html_(self): ret = '' for k, v in self.plots.items(): canvas = FigureCanvasAgg(v) - f = IO() + f = BytesIO() canvas.print_figure(f) f.seek(0) img = base64.b64encode(f.read()).decode() diff --git a/yt/frontends/fits/tests/test_outputs.py b/yt/frontends/fits/tests/test_outputs.py index 7063f33b4ea..6563b0fefe5 100644 --- a/yt/frontends/fits/tests/test_outputs.py +++ b/yt/frontends/fits/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -FITS frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ assert_equal, \ requires_file, \ diff --git a/yt/frontends/flash/__init__.py b/yt/frontends/flash/__init__.py index 0d5dbd7499b..40d9d41e9d6 100644 --- a/yt/frontends/flash/__init__.py +++ b/yt/frontends/flash/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/flash/api.py b/yt/frontends/flash/api.py index 22931180ca6..dacd64a284f 100644 --- a/yt/frontends/flash/api.py +++ b/yt/frontends/flash/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.flash - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ FLASHGrid, \ FLASHHierarchy, \ diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 41ffda75fc4..115f7d5fac3 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -1,18 +1,3 @@ -""" -FLASH-specific data structures - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import os import stat import numpy as np @@ -21,7 +6,9 @@ from yt.data_objects.grid_patch import \ AMRGridPatch from yt.data_objects.static_output import \ - Dataset, ParticleFile + Dataset, \ + ParticleFile, \ + validate_index_order from yt.funcs import \ mylog, \ setdefaultattr @@ -35,6 +22,7 @@ from yt.utilities.physical_ratios import cm_per_mpc from .fields import FLASHFieldInfo + class FLASHGrid(AMRGridPatch): _id_offset = 1 #__slots__ = ["_level_id", "stop_index"] @@ -48,6 +36,7 @@ def __init__(self, id, index, level): def __repr__(self): return "FLASHGrid_%04i (%s)" % (self.id, self.ActiveDimensions) + class FLASHHierarchy(GridIndex): grid = FLASHGrid @@ -177,6 +166,7 @@ def _populate_grid_objects(self): g.dds[1] = DD self.max_level = self.grid_levels.max() + class FLASHDataset(Dataset): _index_class = FLASHHierarchy _field_info_class = FLASHFieldInfo @@ -452,25 +442,30 @@ def _guess_candidates(cls, base, directories, files): def close(self): self._handle.close() + class FLASHParticleFile(ParticleFile): pass + class FLASHParticleDataset(FLASHDataset): _index_class = ParticleIndex - over_refine_factor = 1 filter_bbox = False _file_class = FLASHParticleFile def __init__(self, filename, dataset_type='flash_particle_hdf5', - storage_filename = None, - units_override = None, - n_ref = 64, unit_system = "cgs"): + storage_filename=None, + units_override=None, + index_order=None, + index_filename=None, + unit_system="cgs"): + self.index_order = validate_index_order(index_order) + self.index_filename = index_filename if self._handle is not None: return self._handle = HDF5FileHandler(filename) - self.n_ref = n_ref self.refine_by = 2 - Dataset.__init__(self, filename, dataset_type, units_override=units_override, + Dataset.__init__(self, filename, dataset_type, + units_override=units_override, unit_system=unit_system) self.storage_filename = storage_filename @@ -478,9 +473,8 @@ def _parse_parameter_file(self): # Let the superclass do all the work but then # fix the domain dimensions super(FLASHParticleDataset, self)._parse_parameter_file() - nz = 1 << self.over_refine_factor domain_dimensions = np.zeros(3, "int32") - domain_dimensions[:self.dimensionality] = nz + domain_dimensions[:self.dimensionality] = 1 self.domain_dimensions = domain_dimensions self.filename_template = self.parameter_filename self.file_count = 1 diff --git a/yt/frontends/flash/fields.py b/yt/frontends/flash/fields.py index a24dcdcddf8..906a1c54f57 100644 --- a/yt/frontends/flash/fields.py +++ b/yt/frontends/flash/fields.py @@ -1,22 +1,5 @@ -""" -FLASH-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer -from yt.utilities.physical_constants import \ - Na # Common fields in FLASH: (Thanks to John ZuHone for this list) # @@ -40,6 +23,7 @@ erg_units = "code_mass * (code_length/code_time)**2" rho_units = "code_mass / code_length**3" + class FLASHFieldInfo(FieldInfoContainer): known_other_fields = ( ("velx", ("code_length/code_time", ["velocity_x"], None)), @@ -59,7 +43,7 @@ class FLASHFieldInfo(FieldInfoContainer): ("eion", (erg_units, [], "Ion Internal Energy")), ("eele", (erg_units, [], "Electron Internal Energy")), ("erad", (erg_units, [], "Radiation Internal Energy")), - ("pden", (rho_units, [], None)), + ("pden", (rho_units, [], "Particle Mass Density")), ("depo", ("code_length**2/code_time**2", [], None)), ("ye", ("", [], "Y_e")), ("magp", (pres_units, [], None)), @@ -93,16 +77,21 @@ class FLASHFieldInfo(FieldInfoContainer): ("particle_velz", ("code_length/code_time", ["particle_velocity_z"], None)), ("particle_tag", ("", ["particle_index"], None)), ("particle_mass", ("code_mass", ["particle_mass"], None)), + ("particle_gpot", ("code_length**2/code_time**2", + ["particle_gravitational_potential"], None)), ) def setup_fluid_fields(self): from yt.fields.magnetic_field import \ setup_magnetic_field_aliases unit_system = self.ds.unit_system + # Adopt FLASH 4.6 value for Na + Na = self.ds.quan(6.022140857e23, "g**-1") for i in range(1, 1000): - self.add_output_field(("flash", "r{0:03}".format(i)), sampling_type="cell", - units = "", - display_name="Energy Group {0}".format(i)) + self.add_output_field(("flash", "r{0:03}".format(i)), + sampling_type="cell", + units="", + display_name="Energy Group {0}".format(i)) # Add energy fields def ekin(data): ek = data["flash","velx"]**2 @@ -112,7 +101,8 @@ def ekin(data): ek += data["flash","velz"]**2 return 0.5*ek if ("flash","ener") in self.field_list: - self.add_output_field(("flash","ener"), sampling_type="cell", + self.add_output_field(("flash","ener"), + sampling_type="cell", units="code_length**2/code_time**2") self.alias(("gas","total_energy"),("flash","ener"), units=unit_system["specific_energy"]) @@ -123,10 +113,13 @@ def _ener(field, data): ener += data["flash","magp"]/data["flash","dens"] except Exception: pass return ener - self.add_field(("gas","total_energy"), sampling_type="cell", function=_ener, + self.add_field(("gas","total_energy"), + sampling_type="cell", + function=_ener, units=unit_system["specific_energy"]) if ("flash","eint") in self.field_list: - self.add_output_field(("flash","eint"), sampling_type="cell", + self.add_output_field(("flash","eint"), + sampling_type="cell", units="code_length**2/code_time**2") self.alias(("gas","thermal_energy"),("flash","eint"), units=unit_system["specific_energy"]) @@ -137,29 +130,57 @@ def _eint(field, data): eint -= data["flash","magp"]/data["flash","dens"] except Exception: pass return eint - self.add_field(("gas","thermal_energy"), sampling_type="cell", function=_eint, + self.add_field(("gas","thermal_energy"), + sampling_type="cell", + function=_eint, units=unit_system["specific_energy"]) + ## Derived FLASH Fields - def _nele(field, data): - Na_code = data.ds.quan(Na, '1/code_mass') - return data["flash","dens"]*data["flash","ye"]*Na_code - self.add_field(('flash','nele'), sampling_type="cell", function=_nele, units="code_length**-3") - self.add_field(('flash','edens'), sampling_type="cell", function=_nele, units="code_length**-3") - def 
_nion(field, data): - Na_code = data.ds.quan(Na, '1/code_mass') - return data["flash","dens"]*data["flash","sumy"]*Na_code - self.add_field(('flash','nion'), sampling_type="cell", function=_nion, units="code_length**-3") if ("flash", "abar") in self.field_list: - self.add_output_field(("flash", "abar"), sampling_type="cell", units="1") - else: + self.alias(("gas", "mean_molecular_weight"), ("flash", "abar")) + elif ("flash", "sumy") in self.field_list: def _abar(field, data): return 1.0 / data["flash","sumy"] - self.add_field(("flash","abar"), sampling_type="cell", function=_abar, units="1") + self.add_field(("gas", "mean_molecular_weight"), + sampling_type="cell", + function=_abar, + units="") + elif "eos_singlespeciesa" in self.ds.parameters: + def _abar(field, data): + return data.ds.parameters["eos_singlespeciesa"]*data["index", "ones"] + self.add_field(("gas", "mean_molecular_weight"), + sampling_type="cell", + function=_abar, + units="") + + if ("flash", "sumy") in self.field_list: + def _nele(field, data): + return data["flash", "dens"] * data["flash", "ye"] * Na + + self.add_field(('gas', 'El_number_density'), + sampling_type="cell", + function=_nele, + units=unit_system["number_density"]) + + def _nion(field, data): + return data["flash", "dens"] * data["flash", "sumy"] * Na + + self.add_field(('gas', 'ion_number_density'), + sampling_type="cell", + function=_nion, + units=unit_system["number_density"]) + + def _number_density(field, data): + return data["gas","El_number_density"]+data["gas","ion_number_density"] + else: + def _number_density(field, data): + return data["flash", "dens"]*Na/data["gas", "mean_molecular_weight"] - def _number_density(fields,data): - return (data["nele"]+data["nion"]) - self.add_field(("gas","number_density"), sampling_type="cell", function=_number_density, + self.add_field(("gas", "number_density"), + sampling_type="cell", + function=_number_density, units=unit_system["number_density"]) - setup_magnetic_field_aliases(self, "flash", ["mag%s" % ax for ax in "xyz"]) + setup_magnetic_field_aliases( + self, "flash", ["mag%s" % ax for ax in "xyz"]) diff --git a/yt/frontends/flash/io.py b/yt/frontends/flash/io.py index 86d91f22d83..e74f5adae7c 100644 --- a/yt/frontends/flash/io.py +++ b/yt/frontends/flash/io.py @@ -1,18 +1,3 @@ -""" -FLASH-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from itertools import groupby @@ -45,6 +30,7 @@ def determine_particle_fields(handle): _particle_fields = {} return _particle_fields + class IOHandlerFLASH(BaseIOHandler): _particle_reader = False _dataset_type = "flash_hdf5" @@ -160,6 +146,7 @@ def _read_chunk_data(self, chunk, fields): rv[g.id][field] = np.asarray(data[...,i], "=f8") return rv + class IOHandlerFLASHParticle(BaseIOHandler): _particle_reader = True _dataset_type = "flash_particle_hdf5" @@ -185,18 +172,15 @@ def _read_particle_coords(self, chunks, ptf): data_files.update(obj.data_files) px, py, pz = self._position_fields p_fields = self._handle["/tracer particles"] - assert(len(data_files) == 1) - for data_file in sorted(data_files): - pcount = self._count_particles(data_file)["io"] - for ptype, field_list in sorted(ptf.items()): - total = 0 - while total < pcount: - count = min(self._chunksize, pcount - total) - x = np.asarray(p_fields[total:total+count, px], dtype="=f8") - y = np.asarray(p_fields[total:total+count, py], dtype="=f8") - z = np.asarray(p_fields[total:total+count, pz], dtype="=f8") - total += count - yield ptype, (x, y, z) + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + pxyz = np.asarray(p_fields[data_file.start:data_file.end, (px,py,pz)], dtype="=f8") + yield "io", pxyz.T + + def _yield_coordinates(self, data_file, needed_ptype=None): + px, py, pz = self._position_fields + p_fields = self._handle["/tracer particles"] + pxyz = np.asarray(p_fields[data_file.start:data_file.end, (px,py,pz)], dtype="=f8") + yield ("io", pxyz) def _read_particle_fields(self, chunks, ptf, selector): chunks = list(chunks) @@ -207,24 +191,20 @@ def _read_particle_fields(self, chunks, ptf, selector): data_files.update(obj.data_files) px, py, pz = self._position_fields p_fields = self._handle["/tracer particles"] - assert(len(data_files) == 1) - for data_file in sorted(data_files): - pcount = self._count_particles(data_file)["io"] + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + si, ei = data_file.start, data_file.end + # This should just be a single item for ptype, field_list in sorted(ptf.items()): - total = 0 - while total < pcount: - count = min(self._chunksize, pcount - total) - x = np.asarray(p_fields[total:total+count, px], dtype="=f8") - y = np.asarray(p_fields[total:total+count, py], dtype="=f8") - z = np.asarray(p_fields[total:total+count, pz], dtype="=f8") - total += count - mask = selector.select_points(x, y, z, 0.0) - del x, y, z - if mask is None: continue - for field in field_list: - fi = self._particle_fields[field] - data = p_fields[total-count:total, fi] - yield (ptype, field), data[mask] + x = np.asarray(p_fields[si:ei, px], dtype="=f8") + y = np.asarray(p_fields[si:ei, py], dtype="=f8") + z = np.asarray(p_fields[si:ei, pz], dtype="=f8") + mask = selector.select_points(x, y, z, 0.0) + del x, y, z + if mask is None: continue + for field in field_list: + fi = self._particle_fields[field] + data = p_fields[si:ei, fi] + yield (ptype, field), data[mask] def _initialize_index(self, data_file, regions): p_fields = self._handle["/tracer particles"] @@ -246,9 +226,15 @@ def _initialize_index(self, data_file, regions): ind += self._chunksize return morton + _pcount = None def _count_particles(self, data_file): - pcount = {"io": self._handle["/localnp"][:].sum()} - return pcount + if self._pcount is None: + self._pcount = self._handle["/localnp"][:].sum() + si, ei = 
data_file.start, data_file.end + pcount = self._pcount + if None not in (si, ei): + pcount = np.clip(pcount - si, 0, ei - si) + return {'io': pcount} def _identify_fields(self, data_file): fields = [("io", field) for field in self._particle_fields] diff --git a/yt/frontends/flash/tests/test_outputs.py b/yt/frontends/flash/tests/test_outputs.py index d6043012ee9..ebd288273d9 100644 --- a/yt/frontends/flash/tests/test_outputs.py +++ b/yt/frontends/flash/tests/test_outputs.py @@ -1,28 +1,14 @@ -""" -FLASH frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.testing import \ assert_equal, \ requires_file, \ - units_override_check + units_override_check, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ requires_ds, \ small_patch_amr, \ data_dir_load, \ - sph_answer + nbody_answer from yt.frontends.flash.api import FLASHDataset, \ FLASHParticleDataset from collections import OrderedDict @@ -57,16 +43,22 @@ def test_FLASHDataset(): def test_units_override(): units_override_check(sloshing) +@requires_file(sloshing) +def test_mu(): + ds = data_dir_load(sloshing) + sp = ds.sphere("c", (0.1, "unitary")) + assert np.all(sp["gas","mean_molecular_weight"] == + ds.parameters["eos_singlespeciesa"]) + fid_1to3_b1 = "fiducial_1to3_b1/fiducial_1to3_b1_hdf5_part_0080" fid_1to3_b1_fields = OrderedDict( [ - (("deposit", "all_density"), None), - (("deposit", "all_count"), None), - (("deposit", "all_cic"), None), - (("deposit", "all_cic_velocity_x"), ("deposit", "all_cic")), - (("deposit", "all_cic_velocity_y"), ("deposit", "all_cic")), - (("deposit", "all_cic_velocity_z"), ("deposit", "all_cic")), + (("all", "particle_mass"), None), + (("all", "particle_ones"), None), + (("all", "particle_velocity_x"), ("all", "particle_mass")), + (("all", "particle_velocity_y"), ("all", "particle_mass")), + (("all", "particle_velocity_z"), ("all", "particle_mass")), ] ) @@ -75,6 +67,12 @@ def test_units_override(): def test_FLASHParticleDataset(): assert isinstance(data_dir_load(fid_1to3_b1), FLASHParticleDataset) +@requires_file(fid_1to3_b1) +def test_FLASHParticleDataset_selection(): + ds = data_dir_load(fid_1to3_b1) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() + dens_turb_mag = 'DensTurbMag/DensTurbMag_hdf5_plt_cnt_0015' @requires_file(dens_turb_mag) @@ -93,6 +91,6 @@ def test_FLASH25_dataset(): @requires_ds(fid_1to3_b1, big_data=True) def test_fid_1to3_b1(): ds = data_dir_load(fid_1to3_b1) - for test in sph_answer(ds, 'fiducial_1to3_b1_hdf5_part_0080', 6684119, fid_1to3_b1_fields): + for test in nbody_answer(ds, 'fiducial_1to3_b1_hdf5_part_0080', 6684119, fid_1to3_b1_fields): test_fid_1to3_b1.__name__ = test.description yield test diff --git a/yt/frontends/gadget/api.py b/yt/frontends/gadget/api.py index 9652d057a92..599324ae9ec 100644 --- a/yt/frontends/gadget/api.py +++ b/yt/frontends/gadget/api.py @@ -1,19 +1,3 @@ -""" -API for Gadget frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014-2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ GadgetDataset, \ GadgetHDF5Dataset, \ diff --git a/yt/frontends/gadget/data_structures.py b/yt/frontends/gadget/data_structures.py index 850fbb5a981..40fae8eba75 100644 --- a/yt/frontends/gadget/data_structures.py +++ b/yt/frontends/gadget/data_structures.py @@ -1,19 +1,3 @@ -""" -Data structures for Gadget frontend - - - - -""" -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.extern.six import string_types from yt.funcs import only_on_root from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np @@ -24,9 +8,9 @@ from yt.data_objects.static_output import \ ParticleFile from yt.frontends.sph.data_structures import \ - SPHDataset -from yt.geometry.particle_geometry_handler import \ - ParticleIndex + SPHDataset, \ + SPHParticleIndex +from yt.utilities.chemical_formulas import default_mu from yt.utilities.cosmology import \ Cosmology from yt.utilities.fortran_utils import read_record @@ -41,9 +25,8 @@ from .fields import \ GadgetFieldInfo - def _fix_unit_ordering(unit): - if isinstance(unit[0], string_types): + if isinstance(unit[0], str): unit = unit[1], unit[0] return unit @@ -63,7 +46,7 @@ class GadgetBinaryHeader(object): def __init__(self, filename, header_spec): self.filename = filename - if isinstance(header_spec, string_types): + if isinstance(header_spec, str): header_spec = [header_spec] self.spec = [GadgetDataset._setup_binary_spec(hs, gadget_header_specs) for hs in header_spec] @@ -182,47 +165,81 @@ def validate(self): class GadgetBinaryFile(ParticleFile): - def __init__(self, ds, io, filename, file_id): - header = ds._header + + def __init__(self, ds, io, filename, file_id, range=None): + header = GadgetBinaryHeader(filename, ds._header.spec) self.header = header.value self._position_offset = header.position_offset with header.open() as f: self._file_size = f.seek(0, os.SEEK_END) - super(GadgetBinaryFile, self).__init__(ds, io, filename, file_id) + super(GadgetBinaryFile, self).__init__(ds, io, filename, file_id, range) - def _calculate_offsets(self, field_list): + def _calculate_offsets(self, field_list, pcounts): + # Note that we ignore pcounts here because it's the global count. We + # just want the local count, which we store here. self.field_offsets = self.io._calculate_field_offsets( - field_list, self.total_particles, - self._position_offset, self._file_size) + field_list, self.header['Npart'].copy(), self._position_offset, + self.start, self._file_size) +class GadgetBinaryIndex(SPHParticleIndex): + + def __init__(self, ds, dataset_type): + super(GadgetBinaryIndex, self).__init__(ds, dataset_type) + self._initialize_index() + + def _initialize_index(self): + # Normally this function is called during field detection. We call it + # here because we need to know which fields exist on-disk so that we can + # read in the smoothing lengths for SPH data before we construct the + # Morton bitmaps. 
+ self._detect_output_fields() + super(GadgetBinaryIndex, self)._initialize_index() + + def _initialize_frontend_specific(self): + super(GadgetBinaryIndex, self)._initialize_frontend_specific() + self.io._float_type = self.ds._header.float_type class GadgetDataset(SPHDataset): - _index_class = ParticleIndex + _index_class = GadgetBinaryIndex _file_class = GadgetBinaryFile _field_info_class = GadgetFieldInfo _particle_mass_name = "Mass" _particle_coordinates_name = "Coordinates" _particle_velocity_name = "Velocities" + _sph_ptypes = ('Gas',) _suffix = "" def __init__(self, filename, dataset_type="gadget_binary", additional_fields=(), - unit_base=None, n_ref=64, - over_refine_factor=1, + unit_base=None, + index_order=None, + index_filename=None, + kdtree_filename=None, kernel_name=None, - index_ptype="all", - bounding_box=None, - header_spec="default", - field_spec="default", - ptype_spec="default", + bounding_box = None, + header_spec = "default", + field_spec = "default", + ptype_spec = "default", + long_ids = False, units_override=None, + mean_molecular_weight=None, + header_offset = 0, unit_system="cgs", use_dark_factor = False, w_0 = -1.0, w_a = 0.0): if self._instantiated: return + # Check if filename is a directory + if os.path.isdir(filename): + # Get the .0 snapshot file. We know there's only 1 and it's valid since we + # came through _is_valid in load() + for f in os.listdir(filename): + fname = os.path.join(filename, f) + if ('.0' in f) and ('.ewah' not in f) and os.path.isfile(fname): + filename = os.path.join(filename, f) + break self._header = GadgetBinaryHeader(filename, header_spec) header_size = self._header.size if header_size != [256]: @@ -241,14 +258,21 @@ def __init__(self, filename, dataset_type="gadget_binary", field_spec, gadget_field_specs) self._ptype_spec = self._setup_binary_spec( ptype_spec, gadget_ptype_specs) - self.index_ptype = index_ptype self.storage_filename = None + if long_ids: + self._id_dtype = 'u8' + else: + self._id_dtype = 'u4' + self.long_ids = long_ids + self.header_offset = header_offset if unit_base is not None and "UnitLength_in_cm" in unit_base: # We assume this is comoving, because in the absence of comoving # integration the redshift will be zero. 
unit_base['cmcm'] = 1.0 / unit_base["UnitLength_in_cm"] self._unit_base = unit_base if bounding_box is not None: + # This ensures that we know a bounding box has been applied + self._domain_override = True bbox = np.array(bounding_box, dtype="float64") if bbox.shape == (2, 3): bbox = bbox.transpose() @@ -266,8 +290,11 @@ def __init__(self, filename, dataset_type="gadget_binary", self.w_a = w_a super(GadgetDataset, self).__init__( - filename, dataset_type=dataset_type, unit_system=unit_system, - n_ref=n_ref, over_refine_factor=over_refine_factor, + filename, dataset_type=dataset_type, + unit_system=unit_system, + index_order=index_order, + index_filename=index_filename, + kdtree_filename=kdtree_filename, kernel_name=kernel_name) if self.cosmological_simulation: self.time_unit.convert_to_units('s/h') @@ -277,10 +304,14 @@ def __init__(self, filename, dataset_type="gadget_binary", self.time_unit.convert_to_units('s') self.length_unit.convert_to_units('kpc') self.mass_unit.convert_to_units('Msun') + if mean_molecular_weight is None: + self.mu = default_mu + else: + self.mu = mean_molecular_weight @classmethod def _setup_binary_spec(cls, spec, spec_dict): - if isinstance(spec, string_types): + if isinstance(spec, str): _hs = () for hs in spec.split("+"): _hs += spec_dict[hs] @@ -305,11 +336,11 @@ def _parse_parameter_file(self): # Set standard values # We may have an overridden bounding box. - if self.domain_left_edge is None: - self.domain_left_edge = np.zeros(3, "float64") - self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"] - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + if self.domain_left_edge is None and hvals['BoxSize'] != 0: + self.domain_left_edge = np.zeros(3, "float64") + self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"] + + self.domain_dimensions = np.ones(3, "int32") self.periodicity = (True, True, True) self.cosmological_simulation = 1 @@ -468,32 +499,54 @@ def _is_valid(cls, *args, **kwargs): header_spec = kwargs['header_spec'] else: header_spec = 'default' - header = GadgetBinaryHeader(args[0], header_spec) + # Check to see if passed filename is a directory. If so, use it to get + # the .0 snapshot file. Make sure there's only one such file, otherwise + # there's an ambiguity about which file the user wants. Ignore ewah files + if os.path.isdir(args[0]): + valid_files = [] + for f in os.listdir(args[0]): + fname = os.path.join(args[0], f) + if ('.0' in f) and ('.ewah' not in f) and os.path.isfile(fname): + valid_files.append(f) + if len(valid_files) == 0: + return False + elif len(valid_files) > 1: + return False + else: + validated_file = os.path.join(args[0], valid_files[0]) + else: + validated_file = args[0] + header = GadgetBinaryHeader(validated_file, header_spec) return header.validate() + + class GadgetHDF5Dataset(GadgetDataset): _file_class = ParticleFile + _index_class = SPHParticleIndex _field_info_class = GadgetFieldInfo _particle_mass_name = "Masses" + _sph_ptypes = ('PartType0',) _suffix = ".hdf5" def __init__(self, filename, dataset_type="gadget_hdf5", - unit_base=None, n_ref=64, - over_refine_factor=1, + unit_base=None, + index_order=None, + index_filename=None, kernel_name=None, - index_ptype="all", bounding_box=None, units_override=None, unit_system="cgs"): self.storage_filename = None filename = os.path.abspath(filename) if units_override is not None: - raise RuntimeError("units_override is not supported for GadgetHDF5Dataset. 
" + - "Use unit_base instead.") + raise RuntimeError( + "units_override is not supported for GadgetHDF5Dataset. " + "Use unit_base instead.") super(GadgetHDF5Dataset, self).__init__( - filename, dataset_type, unit_base=unit_base, n_ref=n_ref, - over_refine_factor=over_refine_factor, index_ptype=index_ptype, + filename, dataset_type, unit_base=unit_base, + index_order=index_order, index_filename=index_filename, kernel_name=kernel_name, bounding_box=bounding_box, unit_system=unit_system) @@ -530,13 +583,12 @@ def _set_owls_eagle(self): self.omega_matter = self.parameters["Omega0"] self.hubble_constant = self.parameters["HubbleParam"] - if self.domain_left_edge is None: - self.domain_left_edge = np.zeros(3, "float64") - self.domain_right_edge = np.ones( - 3, "float64") * self.parameters["BoxSize"] + if self.domain_left_edge is None and self.parameters['BoxSize'] != 0: + self.domain_left_edge = np.zeros(3, "float64") + self.domain_right_edge = \ + np.ones(3, "float64") * self.parameters["BoxSize"] - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + self.domain_dimensions = np.ones(3, "int32") self.cosmological_simulation = 1 self.periodicity = (True, True, True) @@ -581,4 +633,12 @@ def _is_valid(self, *args, **kwargs): except Exception: valid = False pass + + try: + fh = h5py.File(args[0], mode='r') + valid = fh["Header"].attrs["Code"].decode("utf-8") != "SWIFT" + fh.close() + except (IOError, KeyError, ImportError): + pass + return valid diff --git a/yt/frontends/gadget/definitions.py b/yt/frontends/gadget/definitions.py index b4f5e06cfd1..29ed55c7237 100644 --- a/yt/frontends/gadget/definitions.py +++ b/yt/frontends/gadget/definitions.py @@ -1,19 +1,3 @@ -""" -Gadget definitions - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - gadget_header_specs = dict( default = (('Npart', 6, 'i'), ('Massarr', 6, 'd'), diff --git a/yt/frontends/gadget/fields.py b/yt/frontends/gadget/fields.py index 4096324f77b..a07db06bcfe 100644 --- a/yt/frontends/gadget/fields.py +++ b/yt/frontends/gadget/fields.py @@ -1,22 +1,8 @@ -""" -Gadget-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.frontends.sph.fields import SPHFieldInfo -from yt.fields.particle_fields import add_volume_weighted_smoothed_field from yt.utilities.physical_constants import mp, kb +from yt.utilities.physical_ratios import \ + _primordial_mass_fraction + class GadgetFieldInfo(SPHFieldInfo): @@ -51,10 +37,10 @@ def _Fraction(field, data): return data[(ptype, 'FourMetalFractions')][:,i] return _Fraction - self.add_field( (ptype, metal_name+"_fraction"), - sampling_type="particle", - function=_Fraction_wrap(i), - units="") + self.add_field((ptype, metal_name+"_fraction"), + sampling_type="particle", + function=_Fraction_wrap(i), + units="") # add the metal density fields def _Density_wrap(i): @@ -63,16 +49,16 @@ def _Metal_density(field, data): data[(ptype, 'density')] return _Metal_density - self.add_field( (ptype, metal_name+"_density"), - sampling_type="particle", - function=_Density_wrap(i), - units=self.ds.unit_system["density"]) + self.add_field((ptype, metal_name+"_density"), + sampling_type="particle", + function=_Density_wrap(i), + units=self.ds.unit_system["density"]) def setup_gas_particle_fields(self, ptype): if (ptype, "ElectronAbundance") in self.ds.field_list: def _temperature(field, data): # Assume cosmic abundances - x_H = 0.76 + x_H = _primordial_mass_fraction["H"] gamma = 5.0/3.0 a_e = data[ptype, 'ElectronAbundance'] mu = 4.0 / (3.0 * x_H + 1.0 + 4.0 * x_H * a_e) @@ -80,30 +66,16 @@ def _temperature(field, data): return ret.in_units(self.ds.unit_system["temperature"]) else: def _temperature(field, data): - # Assume cosmic abundances - x_H = 0.76 gamma = 5.0/3.0 - if data.has_field_parameter("mean_molecular_weight"): - mu = data.get_field_parameter("mean_molecular_weight") - else: - # Assume zero ionization - mu = 4.0 / (3.0 * x_H + 1.0) - ret = data[ptype, "InternalEnergy"]*(gamma-1)*mu*mp/kb + ret = data[ptype, "InternalEnergy"]*(gamma-1)*data.ds.mu*mp/kb return ret.in_units(self.ds.unit_system["temperature"]) - self.add_field( - (ptype, "Temperature"), - sampling_type="particle", - function=_temperature, - units=self.ds.unit_system["temperature"]) + self.add_field((ptype, "Temperature"), + sampling_type="particle", + function=_temperature, + units=self.ds.unit_system["temperature"]) self.alias((ptype, 'temperature'), (ptype, 'Temperature')) - - # For now, we hardcode num_neighbors. We should make this configurable - # in the future. - num_neighbors = 64 - fn = add_volume_weighted_smoothed_field( - ptype, "particle_position", "particle_mass", "smoothing_length", - "density", "Temperature", self, num_neighbors) - - # Alias ("gas", "temperature") to the new smoothed Temperature field - self.alias(("gas", "temperature"), fn[0]) + # need to do this manually since that automatic aliasing that happens + # in the FieldInfoContainer base class has already happened at this + # point + self.alias(('gas', 'temperature'), (ptype, 'Temperature')) diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 4e9054035ea..2b2ac4da678 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -1,28 +1,8 @@ -""" -Gadget data-file handling functions - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np import os -from yt.extern.six import string_types -from yt.utilities.io_handler import \ - BaseIOHandler -from yt.utilities.lib.geometry_utils import \ - compute_morton +from yt.frontends.sph.io import \ + IOHandlerSPH from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5py @@ -31,7 +11,7 @@ SNAP_FORMAT_2_OFFSET -class IOHandlerGadgetHDF5(BaseIOHandler): +class IOHandlerGadgetHDF5(IOHandlerSPH): _dataset_type = "gadget_hdf5" _vector_fields = ("Coordinates", "Velocity", "Velocities") _known_ptypes = gadget_hdf5_ptypes @@ -59,95 +39,137 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files, key=lambda x: x.filename): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + si, ei = data_file.start, data_file.end f = h5py.File(data_file.filename, mode="r") # This double-reads for ptype, field_list in sorted(ptf.items()): if data_file.total_particles[ptype] == 0: continue - x = f["/%s/Coordinates" % ptype][:, 0].astype("float64") - y = f["/%s/Coordinates" % ptype][:, 1].astype("float64") - z = f["/%s/Coordinates" % ptype][:, 2].astype("float64") - yield ptype, (x, y, z) + c = f["/%s/Coordinates" % ptype][si:ei, :].astype("float64") + x, y, z = (np.squeeze(_) for _ in np.split(c, 3, axis=1)) + if ptype == self.ds._sph_ptypes[0]: + pdtype = c.dtype + pshape = c.shape + hsml = self._get_smoothing_length(data_file, pdtype, pshape) + else: + hsml = 0.0 + yield ptype, (x, y, z), hsml f.close() + def _yield_coordinates(self, data_file, needed_ptype=None): + si, ei = data_file.start, data_file.end + f = h5py.File(data_file.filename, "r") + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") + np.clip(pcount - si, 0, ei - si, out=pcount) + pcount = pcount.sum() + for key in f.keys(): + if not key.startswith("PartType"): + continue + if "Coordinates" not in f[key]: + continue + if needed_ptype and key != needed_ptype: + continue + ds = f[key]["Coordinates"][si:ei,...] + dt = ds.dtype.newbyteorder("N") # Native + pos = np.empty(ds.shape, dtype=dt) + pos[:] = ds + yield key, pos + f.close() + + def _get_smoothing_length(self, data_file, position_dtype, position_shape): + ptype = self.ds._sph_ptypes[0] + ind = int(ptype[-1]) + si, ei = data_file.start, data_file.end + with h5py.File(data_file.filename, "r") as f: + pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") + pcount = np.clip(pcount - si, 0, ei - si) + ds = f[ptype]["SmoothingLength"][si:ei,...] + dt = ds.dtype.newbyteorder("N") # Native + if position_dtype is not None and dt < position_dtype: + # Sometimes positions are stored in double precision + # but smoothing lengths are stored in single precision. + # In these cases upcast smoothing length to double precision + # to avoid ValueErrors when we pass these arrays to Cython. 
+ dt = position_dtype + hsml = np.empty(ds.shape, dtype=dt) + hsml[:] = ds + return hsml + def _read_particle_fields(self, chunks, ptf, selector): # Now we have all the sizes, and we can allocate data_files = set([]) for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files, key=lambda x: x.filename): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + si, ei = data_file.start, data_file.end f = h5py.File(data_file.filename, mode="r") for ptype, field_list in sorted(ptf.items()): if data_file.total_particles[ptype] == 0: continue g = f["/%s" % ptype] - coords = g["Coordinates"][:].astype("float64") - mask = selector.select_points( - coords[:, 0], coords[:, 1], coords[:, 2], 0.0) - del coords + if getattr(selector, 'is_all_data', False): + mask = slice(None, None, None) + mask_sum = data_file.total_particles[ptype] + hsmls = None + else: + coords = g["Coordinates"][si:ei].astype("float64") + if ptype == 'PartType0': + hsmls = self._get_smoothing_length(data_file, + g["Coordinates"].dtype, + g["Coordinates"].shape).astype("float64") + else: + hsmls = 0.0 + mask = selector.select_points( + coords[:,0], coords[:,1], coords[:,2], hsmls) + if mask is not None: + mask_sum = mask.sum() + del coords if mask is None: continue for field in field_list: if field in ("Mass", "Masses") and \ ptype not in self.var_mass: - data = np.empty(mask.sum(), dtype="float64") + data = np.empty(mask_sum, dtype="float64") ind = self._known_ptypes.index(ptype) data[:] = self.ds["Massarr"][ind] - elif field in self._element_names: rfield = 'ElementAbundance/' + field - data = g[rfield][:][mask, ...] + data = g[rfield][si:ei][mask, ...] elif field.startswith("Metallicity_"): col = int(field.rsplit("_", 1)[-1]) - data = g["Metallicity"][:, col][mask] + data = g["Metallicity"][si:ei, col][mask] + elif field.startswith("GFM_Metals_"): + col = int(field.rsplit("_", 1)[-1]) + data = g["GFM_Metals"][si:ei, col][mask] elif field.startswith("Chemistry_"): col = int(field.rsplit("_", 1)[-1]) - data = g["ChemistryAbundances"][:, col][mask] + data = g["ChemistryAbundances"][si:ei, col][mask] + elif field == "smoothing_length": + # This is for frontends which do not store + # the smoothing length on-disk, so we do not + # attempt to read them, but instead assume + # that they are calculated in _get_smoothing_length. + if hsmls is None: + hsmls = self._get_smoothing_length(data_file, + g["Coordinates"].dtype, + g["Coordinates"].shape).astype("float64") + data = hsmls[mask] else: - data = g[field][:][mask, ...] + data = g[field][si:ei][mask, ...] 
yield (ptype, field), data f.close() - def _initialize_index(self, data_file, regions): - index_ptype = self.index_ptype - f = h5py.File(data_file.filename, mode="r") - if index_ptype == "all": - pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum() - keys = f.keys() - else: - pt = int(index_ptype[-1]) - pcount = f["/Header"].attrs["NumPart_ThisFile"][pt] - keys = [index_ptype] - morton = np.empty(pcount, dtype='uint64') - ind = 0 - for key in keys: - if not key.startswith("PartType"): - continue - if "Coordinates" not in f[key]: - continue - ds = f[key]["Coordinates"] - dt = ds.dtype.newbyteorder("N") # Native - pos = np.empty(ds.shape, dtype=dt) - pos[:] = ds - regions.add_data_file(pos, data_file.file_id, - data_file.ds.filter_bbox) - morton[ind:ind + pos.shape[0]] = compute_morton( - pos[:, 0], pos[:, 1], pos[:, 2], - data_file.ds.domain_left_edge, - data_file.ds.domain_right_edge, - data_file.ds.filter_bbox) - ind += pos.shape[0] - f.close() - return morton - def _count_particles(self, data_file): + si, ei = data_file.start, data_file.end f = h5py.File(data_file.filename, mode="r") - pcount = f["/Header"].attrs["NumPart_ThisFile"][:] + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") f.close() + if None not in (si, ei): + np.clip(pcount - si, 0, ei - si, out=pcount) npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) return npart @@ -186,10 +208,10 @@ def _identify_fields(self, data_file): for j in gp.keys(): kk = j fields.append((ptype, str(kk))) - elif k == 'Metallicity' and len(g[k].shape) > 1: + elif k in ['Metallicity', 'GFM_Metals'] and len(g[k].shape) > 1: # Vector of metallicity for i in range(g[k].shape[1]): - fields.append((ptype, "Metallicity_%02i" % i)) + fields.append((ptype, "%s_%02i" % (k, i))) elif k == "ChemistryAbundances" and len(g[k].shape) > 1: for i in range(g[k].shape[1]): fields.append((ptype, "Chemistry_%03i" % i)) @@ -208,7 +230,7 @@ def _identify_fields(self, data_file): ZeroMass = object() -class IOHandlerGadgetBinary(BaseIOHandler): +class IOHandlerGadgetBinary(IOHandlerSPH): _dataset_type = "gadget_binary" _vector_fields = (("Coordinates", 3), ("Velocity", 3), @@ -263,16 +285,21 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): poff = data_file.field_offsets tp = data_file.total_particles f = open(data_file.filename, "rb") for ptype in ptf: - # This is where we could implement sub-chunking f.seek(poff[ptype, "Coordinates"], os.SEEK_SET) pos = self._read_field_from_file( f, tp[ptype], "Coordinates") - yield ptype, (pos[:, 0], pos[:, 1], pos[:, 2]) + if ptype == self.ds._sph_ptypes[0]: + f.seek(poff[ptype, "SmoothingLength"], os.SEEK_SET) + hsml = self._read_field_from_file( + f, tp[ptype], "SmoothingLength") + else: + hsml = 0.0 + yield ptype, (pos[:, 0], pos[:, 1], pos[:, 2]), hsml f.close() def _read_particle_fields(self, chunks, ptf, selector): @@ -280,22 +307,38 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): poff = data_file.field_offsets tp = data_file.total_particles f = open(data_file.filename, "rb") for ptype, field_list in sorted(ptf.items()): - f.seek(poff[ptype, "Coordinates"], os.SEEK_SET) - pos = 
self._read_field_from_file( - f, tp[ptype], "Coordinates") - mask = selector.select_points( - pos[:, 0], pos[:, 1], pos[:, 2], 0.0) - del pos + if tp[ptype] == 0: + continue + if getattr(selector, 'is_all_data', False): + mask = slice(None, None, None) + else: + f.seek(poff[ptype, "Coordinates"], os.SEEK_SET) + pos = self._read_field_from_file( + f, tp[ptype], "Coordinates") + if ptype == self.ds._sph_ptypes[0]: + f.seek(poff[ptype, "SmoothingLength"], os.SEEK_SET) + hsml = self._read_field_from_file( + f, tp[ptype], "SmoothingLength") + else: + hsml = 0.0 + mask = selector.select_points( + pos[:, 0], pos[:, 1], pos[:, 2], hsml) + del pos + del hsml if mask is None: continue for field in field_list: if field == "Mass" and ptype not in self.var_mass: - data = np.empty(mask.sum(), dtype="float64") + if getattr(selector, 'is_all_data', False): + size = data_file.total_particles[ptype] + else: + size = mask.sum() + data = np.empty(size, dtype="float64") m = self.ds.parameters["Massarr"][ self._ptypes.index(ptype)] data[:] = m @@ -311,7 +354,7 @@ def _read_field_from_file(self, f, count, name): if count == 0: return if name == "ParticleIDs": - dt = self._endian + "u4" + dt = self._endian + self.ds._id_dtype else: dt = self._endian + self._float_type dt = np.dtype(dt) @@ -327,48 +370,53 @@ def _read_field_from_file(self, f, count, name): arr = arr.reshape((count // factor, factor), order="C") return arr - def _get_morton_from_position(self, data_file, count, offset_count, - regions, DLE, DRE): + def _yield_coordinates(self, data_file, needed_ptype=None): + self._float_type = data_file.ds._header.float_type + self._field_size = np.dtype(self._float_type).itemsize with open(data_file.filename, "rb") as f: # We add on an additionally 4 for the first record. 
- f.seek(data_file._position_offset + 4 + offset_count * 12) - # The first total_particles * 3 values are positions - pp = np.fromfile(f, dtype=self._endian + self._float_type, - count=count * 3) - pp.shape = (count, 3) - pp = pp.astype(self._float_type) - regions.add_data_file(pp, data_file.file_id, - data_file.ds.filter_bbox) - morton = compute_morton(pp[:, 0], pp[:, 1], pp[:, 2], DLE, DRE, - data_file.ds.filter_bbox) - return morton - - def _initialize_index(self, data_file, regions): - DLE = data_file.ds.domain_left_edge - DRE = data_file.ds.domain_right_edge - self._float_type = data_file.ds._header.float_type - if self.index_ptype == "all": - count = sum(data_file.total_particles.values()) - return self._get_morton_from_position( - data_file, count, 0, regions, DLE, DRE) - else: - idpos = self._ptypes.index(self.index_ptype) - count = data_file.total_particles.get(self.index_ptype) - account = [0] + [data_file.total_particles.get(ptype) - for ptype in self._ptypes] - account = np.cumsum(account) - return self._get_morton_from_position( - data_file, account, account[idpos], regions, DLE, DRE) + f.seek(data_file._position_offset + 4) + for ptype, count in data_file.total_particles.items(): + if count == 0: + continue + if needed_ptype is not None and ptype != needed_ptype: + continue + # The first total_particles * 3 values are positions + pp = np.fromfile(f, dtype = self._float_type, count = count*3) + pp.shape = (count, 3) + yield ptype, pp + + def _get_smoothing_length(self, data_file, position_dtype, position_shape): + ret = self._get_field(data_file, 'SmoothingLength', 'Gas') + if position_dtype is not None and ret.dtype != position_dtype: + # Sometimes positions are stored in double precision + # but smoothing lengths are stored in single precision. + # In these cases upcast smoothing length to double precision + # to avoid ValueErrors when we pass these arrays to Cython. + ret = ret.astype(position_dtype) + return ret + + def _get_field(self, data_file, field, ptype): + poff = data_file.field_offsets + tp = data_file.total_particles + with open(data_file.filename, "rb") as f: + f.seek(poff[ptype, field], os.SEEK_SET) + pp = self._read_field_from_file( + f, tp[ptype], field) + return pp def _count_particles(self, data_file): - npart = dict((self._ptypes[i], v) - for i, v in enumerate(data_file.header["Npart"])) + si, ei = data_file.start, data_file.end + pcount = np.array(data_file.header["Npart"]) + if None not in (si, ei): + np.clip(pcount - si, 0, ei - si, out=pcount) + npart = dict((self._ptypes[i], v) for i, v in enumerate(pcount)) return npart # header is 256, but we have 4 at beginning and end for ints _field_size = 4 def _calculate_field_offsets(self, field_list, pcount, - offset, file_size=None): + offset, df_start, file_size=None): # field_list is (ftype, fname) but the blocks are ordered # (fname, ftype) in the file. 
if self._format == 2: @@ -378,9 +426,14 @@ def _calculate_field_offsets(self, field_list, pcount, pos = offset fs = self._field_size offsets = {} + pcount = dict(zip(self._ptypes, pcount)) for field in self._fields: - if not isinstance(field, string_types): + if field == "ParticleIDs" and self.ds.long_ids: + fs = 8 + else: + fs = 4 + if not isinstance(field, str): field = field[0] if not any((ptype, field) in field_list for ptype in self._ptypes): @@ -398,21 +451,30 @@ def _calculate_field_offsets(self, field_list, pcount, continue if (ptype, field) not in field_list: continue + start_offset = df_start * fs + if field in self._vector_fields: + start_offset *= self._vector_fields[field] + pos += start_offset offsets[(ptype, field)] = pos any_ptypes = True + remain_offset = (pcount[ptype] - df_start) * fs if field in self._vector_fields: - pos += self._vector_fields[field] * pcount[ptype] * fs - else: - pos += pcount[ptype] * fs + remain_offset *= self._vector_fields[field] + pos += remain_offset pos += 4 if not any_ptypes: pos -= 8 if file_size is not None: - if (file_size != pos) & (self._format == 1): # ignore the rest of format 2 + if (file_size != pos) & (self._format == 1): #ignore the rest of format 2 + diff = file_size - pos + possible = [] + for ptype, psize in sorted(pcount.items()): + if psize == 0: continue + if float(diff) / psize == int(float(diff)/psize): + possible.append(ptype) mylog.warning("Your Gadget-2 file may have extra " + - "columns or different precision!" + - " (%s file vs %s computed)", - file_size, pos) + "columns or different precision! " + + "(%s diff => %s?)", diff, possible) return offsets def _identify_fields(self, domain): diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 4b017447bbe..f2de24db014 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -1,33 +1,19 @@ -""" -GadgetSimulation class and member functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013-2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import glob import os +from unyt import \ + dimensions, \ + unyt_array +from unyt.unit_registry import \ + UnitRegistry + from yt.convenience import \ load from yt.funcs import \ only_on_root from yt.data_objects.time_series import \ SimulationTimeSeries, DatasetSeries -from yt.units import dimensions -from yt.units.unit_registry import \ - UnitRegistry -from yt.units.yt_array import \ - YTArray from yt.utilities.cosmology import \ Cosmology from yt.utilities.exceptions import \ @@ -67,7 +53,7 @@ class GadgetSimulation(SimulationTimeSeries): >>> gs = yt.simulation("my_simulation.par", "Gadget") >>> gs.get_time_series() >>> for ds in gs: - ... print ds.current_time + ... 
print(ds.current_time) """ @@ -87,14 +73,19 @@ def _set_units(self): omega_matter=self.omega_matter, omega_lambda=self.omega_lambda, unit_registry=self.unit_registry) - self.unit_registry.modify("h", self.hubble_constant) + if 'h' in self.unit_registry: + self.unit_registry.modify('h', self.hubble_constant) + else: + self.unit_registry.add('h', self.hubble_constant, + dimensions.dimensionless) # Comoving lengths - for my_unit in ["m", "pc", "AU", "au"]: + for my_unit in ["m", "pc", "AU"]: new_unit = "%scm" % my_unit # technically not true, but should be ok self.unit_registry.add( new_unit, self.unit_registry.lut[my_unit][0], - dimensions.length, "\\rm{%s}/(1+z)" % my_unit) + dimensions.length, "\\rm{%s}/(1+z)" % my_unit, + prefixable=True) self.length_unit = self.quan(self.unit_base["UnitLength_in_cm"], "cmcm / h", registry=self.unit_registry) self.mass_unit = self.quan(self.unit_base["UnitMass_in_g"], @@ -198,7 +189,7 @@ def get_time_series(self, initial_time=None, final_time=None, >>> # An example using the setup_function keyword >>> def print_time(ds): - ... print ds.current_time + ... print(ds.current_time) >>> gs.get_time_series(setup_function=print_time) >>> for ds in gs: ... SlicePlot(ds, "x", "Density").save() @@ -236,7 +227,7 @@ def get_time_series(self, initial_time=None, final_time=None, initial_time = self.quan(initial_time, "code_time") elif isinstance(initial_time, tuple) and len(initial_time) == 2: initial_time = self.quan(*initial_time) - elif not isinstance(initial_time, YTArray): + elif not isinstance(initial_time, unyt_array): raise RuntimeError( "Error: initial_time must be given as a float or " + "tuple of (value, units).") @@ -250,7 +241,7 @@ def get_time_series(self, initial_time=None, final_time=None, final_time = self.quan(final_time, "code_time") elif isinstance(final_time, tuple) and len(final_time) == 2: final_time = self.quan(*final_time) - elif not isinstance(final_time, YTArray): + elif not isinstance(final_time, unyt_array): raise RuntimeError( "Error: final_time must be given as a float or " + "tuple of (value, units).") diff --git a/yt/frontends/gadget/tests/test_outputs.py b/yt/frontends/gadget/tests/test_outputs.py index 7ce03cfd540..d9007800ed9 100644 --- a/yt/frontends/gadget/tests/test_outputs.py +++ b/yt/frontends/gadget/tests/test_outputs.py @@ -1,19 +1,3 @@ -""" -Gadget frontend tests - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from collections import OrderedDict from itertools import product import os @@ -21,7 +5,8 @@ import tempfile import yt -from yt.testing import requires_file +from yt.testing import requires_file, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ data_dir_load, \ requires_ds, \ @@ -34,6 +19,8 @@ BE_Gadget = "BigEndianGadgetBinary/BigEndianGadgetBinary" LE_SnapFormat2 = "Gadget3-snap-format2/Gadget3-snap-format2" keplerian_ring = "KeplerianRing/keplerian_ring_0020.hdf5" +snap_33 = "snapshot_033/snap_033.0.hdf5" +snap_33_dir = "snapshot_033/" # py2/py3 compat try: @@ -48,10 +35,6 @@ (("gas", "temperature"), None), (("gas", "temperature"), ('gas', 'density')), (('gas', 'velocity_magnitude'), None), - (("deposit", "all_density"), None), - (("deposit", "all_count"), None), - (("deposit", "all_cic"), None), - (("deposit", "PartType0_density"), None), ] ) iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]]) @@ -92,7 +75,7 @@ def test_gadget_hdf5(): @requires_file(keplerian_ring) def test_non_cosmo_dataset(): """ - Non-cosmological datasets may not have the cosmological parametrs in the + Non-cosmological datasets may not have the cosmological parameters in the Header. The code should fall back gracefully when they are not present, with the Redshift set to 0. """ @@ -119,8 +102,29 @@ def test_pid_uniqueness(): pid = ad['ParticleIDs'] assert len(pid) == len(set(pid.v)) +@requires_file(snap_33) +@requires_file(snap_33_dir) +def test_multifile_read(): + """ + Tests to make sure multi-file gadget snapshot can be loaded by passing '.0' file + or by passing the directory containing the multi-file snapshot. + """ + assert isinstance(data_dir_load(snap_33), GadgetDataset) + assert isinstance(data_dir_load(snap_33_dir), GadgetDataset) + +@requires_file(snap_33) +def test_particle_subselection(): + #This checks that we correctly subselect from a dataset, first by making + #sure we get all the particles, then by comparing manual selections against + #them. + ds = data_dir_load(snap_33) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() + + @requires_ds(BE_Gadget) def test_bigendian_field_access(): ds = data_dir_load(BE_Gadget) data = ds.all_data() data['Halo', 'Velocities'] + diff --git a/yt/frontends/gadget_fof/__init__.py b/yt/frontends/gadget_fof/__init__.py index 6a83f0ac731..04ebe5cd2d7 100644 --- a/yt/frontends/gadget_fof/__init__.py +++ b/yt/frontends/gadget_fof/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/gadget_fof/api.py b/yt/frontends/gadget_fof/api.py index 4cb139a263d..465953f811c 100644 --- a/yt/frontends/gadget_fof/api.py +++ b/yt/frontends/gadget_fof/api.py @@ -1,19 +1,3 @@ -""" -API for GadgetFOF frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .data_structures import \ GadgetFOFParticleIndex, \ GadgetFOFHDF5File, \ diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 2c71103760a..a2bb1bd9fc3 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -1,19 +1,3 @@ -""" -Data structures for GadgetFOF frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from collections import defaultdict from functools import partial from yt.utilities.on_demand_imports import _h5py as h5py @@ -25,7 +9,7 @@ from yt.data_objects.data_containers import \ YTSelectionContainer from yt.data_objects.static_output import \ - Dataset + ParticleDataset from yt.frontends.gadget.data_structures import \ _fix_unit_ordering from yt.frontends.gadget_fof.fields import \ @@ -44,9 +28,6 @@ mylog class GadgetFOFParticleIndex(ParticleIndex): - def __init__(self, ds, dataset_type): - super(GadgetFOFParticleIndex, self).__init__(ds, dataset_type) - def _calculate_particle_count(self): """ Calculate the total number of each type of particle. @@ -110,14 +91,15 @@ def _detect_output_fields(self): ds.field_units.update(units) ds.particle_types_raw = ds.particle_types - def _setup_geometry(self): - super(GadgetFOFParticleIndex, self)._setup_geometry() + def _setup_data_io(self): + super(GadgetFOFParticleIndex, self)._setup_data_io() + self._setup_filenames() self._calculate_particle_count() self._calculate_particle_index_starts() self._calculate_file_offset_map() class GadgetFOFHDF5File(HaloCatalogFile): - def __init__(self, ds, io, filename, file_id): + def __init__(self, ds, io, filename, file_id, frange): with h5py.File(filename, mode="r") as f: self.header = \ dict((str(field), val) @@ -127,11 +109,9 @@ def __init__(self, ds, io, filename, file_id): self.group_subs_sum = f["Group/GroupNsubs"][()].sum() \ if "Group/GroupNsubs" in f else 0 self.total_ids = self.header["Nids_ThisFile"] - self.total_particles = \ - {"Group": self.header["Ngroups_ThisFile"], - "Subhalo": self.header["Nsubgroups_ThisFile"]} self.total_offset = 0 - super(GadgetFOFHDF5File, self).__init__(ds, io, filename, file_id) + super(GadgetFOFHDF5File, self).__init__( + ds, io, filename, file_id, frange) def _read_particle_positions(self, ptype, f=None): """ @@ -151,17 +131,14 @@ def _read_particle_positions(self, ptype, f=None): return pos -class GadgetFOFDataset(Dataset): +class GadgetFOFDataset(ParticleDataset): _index_class = GadgetFOFParticleIndex _file_class = GadgetFOFHDF5File _field_info_class = GadgetFOFFieldInfo def __init__(self, filename, dataset_type="gadget_fof_hdf5", - n_ref=16, over_refine_factor=1, index_ptype="all", + index_order=None, index_filename=None, unit_base=None, units_override=None, unit_system="cgs"): - self.n_ref = n_ref - self.over_refine_factor = over_refine_factor - self.index_ptype = index_ptype if unit_base is not None and "UnitLength_in_cm" in unit_base: # We assume this is comoving, because in the absence of comoving # integration the redshift will be zero. 
@@ -170,9 +147,10 @@ def __init__(self, filename, dataset_type="gadget_fof_hdf5", if units_override is not None: raise RuntimeError("units_override is not supported for GadgetFOFDataset. "+ "Use unit_base instead.") - super(GadgetFOFDataset, self).__init__(filename, dataset_type, - units_override=units_override, - unit_system=unit_system) + super(GadgetFOFDataset, self).__init__( + filename, dataset_type, units_override=units_override, + index_order=index_order, index_filename=index_filename, + unit_system=unit_system) def add_field(self, *args, **kwargs): super(GadgetFOFDataset, self).add_field(*args, **kwargs) @@ -212,8 +190,7 @@ def _parse_parameter_file(self): self.domain_left_edge = np.zeros(3, "float64") self.domain_right_edge = np.ones(3, "float64") * \ self.parameters["BoxSize"] - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + self.domain_dimensions = np.ones(3, "int32") self.cosmological_simulation = 1 self.periodicity = (True, True, True) self.current_redshift = self.parameters["Redshift"] @@ -324,7 +301,7 @@ def _setup_geometry(self): ndoms = self.real_ds.file_count cls = self.real_ds._file_class self.data_files = \ - [cls(self.dataset, self.io, template % {'num':i}, i) + [cls(self.dataset, self.io, template % {'num':i}, i, None) for i in range(ndoms)] else: self.data_files = self.real_ds.index.data_files @@ -437,15 +414,17 @@ def _get_halo_values(self, ptype, identifiers, fields, return data -class GadgetFOFHaloDataset(Dataset): +class GadgetFOFHaloDataset(ParticleDataset): _index_class = GadgetFOFHaloParticleIndex _file_class = GadgetFOFHDF5File _field_info_class = GadgetFOFHaloFieldInfo def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"): self.real_ds = ds - self.particle_types_raw = self.real_ds.particle_types_raw - self.particle_types = self.particle_types_raw + for attr in ['filename_template', 'file_count', + 'particle_types_raw', 'particle_types', + 'periodicity']: + setattr(self, attr, getattr(self.real_ds, attr)) super(GadgetFOFHaloDataset, self).__init__( self.real_ds.parameter_filename, dataset_type) @@ -531,22 +510,22 @@ class GagdetFOFHaloContainer(YTSelectionContainer): >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5") >>> >>> halo = ds.halo("Group", 0) - >>> print halo.mass + >>> print(halo.mass) 13256.5517578 code_mass - >>> print halo.position + >>> print(halo.position) [ 16.18603706 6.95965052 12.52694607] code_length - >>> print halo.velocity + >>> print(halo.velocity) [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s - >>> print halo["Group_R_Crit200"] + >>> print(halo["Group_R_Crit200"]) [ 0.79668683] code_length >>> >>> # particle ids for this halo - >>> print halo["member_ids"] + >>> print(halo["member_ids"]) [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless >>> >>> # get the first subhalo of this halo >>> subhalo = ds.halo("Subhalo", (0, 0)) - >>> print subhalo["member_ids"] + >>> print(subhalo["member_ids"]) [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless """ diff --git a/yt/frontends/gadget_fof/fields.py b/yt/frontends/gadget_fof/fields.py index d6c219a5e5a..9535a83e9e9 100644 --- a/yt/frontends/gadget_fof/fields.py +++ b/yt/frontends/gadget_fof/fields.py @@ -1,19 +1,3 @@ -""" -GadgetFOF-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index 2407183117f..213ac253343 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -1,19 +1,3 @@ -""" -GadgetFOF data-file handling function - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from collections import defaultdict from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np @@ -43,7 +27,7 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): coords = data_file._get_particle_positions(ptype, f=f) @@ -54,6 +38,16 @@ def _read_particle_coords(self, chunks, ptf): z = coords[:, 2] yield ptype, (x, y, z) + def _yield_coordinates(self, data_file): + ptypes = self.ds.particle_types_raw + with h5py.File(data_file.filename, "r") as f: + for ptype in sorted(ptypes): + pcount = data_file.total_particles[ptype] + if pcount == 0: continue + coords = f[ptype]["%sPos" % ptype][()].astype("float64") + coords = np.resize(coords, (pcount, 3)) + yield ptype, coords + def _read_offset_particle_field(self, field, data_file, fh): field_data = np.empty(data_file.total_particles["Group"], dtype="float64") fofindex = np.arange(data_file.total_particles["Group"]) + \ @@ -78,7 +72,8 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + si, ei = data_file.start, data_file.end with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): pcount = data_file.total_particles[ptype] @@ -110,7 +105,7 @@ def _read_particle_fields(self, chunks, ptf, selector): if my_div > 1: findex = int(field[field.rfind("_") + 1:]) field_data = field_data[:, findex] - data = field_data[mask] + data = field_data[si:ei][mask] yield (ptype, field), data def _initialize_index(self, data_file, regions): @@ -151,7 +146,14 @@ def _initialize_index(self, data_file, regions): return morton def _count_particles(self, data_file): - return data_file.total_particles + si, ei = data_file.start, data_file.end + pcount = \ + {"Group": data_file.header["Ngroups_ThisFile"], + "Subhalo": data_file.header["Nsubgroups_ThisFile"]} + if None not in (si, ei): + for ptype in pcount: + pcount[ptype] = np.clip(pcount[ptype]-si, 0, ei-si) + return pcount def _identify_fields(self, data_file): fields = [] diff --git a/yt/frontends/gadget_fof/tests/test_outputs.py b/yt/frontends/gadget_fof/tests/test_outputs.py index 376fa9ae7a4..8d82072f068 100644 --- a/yt/frontends/gadget_fof/tests/test_outputs.py +++ b/yt/frontends/gadget_fof/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -GadgetFOF frontend 
tests using gadget_fof datasets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.frontends.gadget_fof.api import \ @@ -20,7 +5,8 @@ from yt.testing import \ requires_file, \ assert_equal, \ - assert_array_equal + assert_array_equal, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ FieldValuesTest, \ requires_ds, \ @@ -53,6 +39,11 @@ def test_GadgetFOFDataset(): # fof/subhalo catalog with member particles g298 = "gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5" +@requires_file(g298) +def test_particle_selection(): + ds = data_dir_load(g298) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() @requires_file(g298) def test_subhalos(): diff --git a/yt/frontends/gamer/__init__.py b/yt/frontends/gamer/__init__.py index c74ffcde544..6761e2147af 100644 --- a/yt/frontends/gamer/__init__.py +++ b/yt/frontends/gamer/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/gamer/api.py b/yt/frontends/gamer/api.py index c64c0831155..8c0724c80e5 100644 --- a/yt/frontends/gamer/api.py +++ b/yt/frontends/gamer/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.gamer - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ GAMERGrid, \ GAMERHierarchy, \ diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index b4917ee1834..1a9ecf7078b 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -1,19 +1,3 @@ -from __future__ import division -""" -GAMER-specific data structures - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import stat import numpy as np diff --git a/yt/frontends/gamer/fields.py b/yt/frontends/gamer/fields.py index d88caac0df1..be330231f08 100644 --- a/yt/frontends/gamer/fields.py +++ b/yt/frontends/gamer/fields.py @@ -1,20 +1,5 @@ -""" -GAMER-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.fields.field_info_container import FieldInfoContainer -from yt.utilities.physical_constants import mh, boltzmann_constant_cgs +from yt.utilities.physical_constants import mh, kb b_units = "code_magnetic" pre_units = "code_mass / (code_length*code_time**2)" @@ -76,9 +61,10 @@ def _velocity(field, data): return data["gas", "momentum_%s"%v] / data["gas","density"] return _velocity for v in "xyz": - self.add_field( ("gas","velocity_%s"%v), sampling_type="cell", - function = velocity_xyz(v), - units = unit_system["velocity"] ) + self.add_field(("gas","velocity_%s"%v), + sampling_type="cell", + function = velocity_xyz(v), + units = unit_system["velocity"] ) # ============================================================================ # note that yt internal fields assume @@ -107,31 +93,45 @@ def et(data): # thermal energy per mass (i.e., specific) def _thermal_energy(field, data): return et(data) / data["gamer","Dens"] - self.add_field( ("gas","thermal_energy"), sampling_type="cell", - function = _thermal_energy, - units = unit_system["specific_energy"] ) + self.add_field(("gas","thermal_energy"), + sampling_type="cell", + function = _thermal_energy, + units = unit_system["specific_energy"] ) # total energy per mass def _total_energy(field, data): return data["gamer","Engy"] / data["gamer","Dens"] - self.add_field( ("gas","total_energy"), sampling_type="cell", - function = _total_energy, - units = unit_system["specific_energy"] ) + self.add_field(("gas","total_energy"), + sampling_type="cell", + function = _total_energy, + units = unit_system["specific_energy"] ) # pressure def _pressure(field, data): return et(data)*(data.ds.gamma-1.0) - self.add_field( ("gas","pressure"), sampling_type="cell", - function = _pressure, - units = unit_system["pressure"] ) + self.add_field(("gas","pressure"), + sampling_type="cell", + function = _pressure, + units = unit_system["pressure"] ) + + # mean molecular weight + if hasattr(self.ds, "mu"): + def _mu(field, data): + return data.ds.mu*data["index", "ones"] + + self.add_field(("gas", "mean_molecular_weight"), + sampling_type="cell", + function=_mu, + units="") # temperature def _temperature(field, data): - return data.ds.mu*mh*data["gas","pressure"] / \ - (data["gas","density"]*boltzmann_constant_cgs) - self.add_field( ("gas","temperature"), sampling_type="cell", - function = _temperature, - units = unit_system["temperature"] ) + return data.ds.mu*data["gas","pressure"]*mh / \ + (data["gas","density"]*kb) + self.add_field(("gas","temperature"), + sampling_type="cell", + function = _temperature, + units = unit_system["temperature"] ) # magnetic field aliases --> magnetic_field_x/y/z if self.ds.mhd: diff --git a/yt/frontends/gamer/io.py b/yt/frontends/gamer/io.py index 508ee3c239a..e2b6a9f218e 100644 --- a/yt/frontends/gamer/io.py +++ b/yt/frontends/gamer/io.py @@ -1,19 +1,3 @@ -from __future__ import division -""" -GAMER-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from itertools import groupby diff --git a/yt/frontends/gamer/tests/test_outputs.py b/yt/frontends/gamer/tests/test_outputs.py index 2223f0ed425..e28e8f6a0db 100644 --- a/yt/frontends/gamer/tests/test_outputs.py +++ b/yt/frontends/gamer/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -GAMER frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ assert_equal, \ requires_file, \ diff --git a/yt/frontends/gdf/api.py b/yt/frontends/gdf/api.py index feee39e6e25..99b4205d6f4 100644 --- a/yt/frontends/gdf/api.py +++ b/yt/frontends/gdf/api.py @@ -1,17 +1,3 @@ -""" -API for yt.frontends.gdf - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from .data_structures import \ GDFGrid, \ GDFHierarchy, \ diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index e8a6ef69b5e..9fb6b3a8614 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -1,23 +1,7 @@ -""" -Data structures for GDF. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np import weakref import os -from yt.extern.six import string_types from yt.funcs import \ ensure_tuple, \ just_one, \ @@ -127,7 +111,7 @@ def _parse_index(self): self.dataset.domain_dimensions dx[active_dims] /= self.dataset.refine_by ** levels[i] dxs.append(dx.in_units("code_length")) - dx = self.dataset.arr(dxs, input_units="code_length") + dx = self.dataset.arr(dxs, units="code_length") self.grid_left_edge = self.dataset.domain_left_edge + dx * glis self.grid_dimensions = gdims.astype("int32") self.grid_right_edge = self.grid_left_edge + dx * self.grid_dimensions @@ -204,7 +188,7 @@ def _set_code_unit_attributes(self): self.field_units[field_name] = just_one(field_conv) elif 'field_units' in current_field.attrs: field_units = current_field.attrs['field_units'] - if isinstance(field_units, string_types): + if isinstance(field_units, str): current_field_units = current_field.attrs['field_units'] else: current_field_units = \ @@ -228,7 +212,7 @@ def _set_code_unit_attributes(self): # assign CGS units. setdefaultattr will catch code units # which have already been set via units_override. 
un = unit_name[:-5] - un = un.replace('magnetic', 'magnetic_field', 1) + un = un.replace('magnetic', 'magnetic_field_cgs', 1) unit = unit_system_registry["cgs"][un] setdefaultattr(self, unit_name, self.quan(value, unit)) setdefaultattr(self, unit_name, self.quan(value, unit)) diff --git a/yt/frontends/gdf/definitions.py b/yt/frontends/gdf/definitions.py index eed306ed5b7..307b63a9616 100644 --- a/yt/frontends/gdf/definitions.py +++ b/yt/frontends/gdf/definitions.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/gdf/fields.py b/yt/frontends/gdf/fields.py index edf035ec4ee..de065bd4605 100644 --- a/yt/frontends/gdf/fields.py +++ b/yt/frontends/gdf/fields.py @@ -1,18 +1,3 @@ -""" -GDF-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/gdf/io.py b/yt/frontends/gdf/io.py index de5b58ed88c..7912d3be992 100644 --- a/yt/frontends/gdf/io.py +++ b/yt/frontends/gdf/io.py @@ -1,18 +1,3 @@ -""" -The data-file handling functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.on_demand_imports import _h5py as h5py from yt.funcs import \ diff --git a/yt/frontends/gdf/tests/test_outputs.py b/yt/frontends/gdf/tests/test_outputs.py index 36aa64a83cd..649ca9e08ab 100644 --- a/yt/frontends/gdf/tests/test_outputs.py +++ b/yt/frontends/gdf/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -GDF frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ assert_equal, \ requires_file, \ diff --git a/yt/frontends/gizmo/api.py b/yt/frontends/gizmo/api.py index 50eb5799362..1c2f1becf02 100644 --- a/yt/frontends/gizmo/api.py +++ b/yt/frontends/gizmo/api.py @@ -1,19 +1,3 @@ -""" -API for Gizmo frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .data_structures import \ GizmoDataset diff --git a/yt/frontends/gizmo/data_structures.py b/yt/frontends/gizmo/data_structures.py index cb96bb6c4ee..d4f4a22f59a 100644 --- a/yt/frontends/gizmo/data_structures.py +++ b/yt/frontends/gizmo/data_structures.py @@ -1,18 +1,4 @@ -""" -Data structures for Gizmo frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- +import os from yt.utilities.on_demand_imports import _h5py as h5py @@ -22,6 +8,7 @@ from .fields import \ GizmoFieldInfo + class GizmoDataset(GadgetHDF5Dataset): _field_info_class = GizmoFieldInfo @@ -30,8 +17,22 @@ def _is_valid(self, *args, **kwargs): need_groups = ['Header'] veto_groups = ['FOF', 'Group', 'Subhalo'] valid = True + valid_fname = args[0] + # If passed arg is a directory, look for the .0 file in that dir + if os.path.isdir(args[0]): + valid_files = [] + for f in os.listdir(args[0]): + fname = os.path.join(args[0], f) + if ('.0' in f) and ('.ewah' not in f) and os.path.isfile(fname): + valid_files.append(fname) + if len(valid_files) == 0: + valid = False + elif len(valid_files) > 1: + valid = False + else: + valid_fname = valid_files[0] try: - fh = h5py.File(args[0], mode='r') + fh = h5py.File(valid_fname, mode='r') valid = all(ng in fh["/"] for ng in need_groups) and \ not any(vg in fh["/"] for vg in veto_groups) dmetal = "/PartType0/Metallicity" @@ -42,3 +43,7 @@ def _is_valid(self, *args, **kwargs): valid = False pass return valid + + def _set_code_unit_attributes(self): + super(GizmoDataset, self)._set_code_unit_attributes() + self.magnetic_unit = self.quan(1.0, "gauss") diff --git a/yt/frontends/gizmo/fields.py b/yt/frontends/gizmo/fields.py index 3adeac5e4d2..cb9d23b2c97 100644 --- a/yt/frontends/gizmo/fields.py +++ b/yt/frontends/gizmo/fields.py @@ -1,25 +1,7 @@ -""" -Gizmo-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer from yt.fields.magnetic_field import \ setup_magnetic_field_aliases -from yt.fields.particle_fields import \ - add_volume_weighted_smoothed_field from yt.fields.species_fields import \ add_species_field_by_density, \ setup_species_fields @@ -74,7 +56,7 @@ def __init__(self, *args, **kwargs): super(SPHFieldInfo, self).__init__(*args, **kwargs) if ("PartType0", "Metallicity_00") in self.field_list: self.nuclei_names = metal_elements - self.species_names = ["H", "H_p1"] + metal_elements + self.species_names = ["H_p0", "H_p1"] + metal_elements def setup_particle_fields(self, ptype): FieldInfoContainer.setup_particle_fields(self, ptype) @@ -87,20 +69,17 @@ def setup_particle_fields(self, ptype): def setup_gas_particle_fields(self, ptype): super(GizmoFieldInfo, self).setup_gas_particle_fields(ptype) - def _h_density(field, data): + def _h_p0_density(field, data): x_H = 1.0 - data[(ptype, "He_metallicity")] - \ data[(ptype, "metallicity")] return x_H * data[(ptype, "density")] * \ data[(ptype, "NeutralHydrogenAbundance")] - self.add_field( - (ptype, "H_density"), - sampling_type="particle", - function=_h_density, - units=self.ds.unit_system["density"]) - add_species_field_by_density(self, ptype, "H", particle_type=True) - for suffix in ["density", "fraction", "mass", "number_density"]: - self.alias((ptype, "H_p0_%s" % suffix), (ptype, "H_%s" % suffix)) + self.add_field((ptype, "H_p0_density"), + sampling_type="particle", + function=_h_p0_density, + units=self.ds.unit_system["density"]) + add_species_field_by_density(self, ptype, "H") def _h_p1_density(field, data): x_H = 1.0 - data[(ptype, "He_metallicity")] - \ @@ -108,48 +87,54 @@ def _h_p1_density(field, data): return x_H * data[(ptype, "density")] * \ (1.0 - data[(ptype, "NeutralHydrogenAbundance")]) - self.add_field( - (ptype, "H_p1_density"), - sampling_type="particle", - function=_h_p1_density, - units=self.ds.unit_system["density"]) - add_species_field_by_density(self, ptype, "H_p1", particle_type=True) + self.add_field((ptype, "H_p1_density"), + sampling_type="particle", + function=_h_p1_density, + units=self.ds.unit_system["density"]) + add_species_field_by_density(self, ptype, "H_p1") def _nuclei_mass_density_field(field, data): species = field.name[1][:field.name[1].find("_")] return data[ptype, "density"] * \ data[ptype, "%s_metallicity" % species] - num_neighbors = 64 for species in ['H', 'H_p0', 'H_p1']: for suf in ["_density", "_number_density"]: field = "%s%s" % (species, suf) - fn = add_volume_weighted_smoothed_field( - ptype, "particle_position", "particle_mass", - "smoothing_length", "density", field, - self, num_neighbors) - self.alias(("gas", field), fn[0]) + self.alias(("gas", field), (ptype, field)) + + if (ptype, "ElectronAbundance") in self.field_list: + def _el_number_density(field, data): + return data[ptype, "ElectronAbundance"] * \ + data[ptype, "H_number_density"] + self.add_field((ptype, "El_number_density"), + sampling_type="particle", + function=_el_number_density, + units=self.ds.unit_system["number_density"]) + self.alias(("gas", "El_number_density"), (ptype, "El_number_density")) for species in self.nuclei_names: - self.add_field( - (ptype, "%s_nuclei_mass_density" % species), - sampling_type="particle", - function=_nuclei_mass_density_field, - units=self.ds.unit_system["density"]) + self.add_field((ptype, "%s_nuclei_mass_density" % species), + 
sampling_type="particle", + function=_nuclei_mass_density_field, + units=self.ds.unit_system["density"]) for suf in ["_nuclei_mass_density", "_metallicity"]: field = "%s%s" % (species, suf) - fn = add_volume_weighted_smoothed_field( - ptype, "particle_position", "particle_mass", - "smoothing_length", "density", field, - self, num_neighbors) + self.alias(("gas", field), (ptype, field)) - self.alias(("gas", field), fn[0]) + def _metal_density_field(field, data): + return data[ptype, "metallicity"] * data[ptype, "density"] + self.add_field((ptype, "metal_density"), + sampling_type="local", + function=_metal_density_field, + units=self.ds.unit_system["density"]) + self.alias(("gas", "metal_density"), (ptype, "metal_density")) magnetic_field = "MagneticField" if (ptype, magnetic_field) in self.field_list: setup_magnetic_field_aliases( - self, ptype, magnetic_field, ftype=ptype + self, ptype, magnetic_field ) def setup_star_particle_fields(self, ptype): diff --git a/yt/frontends/gizmo/tests/test_outputs.py b/yt/frontends/gizmo/tests/test_outputs.py index 84518302a52..c75fcbc7270 100644 --- a/yt/frontends/gizmo/tests/test_outputs.py +++ b/yt/frontends/gizmo/tests/test_outputs.py @@ -1,19 +1,3 @@ -""" -Gizmo frontend tests - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from collections import OrderedDict import yt @@ -34,9 +18,6 @@ (("gas", "metallicity"), ('gas', 'density')), (("gas", "O_metallicity"), ('gas', 'density')), (('gas', 'velocity_magnitude'), None), - (("deposit", "all_count"), None), - (("deposit", "all_cic"), None), - (("deposit", "PartType0_density"), None), ] ) @@ -105,6 +86,7 @@ def test_gas_particle_fields(): for species in metal_elements: for suffix in ["nuclei_mass_density", "metallicity"]: derived_fields += ["%s_%s" % (species, suffix)] + derived_fields += ["magnetic_field_%s" % axis for axis in "xyz"] for field in derived_fields: assert (ptype, field) in ds.derived_field_list diff --git a/yt/frontends/halo_catalog/__init__.py b/yt/frontends/halo_catalog/__init__.py index 6a83f0ac731..04ebe5cd2d7 100644 --- a/yt/frontends/halo_catalog/__init__.py +++ b/yt/frontends/halo_catalog/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/halo_catalog/api.py b/yt/frontends/halo_catalog/api.py index 38e0c921623..e647bc11327 100644 --- a/yt/frontends/halo_catalog/api.py +++ b/yt/frontends/halo_catalog/api.py @@ -1,19 +1,3 @@ -""" -API for HaloCatalog frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .data_structures import \ HaloCatalogDataset diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 7d8a4f64297..d8dac77e752 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -1,19 +1,3 @@ -""" -Data structures for HaloCatalog frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np import glob @@ -45,9 +29,9 @@ def _setup_filenames(self): self.dataset.parameter_filename, 0)] class HaloCatalogFile(ParticleFile): - def __init__(self, ds, io, filename, file_id): + def __init__(self, ds, io, filename, file_id, range): super(HaloCatalogFile, self).__init__( - ds, io, filename, file_id) + ds, io, filename, file_id, range) def _read_particle_positions(self, ptype, f=None): raise NotImplementedError @@ -61,6 +45,10 @@ def _get_particle_positions(self, ptype, f=None): dle = self.ds.domain_left_edge.to('code_length').v dw = self.ds.domain_width.to('code_length').v pos = self._read_particle_positions(ptype, f=f) + si, ei = self.start, self.end + if None not in (si, ei): + pos = pos[si:ei] + np.subtract(pos, dle, out=pos) np.mod(pos, dw, out=pos) np.add(pos, dle, out=pos) @@ -68,12 +56,12 @@ def _get_particle_positions(self, ptype, f=None): return pos class HaloCatalogHDF5File(HaloCatalogFile): - def __init__(self, ds, io, filename, file_id): + def __init__(self, ds, io, filename, file_id, range): with h5py.File(filename, mode="r") as f: self.header = dict((field, parse_h5_attr(f, field)) \ for field in f.attrs.keys()) super(HaloCatalogHDF5File, self).__init__( - ds, io, filename, file_id) + ds, io, filename, file_id, range) def _read_particle_positions(self, ptype, f=None): """ @@ -107,10 +95,7 @@ class HaloCatalogDataset(SavedDataset): "domain_left_edge", "domain_right_edge") def __init__(self, filename, dataset_type="halocatalog_hdf5", - n_ref = 16, over_refine_factor = 1, units_override=None, - unit_system="cgs"): - self.n_ref = n_ref - self.over_refine_factor = over_refine_factor + units_override=None, unit_system="cgs"): super(HaloCatalogDataset, self).__init__(filename, dataset_type, units_override=units_override, unit_system=unit_system) @@ -118,8 +103,7 @@ def __init__(self, filename, dataset_type="halocatalog_hdf5", def _parse_parameter_file(self): self.refine_by = 2 self.dimensionality = 3 - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(self.dimensionality, "int32") * nz + self.domain_dimensions = np.ones(self.dimensionality, "int32") self.periodicity = (True, True, True) prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2]) self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix) diff --git a/yt/frontends/halo_catalog/fields.py b/yt/frontends/halo_catalog/fields.py index e54f7437e11..d25dc8f44cc 100644 --- a/yt/frontends/halo_catalog/fields.py +++ b/yt/frontends/halo_catalog/fields.py @@ -1,19 +1,3 @@ -""" -HaloCatalog-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt 
Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 844972f25fa..d16d5a1c703 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -1,30 +1,14 @@ -""" -HaloCatalog data-file handling function - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np -from yt.utilities.exceptions import YTDomainOverflow from yt.funcs import \ mylog, \ parse_h5_attr - +from yt.units.yt_array import \ + uvstack +from yt.utilities.on_demand_imports import _h5py as h5py +from yt.utilities.exceptions import YTDomainOverflow from yt.utilities.io_handler import \ BaseIOHandler - from yt.utilities.lib.geometry_utils import compute_morton @@ -46,7 +30,7 @@ def _read_particle_coords(self, chunks, ptf): for obj in chunk.objs: data_files.update(obj.data_files) pn = "particle_position_%s" - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: units = parse_h5_attr(f[pn % "x"], "units") pos = data_file._get_particle_positions(ptype, f=f) @@ -54,6 +38,16 @@ def _read_particle_coords(self, chunks, ptf): for i in range(3)) yield "halos", (x, y, z) + def _yield_coordinates(self, data_file): + pn = "particle_position_%s" + with h5py.File(data_file.filename, 'r') as f: + units = parse_h5_attr(f[pn % "x"], "units") + x, y, z = (self.ds.arr(f[pn % ax].value.astype("float64"), units) + for ax in 'xyz') + pos = uvstack([x, y, z]).T + pos.convert_to_units('code_length') + yield 'halos', pos + def _read_particle_fields(self, chunks, ptf, selector): # Now we have all the sizes, and we can allocate chunks = list(chunks) @@ -65,7 +59,8 @@ def _read_particle_fields(self, chunks, ptf, selector): for obj in chunk.objs: data_files.update(obj.data_files) pn = "particle_position_%s" - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + si, ei = data_file.start, data_file.end with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): units = parse_h5_attr(f[pn % "x"], "units") @@ -76,7 +71,7 @@ def _read_particle_fields(self, chunks, ptf, selector): del x, y, z if mask is None: continue for field in field_list: - data = f[field][mask].astype("float64") + data = f[field][si:ei][mask].astype("float64") yield (ptype, field), data def _initialize_index(self, data_file, regions): @@ -105,7 +100,11 @@ def _initialize_index(self, data_file, regions): return morton def _count_particles(self, data_file): - return {'halos': data_file.header['num_halos']} + si, ei = data_file.start, data_file.end + nhalos = data_file.header['num_halos'] + if None not in (si, ei): + nhalos = np.clip(nhalos - si, 0, ei - si) + return {'halos': nhalos} def _identify_fields(self, data_file): with 
h5py.File(data_file.filename, mode="r") as f: diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index 4c9c2a8a93e..3562812f0f2 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -halo_catalog frontend tests - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) yt Development Team. All rights reserved. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.convenience import \ diff --git a/yt/frontends/http_stream/api.py b/yt/frontends/http_stream/api.py index 865965b0cd9..554845f7414 100644 --- a/yt/frontends/http_stream/api.py +++ b/yt/frontends/http_stream/api.py @@ -1,19 +1,3 @@ -""" -API for HTTPStream frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ HTTPStreamDataset diff --git a/yt/frontends/http_stream/data_structures.py b/yt/frontends/http_stream/data_structures.py index 70ac8c64e47..3eff4bc0cba 100644 --- a/yt/frontends/http_stream/data_structures.py +++ b/yt/frontends/http_stream/data_structures.py @@ -1,20 +1,3 @@ -""" -Data structures for HTTPStream frontend. - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import json import numpy as np import time @@ -43,14 +26,14 @@ class HTTPStreamDataset(ParticleDataset): def __init__(self, base_url, dataset_type="http_particle_stream", unit_system="cgs", - n_ref=64, over_refine_factor=1): + index_order=None, index_filename=None): if get_requests() is None: raise ImportError( "This functionality depends on the requests package") self.base_url = base_url super(HTTPStreamDataset, self).__init__( "", dataset_type=dataset_type, unit_system=unit_system, - n_ref=n_ref, over_refine_factor=over_refine_factor) + index_order=index_order, index_filename=index_filename) def __repr__(self): return self.base_url @@ -73,8 +56,7 @@ def _parse_parameter_file(self): # Now we get what we need self.domain_left_edge = np.array(header['domain_left_edge'], "float64") self.domain_right_edge = np.array(header['domain_right_edge'], "float64") - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + self.domain_dimensions = np.ones(3, "int32") self.periodicity = (True, True, True) self.current_time = header['current_time'] diff --git a/yt/frontends/http_stream/io.py b/yt/frontends/http_stream/io.py index 7a409206e6e..8653d693cf6 100644 --- a/yt/frontends/http_stream/io.py +++ b/yt/frontends/http_stream/io.py @@ -1,20 +1,3 @@ -""" -HTTPStream data-file handling function - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import \ @@ -63,7 +46,7 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): for ptype in ptf: s = self._open_stream(data_file, (ptype, "Coordinates")) c = np.frombuffer(s, dtype="float64") @@ -76,7 +59,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): for ptype, field_list in sorted(ptf.items()): s = self._open_stream(data_file, (ptype, "Coordinates")) c = np.frombuffer(s, dtype="float64") diff --git a/yt/frontends/moab/__init__.py b/yt/frontends/moab/__init__.py index 00bd2a29881..fcc280c5b40 100644 --- a/yt/frontends/moab/__init__.py +++ b/yt/frontends/moab/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/moab/api.py b/yt/frontends/moab/api.py index b84afacde42..eaf37635ec4 100644 --- a/yt/frontends/moab/api.py +++ b/yt/frontends/moab/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.moab - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. 
-# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ MoabHex8Mesh, \ MoabHex8Hierarchy, \ diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index 0fbe3aff312..59b455d7ff7 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -1,18 +1,3 @@ -""" -Data structures for MOAB Hex8. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import os import numpy as np diff --git a/yt/frontends/moab/definitions.py b/yt/frontends/moab/definitions.py index 626b4ba16b7..8b137891791 100644 --- a/yt/frontends/moab/definitions.py +++ b/yt/frontends/moab/definitions.py @@ -1,16 +1 @@ -""" -Various definitions for various other modules and routines - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - diff --git a/yt/frontends/moab/fields.py b/yt/frontends/moab/fields.py index d39c1ddad0e..2a3190fc731 100644 --- a/yt/frontends/moab/fields.py +++ b/yt/frontends/moab/fields.py @@ -1,16 +1,3 @@ -"""MOAB-specific fields - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/moab/io.py b/yt/frontends/moab/io.py index 370128dc265..3303b7fe851 100644 --- a/yt/frontends/moab/io.py +++ b/yt/frontends/moab/io.py @@ -1,16 +1,3 @@ -"""MOAB-specific fields - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import mylog from yt.utilities.io_handler import BaseIOHandler diff --git a/yt/frontends/moab/tests/test_c5.py b/yt/frontends/moab/tests/test_c5.py index 3d3ff0a61dc..1c9d0b674a2 100644 --- a/yt/frontends/moab/tests/test_c5.py +++ b/yt/frontends/moab/tests/test_c5.py @@ -1,19 +1,3 @@ -""" -Tests of semi-structured meshes in MoabHex8 format. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.testing import \ diff --git a/yt/frontends/open_pmd/__init__.py b/yt/frontends/open_pmd/__init__.py index cf28545aaed..2c656b643d4 100644 --- a/yt/frontends/open_pmd/__init__.py +++ b/yt/frontends/open_pmd/__init__.py @@ -5,11 +5,3 @@ """ -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# Copyright (c) 2015, Daniel Grassinger (HZDR) -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- diff --git a/yt/frontends/open_pmd/api.py b/yt/frontends/open_pmd/api.py index eefe577e764..cfb0979e774 100644 --- a/yt/frontends/open_pmd/api.py +++ b/yt/frontends/open_pmd/api.py @@ -1,19 +1,3 @@ -""" -API for yt.frontends.open_pmd - - - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# Copyright (c) 2015, Daniel Grassinger (HZDR) -# Copyright (c) 2016, Fabian Koller (HZDR) -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- - from .data_structures import \ OpenPMDDataset, \ OpenPMDGrid, \ diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index 913dd128ef1..7563e9cc49d 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -1,19 +1,3 @@ -""" -openPMD data structures - - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# Copyright (c) 2015, Daniel Grassinger (HZDR) -# Copyright (c) 2016, Fabian Koller (HZDR) -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- - from distutils.version import StrictVersion from functools import reduce from operator import mul diff --git a/yt/frontends/open_pmd/fields.py b/yt/frontends/open_pmd/fields.py index c1d792a9fec..2c55ffb6758 100644 --- a/yt/frontends/open_pmd/fields.py +++ b/yt/frontends/open_pmd/fields.py @@ -1,20 +1,3 @@ -""" -openPMD-specific fields - - - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# Copyright (c) 2015, Daniel Grassinger (HZDR) -# Copyright (c) 2016, Fabian Koller (HZDR) -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-# ----------------------------------------------------------------------------- - import numpy as np from yt.fields.field_info_container import FieldInfoContainer @@ -44,7 +27,8 @@ def poynting(field, data): return poynting for ax in "xyz": - self.add_field(("openPMD", "poynting_vector_%s" % ax), sampling_type="cell", + self.add_field(("openPMD", "poynting_vector_%s" % ax), + sampling_type="cell", function=_get_poyn(ax), units="W/m**2") @@ -57,7 +41,8 @@ def _kin_en(field, data): mass = data[ptype, "particle_mass"] * data[ptype, "particle_weighting"] return speed_of_light * np.sqrt(p2 + mass ** 2 * speed_of_light ** 2) - mass * speed_of_light ** 2 - self.add_field((ptype, "particle_kinetic_energy"), sampling_type="particle", + self.add_field((ptype, "particle_kinetic_energy"), + sampling_type="particle", function=_kin_en, units="kg*m**2/s**2") @@ -76,7 +61,8 @@ def velocity(field, data): return velocity for ax in "xyz": - self.add_field((ptype, "particle_velocity_%s" % ax), sampling_type="particle", + self.add_field((ptype, "particle_velocity_%s" % ax), + sampling_type="particle", function=_get_vel(ax), units="m/s") @@ -90,7 +76,8 @@ def ap(field, data): return ap for ax in "xyz": - self.add_field((ptype, "particle_position_%s" % ax), sampling_type="particle", + self.add_field((ptype, "particle_position_%s" % ax), + sampling_type="particle", function=_abs_pos(ax), units="m") diff --git a/yt/frontends/open_pmd/io.py b/yt/frontends/open_pmd/io.py index 2c755b629be..fdbc15f6e9f 100644 --- a/yt/frontends/open_pmd/io.py +++ b/yt/frontends/open_pmd/io.py @@ -1,20 +1,3 @@ -""" -openPMD-specific IO functions - - - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# Copyright (c) 2015, Daniel Grassinger (HZDR) -# Copyright (c) 2016, Fabian Koller (HZDR) -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- - from collections import defaultdict import numpy as np diff --git a/yt/frontends/open_pmd/misc.py b/yt/frontends/open_pmd/misc.py index 0abfeddbb50..4439f9c4cde 100644 --- a/yt/frontends/open_pmd/misc.py +++ b/yt/frontends/open_pmd/misc.py @@ -1,11 +1,3 @@ -# ----------------------------------------------------------------------------- -# Copyright (c) 2016, Fabian Koller (HZDR) -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- - import numpy as np from yt.utilities.logger import ytLogger as mylog @@ -38,11 +30,11 @@ def parse_unit_dimension(unit_dimension): Examples -------- >>> velocity = [1., 0., -1., 0., 0., 0., 0.] - >>> print parse_unit_dimension(velocity) + >>> print(parse_unit_dimension(velocity)) 'm**1*s**-1' >>> magnetic_field = [0., 1., -2., -1., 0., 0., 0.] 
- >>> print parse_unit_dimension(magnetic_field) + >>> print(parse_unit_dimension(magnetic_field)) 'kg**1*s**-2*A**-1' """ if len(unit_dimension) != 7: diff --git a/yt/frontends/open_pmd/tests/test_outputs.py b/yt/frontends/open_pmd/tests/test_outputs.py index c430f241acd..4089bd73bfd 100644 --- a/yt/frontends/open_pmd/tests/test_outputs.py +++ b/yt/frontends/open_pmd/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -openPMD frontend tests - - - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2016, Fabian Koller (HZDR). -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- - from yt.frontends.open_pmd.data_structures import \ OpenPMDDataset from yt.testing import \ diff --git a/yt/frontends/owls/api.py b/yt/frontends/owls/api.py index df8ca293c89..2f0b6f38b6b 100644 --- a/yt/frontends/owls/api.py +++ b/yt/frontends/owls/api.py @@ -1,19 +1,3 @@ -""" -API for OWLS frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ OWLSDataset diff --git a/yt/frontends/owls/data_structures.py b/yt/frontends/owls/data_structures.py index 7a693eb8f37..d0300635d97 100644 --- a/yt/frontends/owls/data_structures.py +++ b/yt/frontends/owls/data_structures.py @@ -1,19 +1,4 @@ -""" -Data structures for OWLS frontend - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- +import os from yt.utilities.on_demand_imports import _h5py as h5py @@ -58,8 +43,22 @@ def _is_valid(self, *args, **kwargs): 'PartType0/ChemicalAbundances', 'RuntimePars', 'HashTable'] valid = True + valid_fname = args[0] + # If passed arg is a directory, look for the .0 file in that dir + if os.path.isdir(args[0]): + valid_files = [] + for f in os.listdir(args[0]): + fname = os.path.join(args[0], f) + if ('.0' in f) and ('.ewah' not in f) and os.path.isfile(fname): + valid_files.append(fname) + if len(valid_files) == 0: + valid = False + elif len(valid_files) > 1: + valid = False + else: + valid_fname = valid_files[0] try: - fileh = h5py.File(args[0], mode='r') + fileh = h5py.File(valid_fname, mode='r') for ng in need_groups: if ng not in fileh["/"]: valid = False diff --git a/yt/frontends/owls/definitions.py b/yt/frontends/owls/definitions.py deleted file mode 100644 index f972f278f76..00000000000 --- a/yt/frontends/owls/definitions.py +++ /dev/null @@ -1,15 +0,0 @@ -""" -OWLS definitions - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- diff --git a/yt/frontends/owls/fields.py b/yt/frontends/owls/fields.py index 6da41fb546e..12601415797 100644 --- a/yt/frontends/owls/fields.py +++ b/yt/frontends/owls/fields.py @@ -1,20 +1,3 @@ -""" -OWLS fields - - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import numpy as np from . import owls_ion_tables as oit @@ -22,8 +5,6 @@ from yt.funcs import \ mylog, download_file from yt.config import ytcfg -from yt.fields.particle_fields import \ - add_volume_weighted_smoothed_field from yt.fields.species_fields import \ add_species_field_by_fraction, \ add_species_field_by_density @@ -31,6 +12,34 @@ SPHFieldInfo +def _get_ion_mass_frac(ion, ftype, itab, data): + # get element symbol from ion string. ion string will + # be a member of the tuple _ions (e.g. si13) + #-------------------------------------------------------- + if ion[0:2].isalpha(): + symbol = ion[0:2].capitalize() + else: + symbol = ion[0:1].capitalize() + + # mass fraction for the element + #-------------------------------------------------------- + m_frac = data[ftype, symbol+"_fraction"] + + # get nH and T for lookup + #-------------------------------------------------------- + log_nH = np.log10( data["PartType0", "H_number_density"] ) + log_T = np.log10( data["PartType0", "Temperature"] ) + + # set the ion table redshift to the snapshot's current redshift + #-------------------------------------------------------- + itab.set_iz( data.ds.current_redshift ) + + # find ion balance using log nH and log T + #-------------------------------------------------------- + i_frac = itab.interp( log_nH, log_T ) + + return m_frac, i_frac + class OWLSFieldInfo(SPHFieldInfo): @@ -87,28 +96,19 @@ def setup_particle_fields(self, ptype): # X_fraction are defined in snapshot #----------------------------------------------- for s in self._elements: - add_species_field_by_fraction(self, ptype, s, - particle_type=True) + field_names = add_species_field_by_fraction(self, ptype, s) + if ptype == self.ds._sph_ptypes[0]: + for fn in field_names: + self.alias(("gas", fn[1]), fn) # this needs to be called after the call to # add_species_field_by_fraction for some reason ... # not sure why yet.
#------------------------------------------------------- if ptype == 'PartType0': - ftype='gas' - elif ptype == 'PartType1': - ftype='dm' - elif ptype == 'PartType2': - ftype='PartType2' - elif ptype == 'PartType3': - ftype='PartType3' - elif ptype == 'PartType4': - ftype='star' - elif ptype == 'PartType5': - ftype='BH' + ftype = 'gas' else: - # to avoid errors while creating particle filters - ftype=ptype + ftype = ptype super(OWLSFieldInfo,self).setup_particle_fields( ptype, num_neighbors=self._num_neighbors, ftype=ftype) @@ -127,7 +127,7 @@ def setup_particle_fields(self, ptype): # this defines the ion density on particles # X_density for all items in self._ions #----------------------------------------------- - self.setup_gas_ion_density_particle_fields( ptype ) + self.setup_gas_ion_particle_fields( ptype ) # this adds the rest of the ion particle fields # X_fraction, X_mass, X_number_density @@ -151,11 +151,33 @@ def setup_particle_fields(self, ptype): # add particle field #--------------------------------------------------- - add_species_field_by_density(self, ptype, yt_ion, - particle_type=True) - - - # add smoothed ion fields + add_species_field_by_density(self, ptype, yt_ion) + + + def _h_p1_density(field, data): + return data[ptype, "H_density"] - data[ptype, "H_p0_density"] + + self.add_field((ptype, "H_p1_density"), + sampling_type="particle", + function=_h_p1_density, + units=self.ds.unit_system["density"]) + + add_species_field_by_density(self, ptype, "H_p1") + for sfx in ["mass", "density", "number_density"]: + fname = "H_p1_" + sfx + self.alias(("gas", fname), (ptype, fname)) + + def _el_number_density(field, data): + n_e = data[ptype, "H_p1_number_density"] + n_e += data[ptype, "He_p1_number_density"] + n_e += 2.0*data[ptype, "He_p2_number_density"] + return n_e + self.add_field((ptype, "El_number_density"), + sampling_type="particle", + function=_el_number_density, + units=self.ds.unit_system["number_density"]) + self.alias(("gas", "El_number_density"), (ptype, "El_number_density")) + + # alias ion fields #----------------------------------------------- for ion in self._ions: @@ -174,23 +196,12 @@ def setup_particle_fields(self, ptype): pstr = "_p" + str(roman-1) yt_ion = symbol + pstr - loaded = [] for sfx in smoothed_suffixes: fname = yt_ion + sfx - fn = add_volume_weighted_smoothed_field( - ptype, "particle_position", "particle_mass", - "smoothing_length", "density", fname, self, - self._num_neighbors) - loaded += fn - - self.alias(("gas", fname), fn[0]) - - self._show_field_errors += loaded - self.find_dependencies(loaded) + self.alias(("gas", fname), (ptype, fname)) - - def setup_gas_ion_density_particle_fields( self, ptype ): + def setup_gas_ion_particle_fields( self, ptype ): """ Sets up particle fields for gas ion densities.
""" # loop over all ions and make fields @@ -213,17 +224,23 @@ def setup_gas_ion_density_particle_fields( self, ptype ): yt_ion = symbol + pstr ftype = ptype - # add ion density field for particles - #--------------------------------------------------- + # add ion density and mass field for this species + #------------------------------------------------ fname = yt_ion + '_density' dens_func = self._create_ion_density_func( ftype, ion ) - self.add_field( (ftype, fname), - sampling_type="particle", - function = dens_func, - units=self.ds.unit_system["density"]) + self.add_field((ftype, fname), + sampling_type="particle", + function = dens_func, + units=self.ds.unit_system["density"]) self._show_field_errors.append( (ftype,fname) ) - + fname = yt_ion + '_mass' + mass_func = self._create_ion_mass_func( ftype, ion ) + self.add_field((ftype, fname), + sampling_type="particle", + function = mass_func, + units=self.ds.unit_system["mass"]) + self._show_field_errors.append( (ftype,fname) ) def _create_ion_density_func( self, ftype, ion ): @@ -232,31 +249,7 @@ def _create_ion_density_func( self, ftype, ion ): def get_owls_ion_density_field(ion, ftype, itab): def _func(field, data): - - # get element symbol from ion string. ion string will - # be a member of the tuple _ions (i.e. si13) - #-------------------------------------------------------- - if ion[0:2].isalpha(): - symbol = ion[0:2].capitalize() - else: - symbol = ion[0:1].capitalize() - - # mass fraction for the element - #-------------------------------------------------------- - m_frac = data[ftype, symbol+"_fraction"] - - # get nH and T for lookup - #-------------------------------------------------------- - log_nH = np.log10( data["PartType0", "H_number_density"] ) - log_T = np.log10( data["PartType0", "Temperature"] ) - - # get name of owls_ion_file for given ion - #-------------------------------------------------------- - itab.set_iz( data.ds.current_redshift ) - - # find ion balance using log nH and log T - #-------------------------------------------------------- - i_frac = itab.interp( log_nH, log_T ) + m_frac, i_frac = _get_ion_mass_frac(ion, ftype, itab, data) return data[ftype,"Density"] * m_frac * i_frac return _func @@ -265,7 +258,20 @@ def _func(field, data): itab = oit.IonTableOWLS( fname ) return get_owls_ion_density_field(ion, ftype, itab) + def _create_ion_mass_func( self, ftype, ion ): + """ returns a function that calculates the ion mass of a particle + """ + + def get_owls_ion_mass_field(ion, ftype, itab): + def _func(field,data): + m_frac, i_frac = _get_ion_mass_frac(ion, ftype, itab, data) + return data[ftype, "particle_mass"] * m_frac * i_frac + return _func + ion_path = self._get_owls_ion_data_dir() + fname = os.path.join(ion_path, ion+".hdf5") + itab = oit.IonTableOWLS(fname) + return get_owls_ion_mass_field(ion, ftype, itab) diff --git a/yt/frontends/owls/io.py b/yt/frontends/owls/io.py index 9adf00332e0..dde0eacfbe5 100644 --- a/yt/frontends/owls/io.py +++ b/yt/frontends/owls/io.py @@ -1,20 +1,3 @@ -""" -OWLS data-file handling function - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.frontends.gadget.io import \ IOHandlerGadgetHDF5 diff --git a/yt/frontends/owls/owls_ion_tables.py b/yt/frontends/owls/owls_ion_tables.py index 5b7a1925a9d..b13473c289b 100644 --- a/yt/frontends/owls/owls_ion_tables.py +++ b/yt/frontends/owls/owls_ion_tables.py @@ -1,23 +1,4 @@ -""" -OWLS ion tables - -A module to handle the HM01 UV background spectra and ionization data from the -OWLS photoionization equilibrium lookup tables. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py -import yt.extern.six as six import numpy as np @@ -29,7 +10,7 @@ def h5rd(fname, path, dtype=None): e.g. rd( fname, '/PartType0/Coordinates' ). """ data = None - fid = h5py.h5f.open(six.b(fname), h5py.h5f.ACC_RDONLY) + fid = h5py.h5f.open(fname.encode('latin-1'), h5py.h5f.ACC_RDONLY) dg = h5py.h5d.open(fid, path.encode('ascii')) if dtype is None: dtype = dg.dtype diff --git a/yt/frontends/owls/simulation_handling.py b/yt/frontends/owls/simulation_handling.py index 2b5f025e148..76ed28a9b30 100644 --- a/yt/frontends/owls/simulation_handling.py +++ b/yt/frontends/owls/simulation_handling.py @@ -1,18 +1,3 @@ -""" -OWLSSimulation class and member functions. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013-2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os from yt.frontends.gadget.simulation_handling import \ @@ -44,7 +29,7 @@ class OWLSSimulation(GadgetSimulation): >>> es = yt.simulation("my_simulation.par", "OWLS") >>> es.get_time_series() >>> for ds in es: - ... print ds.current_time + ... print(ds.current_time) """ diff --git a/yt/frontends/owls/tests/test_outputs.py b/yt/frontends/owls/tests/test_outputs.py index 6935fe355a5..78595ce2f10 100644 --- a/yt/frontends/owls/tests/test_outputs.py +++ b/yt/frontends/owls/tests/test_outputs.py @@ -1,23 +1,8 @@ -""" -OWLS frontend tests using the snapshot_033 dataset - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from collections import OrderedDict from yt.testing import \ - requires_file + requires_file, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ requires_ds, \ data_dir_load, \ @@ -35,11 +20,6 @@ (("gas", "temperature"), ("gas", "density")), (('gas', 'He_p0_number_density'), None), (('gas', 'velocity_magnitude'), None), - (("deposit", "all_density"), None), - (("deposit", "all_count"), None), - (("deposit", "all_cic"), None), - (("deposit", "PartType0_density"), None), - (("deposit", "PartType4_density"), None), ] ) @@ -47,6 +27,8 @@ @requires_ds(os33, big_data=True) def test_snapshot_033(): ds = data_dir_load(os33) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() for test in sph_answer(ds, 'snap_033', 2*128**3, _fields): test_snapshot_033.__name__ = test.description yield test diff --git a/yt/frontends/owls_subfind/__init__.py b/yt/frontends/owls_subfind/__init__.py index 6a83f0ac731..04ebe5cd2d7 100644 --- a/yt/frontends/owls_subfind/__init__.py +++ b/yt/frontends/owls_subfind/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/owls_subfind/api.py b/yt/frontends/owls_subfind/api.py index 01a4bf4c186..7168df707bf 100644 --- a/yt/frontends/owls_subfind/api.py +++ b/yt/frontends/owls_subfind/api.py @@ -1,19 +1,3 @@ -""" -API for OWLSSubfind frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ OWLSSubfindDataset diff --git a/yt/frontends/owls_subfind/data_structures.py b/yt/frontends/owls_subfind/data_structures.py index 82adbdff259..adaf4b0d2b9 100644 --- a/yt/frontends/owls_subfind/data_structures.py +++ b/yt/frontends/owls_subfind/data_structures.py @@ -1,19 +1,3 @@ -""" -Data structures for OWLSSubfind frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from collections import defaultdict from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np @@ -34,7 +18,7 @@ from yt.geometry.particle_geometry_handler import \ ParticleIndex from yt.data_objects.static_output import \ - Dataset, \ + ParticleDataset, \ ParticleFile from yt.frontends.gadget.data_structures import \ _fix_unit_ordering @@ -73,6 +57,9 @@ def _calculate_file_offset_map(self): def _detect_output_fields(self): # TODO: Add additional fields + self._setup_filenames() + self._calculate_particle_index_starts() + self._calculate_file_offset_map() dsl = [] units = {} for dom in self.data_files: @@ -89,32 +76,26 @@ def _detect_output_fields(self): ds.field_units.update(units) ds.particle_types_raw = ds.particle_types - def _setup_geometry(self): - super(OWLSSubfindParticleIndex, self)._setup_geometry() - self._calculate_particle_index_starts() - self._calculate_file_offset_map() - class OWLSSubfindHDF5File(ParticleFile): - def __init__(self, ds, io, filename, file_id): - super(OWLSSubfindHDF5File, self).__init__(ds, io, filename, file_id) + def __init__(self, ds, io, filename, file_id, bounds): + super(OWLSSubfindHDF5File, self).__init__(ds, io, filename, file_id, bounds) with h5py.File(filename, mode="r") as f: self.header = dict((field, f.attrs[field]) \ for field in f.attrs.keys()) -class OWLSSubfindDataset(Dataset): +class OWLSSubfindDataset(ParticleDataset): _index_class = OWLSSubfindParticleIndex _file_class = OWLSSubfindHDF5File _field_info_class = OWLSSubfindFieldInfo _suffix = ".hdf5" def __init__(self, filename, dataset_type="subfind_hdf5", - n_ref = 16, over_refine_factor = 1, units_override=None, - unit_system="cgs"): - self.n_ref = n_ref - self.over_refine_factor = over_refine_factor - super(OWLSSubfindDataset, self).__init__(filename, dataset_type, - units_override=units_override, - unit_system=unit_system) + index_order=None, index_filename=None, + units_override=None, unit_system="cgs"): + super(OWLSSubfindDataset, self).__init__( + filename, dataset_type, index_order=index_order, + index_filename=index_filename, units_override=units_override, + unit_system=unit_system) def _parse_parameter_file(self): handle = h5py.File(self.parameter_filename, mode="r") @@ -132,8 +113,7 @@ def _parse_parameter_file(self): self.current_time = self.quan(hvals["Time_GYR"], "Gyr") self.domain_left_edge = np.zeros(3, "float64") self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"] - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + self.domain_dimensions = np.ones(3, "int32") self.cosmological_simulation = 1 self.periodicity = (True, True, True) self.current_redshift = hvals["Redshift"] diff --git a/yt/frontends/owls_subfind/fields.py b/yt/frontends/owls_subfind/fields.py index 9d4610a8922..a41904a8784 100644 --- a/yt/frontends/owls_subfind/fields.py +++ b/yt/frontends/owls_subfind/fields.py @@ -1,19 +1,3 @@ -""" -OWLSSubfind-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/owls_subfind/io.py b/yt/frontends/owls_subfind/io.py index 1d6c8f71f51..e30e6244884 100644 --- a/yt/frontends/owls_subfind/io.py +++ b/yt/frontends/owls_subfind/io.py @@ -1,19 +1,3 @@ -""" -OWLSSubfind data-file handling function - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np @@ -42,7 +26,7 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files, key=lambda f: f.filename): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): pcount = data_file.total_particles[ptype] @@ -76,7 +60,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files, key=lambda f: f.filename): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): pcount = data_file.total_particles[ptype] diff --git a/yt/frontends/owls_subfind/tests/test_outputs.py b/yt/frontends/owls_subfind/tests/test_outputs.py index cb7be40cc71..33c642671a1 100644 --- a/yt/frontends/owls_subfind/tests/test_outputs.py +++ b/yt/frontends/owls_subfind/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -OWLSSubfind frontend tests using owls_fof_halos datasets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os.path from yt.testing import \ assert_equal, \ diff --git a/yt/frontends/ramses/__init__.py b/yt/frontends/ramses/__init__.py index bde2b72939e..1d8841fe239 100644 --- a/yt/frontends/ramses/__init__.py +++ b/yt/frontends/ramses/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/ramses/api.py b/yt/frontends/ramses/api.py index f153eb6c564..b47602b931f 100644 --- a/yt/frontends/ramses/api.py +++ b/yt/frontends/ramses/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.ramses - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .data_structures import \ RAMSESDataset diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a794210f39d..a65262209ea 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -1,20 +1,3 @@ -""" -RAMSES-specific data structures - - - -""" -# BytesIO needs absolute import -from __future__ import print_function, absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import numpy as np import stat @@ -22,7 +5,6 @@ from collections import defaultdict from glob import glob -from yt.extern.six import string_types from yt.funcs import \ mylog, \ setdefaultattr @@ -376,7 +358,7 @@ def __init__(self, filename, dataset_type='ramses', extra_particle_fields=None, cosmological=None, bbox=None): # Here we want to initiate a traceback, if the reader is not built. - if isinstance(fields, string_types): + if isinstance(fields, str): fields = field_aliases[fields] ''' fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file diff --git a/yt/frontends/ramses/definitions.py b/yt/frontends/ramses/definitions.py index 78b20ed4e70..71871bc47f9 100644 --- a/yt/frontends/ramses/definitions.py +++ b/yt/frontends/ramses/definitions.py @@ -1,19 +1,3 @@ -""" -Definitions for RAMSES files - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - # These functions are RAMSES-specific from yt.config import ytcfg from yt.funcs import mylog diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 70b918aecfb..4e9e70919b4 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -1,18 +1,12 @@ import os from yt.utilities.cython_fortran_utils import FortranFile import glob -from yt.extern.six import add_metaclass, PY2 from yt.funcs import mylog from yt.config import ytcfg from .io import _read_fluid_file_descriptor from .io_utils import read_offset - -if PY2: - FileNotFoundError = IOError - - FIELD_HANDLERS = set() def get_field_handlers(): @@ -36,8 +30,7 @@ def __new__(meta, name, bases, class_dict): return cls -@add_metaclass(RAMSESFieldFileHandlerRegistry) -class FieldFileHandler(object): +class FieldFileHandler(metaclass = RAMSESFieldFileHandlerRegistry): ''' Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). 
@@ -47,6 +40,7 @@ class FieldFileHandler(object): See `SinkParticleFileHandler` for an example implementation.''' + # These properties are static properties ftype = None # The name to give to the field type fname = None # The name of the file(s) diff --git a/yt/frontends/ramses/fields.py b/yt/frontends/ramses/fields.py index 25b6565e036..c73efc70047 100644 --- a/yt/frontends/ramses/fields.py +++ b/yt/frontends/ramses/fields.py @@ -1,18 +1,3 @@ -""" -RAMSES-specific fields - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import numpy as np @@ -189,8 +174,10 @@ def _temperature(field, data): rv = data["gas", "pressure"]/data["gas", "density"] rv *= mass_hydrogen_cgs/boltzmann_constant_cgs return rv - self.add_field(("gas", "temperature"), sampling_type="cell", function=_temperature, - units=self.ds.unit_system["temperature"]) + self.add_field(("gas", "temperature"), + sampling_type="cell", + function=_temperature, + units=self.ds.unit_system["temperature"]) self.create_cooling_fields() # See if we need to load the rt fields diff --git a/yt/frontends/ramses/hilbert.py b/yt/frontends/ramses/hilbert.py index 42f1318f47a..9d58d416e7d 100644 --- a/yt/frontends/ramses/hilbert.py +++ b/yt/frontends/ramses/hilbert.py @@ -1,22 +1,3 @@ -""" -RAMSES-specific hilbert ordering routines. - -These functions were translated from their original files from the -RAMSES project with the agreement of the original developer. See -https://bitbucket.org/rteyssie/ramses. - - -""" -from __future__ import print_function, absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np def hilbert3d(X, bit_length): diff --git a/yt/frontends/ramses/io.py b/yt/frontends/ramses/io.py index bb92f388b71..696e566bcd8 100644 --- a/yt/frontends/ramses/io.py +++ b/yt/frontends/ramses/io.py @@ -1,18 +1,3 @@ -""" -RAMSES-specific IO - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from collections import defaultdict import numpy as np diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 624fd39f9b5..0cc1c3154cd 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -1,14 +1,10 @@ import os from yt.utilities.cython_fortran_utils import FortranFile -from yt.extern.six import add_metaclass, PY2 from yt.funcs import mylog from yt.config import ytcfg from .io import _read_part_file_descriptor -if PY2: - FileNotFoundError = IOError - PARTICLE_HANDLERS = set() PRESENT_PART_FILES = {} @@ -30,9 +26,7 @@ def __new__(meta, name, bases, class_dict): register_particle_handler(cls) return cls - -@add_metaclass(RAMSESParticleFileHandlerRegistry) -class ParticleFileHandler(object): +class ParticleFileHandler(metaclass = RAMSESParticleFileHandlerRegistry): ''' Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -42,6 +36,7 @@ class ParticleFileHandler(object): See `SinkParticleFileHandler` for an example implementation.''' + # These properties are static properties ptype = None # The name to give to the particle type fname = None # The name of the file(s). diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index eade448ae1b..109ded6608c 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -1,19 +1,3 @@ -""" -RAMSES frontend tests - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import \ assert_equal, \ requires_file, \ @@ -33,14 +17,14 @@ import yt import numpy as np -_fields = ("temperature", "density", "velocity_magnitude", - ("deposit", "all_density"), ("deposit", "all_count")) +_fields = ("temperature", "density", "velocity_magnitude") output_00080 = "output_00080/info_00080.txt" @requires_ds(output_00080) def test_output_00080(): ds = data_dir_load(output_00080) assert_equal(str(ds), "info_00080") + assert_equal(ds.particle_type_counts, {'io': 1090895, 'nbody': 0}) dso = [ None, ("sphere", ("max", (0.1, 'unitary')))] for dobj_name in dso: for field in _fields: @@ -55,7 +39,6 @@ def test_output_00080(): s2 = sum(mask.sum() for block, mask in dobj.blocks) assert_equal(s1, s2) - assert_equal(ds.particle_type_counts, {'io': 1090895}) @requires_file(output_00080) def test_RAMSESDataset(): diff --git a/yt/frontends/rockstar/__init__.py b/yt/frontends/rockstar/__init__.py index 6e14c8d2592..ff6a8d86064 100644 --- a/yt/frontends/rockstar/__init__.py +++ b/yt/frontends/rockstar/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- diff --git a/yt/frontends/rockstar/api.py b/yt/frontends/rockstar/api.py index 654e1d77031..f5bf766246d 100644 --- a/yt/frontends/rockstar/api.py +++ b/yt/frontends/rockstar/api.py @@ -1,19 +1,3 @@ -""" -API for Rockstar frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ RockstarDataset diff --git a/yt/frontends/rockstar/data_structures.py b/yt/frontends/rockstar/data_structures.py index 26cd0b935a2..12a5e29c608 100644 --- a/yt/frontends/rockstar/data_structures.py +++ b/yt/frontends/rockstar/data_structures.py @@ -1,19 +1,3 @@ -""" -Data structures for Rockstar frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import stat import glob @@ -23,7 +7,7 @@ RockstarFieldInfo from yt.data_objects.static_output import \ - Dataset + ParticleDataset from yt.frontends.halo_catalog.data_structures import \ HaloCatalogFile from yt.funcs import \ @@ -37,14 +21,15 @@ header_dt class RockstarBinaryFile(HaloCatalogFile): - def __init__(self, ds, io, filename, file_id): + def __init__(self, ds, io, filename, file_id, range): with open(filename, "rb") as f: self.header = fpu.read_cattrs(f, header_dt, "=") self._position_offset = f.tell() f.seek(0, os.SEEK_END) self._file_size = f.tell() - super(RockstarBinaryFile, self).__init__(ds, io, filename, file_id) + super(RockstarBinaryFile, self).__init__( + ds, io, filename, file_id, range) def _read_particle_positions(self, ptype, f=None): """ @@ -69,17 +54,16 @@ def _read_particle_positions(self, ptype, f=None): return pos -class RockstarDataset(Dataset): + +class RockstarDataset(ParticleDataset): _index_class = ParticleIndex _file_class = RockstarBinaryFile _field_info_class = RockstarFieldInfo _suffix = ".bin" def __init__(self, filename, dataset_type="rockstar_binary", - n_ref = 16, over_refine_factor = 1, - units_override=None, unit_system="cgs"): - self.n_ref = n_ref - self.over_refine_factor = over_refine_factor + units_override=None, unit_system="cgs", + index_order=None, index_filename=None): super(RockstarDataset, self).__init__(filename, dataset_type, units_override=units_override, unit_system=unit_system) @@ -114,8 +98,7 @@ def _parse_parameter_file(self): self.domain_left_edge = np.array([0.0,0.0,0.0]) self.domain_right_edge = np.array([hvals['box_size']] * 3) - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + self.domain_dimensions = np.ones(3, "int32") self.parameters.update(hvals) def _set_code_unit_attributes(self): diff --git a/yt/frontends/rockstar/definitions.py b/yt/frontends/rockstar/definitions.py index 70ff56d4069..46df7aeb60c 100644 --- a/yt/frontends/rockstar/definitions.py +++ b/yt/frontends/rockstar/definitions.py @@ -1,18 +1,3 @@ -""" -Data structures for Rockstar - - - -""" - 
-#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np BINARY_HEADER_SIZE=256 diff --git a/yt/frontends/rockstar/fields.py b/yt/frontends/rockstar/fields.py index 11cc0824f85..5f5eb074c14 100644 --- a/yt/frontends/rockstar/fields.py +++ b/yt/frontends/rockstar/fields.py @@ -1,19 +1,3 @@ -""" -Rockstar-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/rockstar/io.py b/yt/frontends/rockstar/io.py index ca169fd1e2b..a8a4c14d2b4 100644 --- a/yt/frontends/rockstar/io.py +++ b/yt/frontends/rockstar/io.py @@ -1,19 +1,3 @@ -""" -Rockstar data-file handling function - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os @@ -28,8 +12,6 @@ from .definitions import halo_dts from yt.utilities.lib.geometry_utils import compute_morton -from operator import attrgetter - class IOHandlerRockstarBinary(BaseIOHandler): _dataset_type = "rockstar_binary" @@ -51,7 +33,7 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files,key=attrgetter("filename")): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): pcount = data_file.header['num_halos'] if pcount == 0: continue @@ -69,7 +51,8 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files,key=attrgetter("filename")): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + si, ei = data_file.start, data_file.end pcount = data_file.header['num_halos'] if pcount == 0: continue @@ -83,9 +66,21 @@ def _read_particle_fields(self, chunks, ptf, selector): halos = np.fromfile(f, dtype=self._halo_dt, count = pcount) if mask is None: continue for field in field_list: - data = halos[field][mask].astype("float64") + data = halos[field][si:ei][mask].astype("float64") yield (ptype, field), data + def _yield_coordinates(self, data_file): + # Just does halos + pcount = data_file.header['num_halos'] + with open(data_file.filename, "rb") as f: + f.seek(data_file._position_offset, os.SEEK_SET) + halos = np.fromfile(f, dtype=self._halo_dt, count = pcount) + pos = np.empty((halos.size, 3), dtype="float64") + pos[:,0] = halos["particle_position_x"] + pos[:,1] = halos["particle_position_y"] + pos[:,2] = halos["particle_position_z"] + yield 'halos', pos + def _initialize_index(self, data_file, regions): pcount = data_file.header["num_halos"] morton = 
np.empty(pcount, dtype='uint64') @@ -112,7 +107,11 @@ def _initialize_index(self, data_file, regions): return morton def _count_particles(self, data_file): - return {'halos': data_file.header['num_halos']} + nhalos = data_file.header['num_halos'] + si, ei = data_file.start, data_file.end + if None not in (si, ei): + nhalos = np.clip(nhalos - si, 0, ei - si) + return {'halos': nhalos} def _identify_fields(self, data_file): fields = [("halos", f) for f in self._halo_dt.fields if diff --git a/yt/frontends/rockstar/tests/test_outputs.py b/yt/frontends/rockstar/tests/test_outputs.py index abff690a98f..e73f08d9e64 100644 --- a/yt/frontends/rockstar/tests/test_outputs.py +++ b/yt/frontends/rockstar/tests/test_outputs.py @@ -1,22 +1,8 @@ -""" -Rockstar frontend tests using rockstar_halos dataset - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os.path from yt.testing import \ assert_equal, \ - requires_file + requires_file, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ FieldValuesTest, \ requires_ds, \ @@ -38,3 +24,9 @@ def test_fields_r1(): @requires_file(r1) def test_RockstarDataset(): assert isinstance(data_dir_load(r1), RockstarDataset) + +@requires_file(r1) +def test_particle_selection(): + ds = data_dir_load(r1) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() diff --git a/yt/frontends/sdf/__init__.py b/yt/frontends/sdf/__init__.py index 2483a898564..0b0b7c6914e 100644 --- a/yt/frontends/sdf/__init__.py +++ b/yt/frontends/sdf/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/sdf/api.py b/yt/frontends/sdf/api.py index fb2a267aa2c..cb431dd2289 100644 --- a/yt/frontends/sdf/api.py +++ b/yt/frontends/sdf/api.py @@ -1,19 +1,3 @@ -""" -API for yt.frontends.sdf - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ SDFDataset diff --git a/yt/frontends/sdf/data_structures.py b/yt/frontends/sdf/data_structures.py index 32ee43b4c74..741adab632c 100644 --- a/yt/frontends/sdf/data_structures.py +++ b/yt/frontends/sdf/data_structures.py @@ -1,19 +1,3 @@ -""" -Data structures for a generic SDF frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
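The `_count_particles` change above clips the per-file halo count to the `[start, end)` sub-range that is now assigned to each data file. A small worked sketch of that arithmetic (the numbers are fabricated for illustration):

>>> import numpy as np
>>> nhalos = 1000                      # halos physically present in the file
>>> for si, ei in [(0, 256), (256, 512), (512, 1024), (1024, 1280)]:
...     print(si, ei, np.clip(nhalos - si, 0, ei - si))
0 256 256
256 512 256
512 1024 488
1024 1280 0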
-#----------------------------------------------------------------------------- - import numpy as np import stat @@ -67,7 +51,8 @@ class SDFDataset(ParticleDataset): def __init__(self, filename, dataset_type="sdf_particles", - n_ref=64, over_refine_factor=1, + index_order=None, + index_filename=None, bounding_box=None, sdf_header=None, midx_filename=None, @@ -77,6 +62,8 @@ def __init__(self, filename, dataset_type="sdf_particles", units_override=None, unit_system="cgs"): if bounding_box is not None: + # This ensures that we know a bounding box has been applied + self._domain_override = True self._subspace = True bbox = np.array(bounding_box, dtype="float64") if bbox.shape == (2, 3): @@ -101,7 +88,7 @@ def __init__(self, filename, dataset_type="sdf_particles", super(SDFDataset, self).__init__( filename, dataset_type=dataset_type, units_override=units_override, unit_system=unit_system, - n_ref=n_ref, over_refine_factor=over_refine_factor) + index_order=index_order, index_filename=index_filename) def _parse_parameter_file(self): if self.parameter_filename.startswith("http"): @@ -137,8 +124,8 @@ def _parse_parameter_file(self): dtype=np.float64) self.domain_left_edge *= self.parameters.get("a", 1.0) self.domain_right_edge *= self.parameters.get("a", 1.0) - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + + self.domain_dimensions = np.ones(3, "int32") if "do_periodic" in self.parameters and self.parameters["do_periodic"]: self.periodicity = (True, True, True) else: diff --git a/yt/frontends/sdf/fields.py b/yt/frontends/sdf/fields.py index 0aa707f8f3f..2abd0fbe167 100644 --- a/yt/frontends/sdf/fields.py +++ b/yt/frontends/sdf/fields.py @@ -1,19 +1,3 @@ -""" -SDF-specific fields - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer diff --git a/yt/frontends/sdf/io.py b/yt/frontends/sdf/io.py index bfb3c6fd1df..b1944b3a425 100644 --- a/yt/frontends/sdf/io.py +++ b/yt/frontends/sdf/io.py @@ -1,19 +1,3 @@ -""" -SDF data-file handling function - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.io_handler import \ @@ -44,7 +28,7 @@ def _read_particle_coords(self, chunks, ptf): for obj in chunk.objs: data_files.update(obj.data_files) assert(len(data_files) == 1) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): yield "dark_matter", ( self._handle['x'], self._handle['y'], self._handle['z']) @@ -57,7 +41,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for obj in chunk.objs: data_files.update(obj.data_files) assert(len(data_files) == 1) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): for ptype, field_list in sorted(ptf.items()): x = self._handle['x'] y = self._handle['y'] diff --git a/yt/frontends/sdf/tests/test_outputs.py b/yt/frontends/sdf/tests/test_outputs.py index df123a77f8f..24f0497ef69 100644 --- a/yt/frontends/sdf/tests/test_outputs.py +++ b/yt/frontends/sdf/tests/test_outputs.py @@ -1,23 +1,10 @@ -""" -SDF frontend tests - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import socket +import urllib from yt.testing import assert_equal from yt.frontends.sdf.api import SDFDataset from yt.visualization.api import ProjectionPlot from yt.testing import requires_module -from yt.extern.six.moves import urllib _fields = (('deposit', 'all_cic')) diff --git a/yt/frontends/sph/__init__.py b/yt/frontends/sph/__init__.py index 63fa39200cf..377e37fa8ab 100644 --- a/yt/frontends/sph/__init__.py +++ b/yt/frontends/sph/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/sph/api.py b/yt/frontends/sph/api.py index e99f3a827c2..8177a1f3f08 100644 --- a/yt/frontends/sph/api.py +++ b/yt/frontends/sph/api.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/sph/data_structures.py b/yt/frontends/sph/data_structures.py index d271cfcd635..b2d420b43a2 100644 --- a/yt/frontends/sph/data_structures.py +++ b/yt/frontends/sph/data_structures.py @@ -1,74 +1,127 @@ -""" -Data structures for SPH frontends. - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- +import numpy as np +import os from yt.data_objects.static_output import \ ParticleDataset - +from yt.funcs import \ + mylog +from yt.geometry.particle_geometry_handler import \ + ParticleIndex class SPHDataset(ParticleDataset): default_kernel_name = "cubic" + _sph_smoothing_styles = ["scatter", "gather"] + _sph_smoothing_style = "scatter" + _num_neighbors = 32 + _use_sph_normalization = True def __init__(self, filename, dataset_type=None, file_style=None, units_override=None, unit_system="cgs", - n_ref=64, over_refine_factor=1, - kernel_name=None): + index_order=None, index_filename=None, + kdtree_filename=None, kernel_name=None): if kernel_name is None: self.kernel_name = self.default_kernel_name else: self.kernel_name = kernel_name + self.kdtree_filename = kdtree_filename super(SPHDataset, self).__init__( filename, dataset_type=dataset_type, file_style=file_style, units_override=units_override, unit_system=unit_system, - n_ref=n_ref, over_refine_factor=over_refine_factor) - - def add_smoothed_particle_field(self, smooth_field, - method="volume_weighted", nneighbors=64, - kernel_name=None): - """Add a new smoothed particle field - - Creates a new smoothed field based on the particle *smooth_field*. - - Parameters - ---------- - - smooth_field : tuple - The field name tuple of the particle field the smoothed field will - be created from. This must be a field name tuple so yt can - appropriately infer the correct particle type. - method : string, default 'volume_weighted' - The particle smoothing method to use. Can only be 'volume_weighted' - for now. - nneighbors : int, default 64 - The number of neighbors to examine during the process. - kernel_name : string or None, default None - This is the name of the smoothing kernel to use. Current supported - kernel names include `cubic`, `quartic`, `quintic`, `wendland2`, - `wendland4`, and `wendland6`. If left as None, - :attr:`~yt.frontends.sph.data_structures.SPHDataset.kernel_name` - will be used. - - Returns - ------- - - The field name tuple for the newly created field. 
- """ - if kernel_name is None: - kernel_name = self.kernel_name - return super(SPHDataset, self).add_smoothed_particle_field( - smooth_field=smooth_field, method=method, nneighbors=nneighbors, - kernel_name=kernel_name + index_order=index_order, index_filename=index_filename) + + @property + def num_neighbors(self): + return self._num_neighbors + + @num_neighbors.setter + def num_neighbors(self, value): + if value < 0: + raise ValueError("Negative value not allowed: %s" % value) + self._num_neighbors = value + + @property + def sph_smoothing_style(self): + return self._sph_smoothing_style + + @sph_smoothing_style.setter + def sph_smoothing_style(self, value): + if value not in self._sph_smoothing_styles: + raise ValueError("Smoothing style not implemented: %s, please " + "select one of the following: " % value, + self._sph_smoothing_styles) + + self._sph_smoothing_style = value + + @property + def use_sph_normalization(self): + return self._use_sph_normalization + + @use_sph_normalization.setter + def use_sph_normalization(self, value): + if value is not True and value is not False: + raise ValueError("SPH normalization needs to be True or False!") + self._use_sph_normalization = value + + +class SPHParticleIndex(ParticleIndex): + def _initialize_index(self): + ds = self.dataset + + ds._file_hash = self._generate_hash() + + if hasattr(self.io, '_generate_smoothing_length'): + self.io._generate_smoothing_length(self.data_files, self.kdtree) + + super(SPHParticleIndex, self)._initialize_index() + + def _generate_kdtree(self, fname): + from yt.utilities.lib.cykdtree import PyKDTree + if fname is not None: + if os.path.exists(fname): + mylog.info('Loading KDTree from %s' % os.path.basename(fname)) + kdtree = PyKDTree.from_file(fname) + if kdtree.data_version != self.ds._file_hash: + mylog.info('Detected hash mismatch, regenerating KDTree') + else: + self._kdtree = kdtree + return + positions = [] + for data_file in self.data_files: + for _, ppos in self.io._yield_coordinates( + data_file, needed_ptype=self.ds._sph_ptypes[0]): + positions.append(ppos) + if positions == []: + self._kdtree = None + return + positions = np.concatenate(positions) + mylog.info('Allocating KDTree for %s particles' % positions.shape[0]) + self._kdtree = PyKDTree( + positions.astype('float64'), + left_edge=self.ds.domain_left_edge, + right_edge=self.ds.domain_right_edge, + periodic=np.array(self.ds.periodicity), + leafsize=2*int(self.ds.num_neighbors), + data_version=self.ds._file_hash ) + if fname is not None: + self._kdtree.save(fname) + + @property + def kdtree(self): + if hasattr(self, '_kdtree'): + return self._kdtree + + ds = self.ds + + if getattr(ds, 'kdtree_filename', None) is None: + if os.path.exists(ds.parameter_filename): + fname = ds.parameter_filename + ".kdtree" + else: + # we don't want to write to disk for in-memory data + fname = None + else: + fname = ds.kdtree_filename + + self._generate_kdtree(fname) + + return self._kdtree diff --git a/yt/frontends/sph/fields.py b/yt/frontends/sph/fields.py index 8be620fa84e..3bbeb7a3c36 100644 --- a/yt/frontends/sph/fields.py +++ b/yt/frontends/sph/fields.py @@ -1,20 +1,3 @@ -""" -SPH fields - - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer from yt.fields.species_fields import \ @@ -47,3 +30,6 @@ class SPHFieldInfo(FieldInfoContainer): def setup_particle_fields(self, ptype, *args, **kwargs): super(SPHFieldInfo, self).setup_particle_fields(ptype, *args, **kwargs) setup_species_fields(self, ptype) + + def setup_fluid_index_fields(self): + pass diff --git a/yt/frontends/sph/io.py b/yt/frontends/sph/io.py new file mode 100644 index 00000000000..644dbafb9e3 --- /dev/null +++ b/yt/frontends/sph/io.py @@ -0,0 +1,33 @@ +""" +Generic file-handing functions for SPH data + + + + +""" +from yt.utilities.io_handler import \ + BaseIOHandler + +class IOHandlerSPH(BaseIOHandler): + """IOHandler implementation specifically for SPH data + + This exists to handle particles with smoothing lengths, which require us + to read in smoothing lengths along with the the particle coordinates to + determine particle extents. + """ + + def _count_particles_chunks(self, psize, chunks, ptf, selector): + if getattr(selector, 'is_all_data', False): + chunks = list(chunks) + data_files = set([]) + for chunk in chunks: + for obj in chunk.objs: + data_files.update(obj.data_files) + data_files = sorted(data_files, key=lambda x: (x.filename, x.start)) + for data_file in data_files: + for ptype in ptf.keys(): + psize[ptype] += data_file.total_particles[ptype] + else: + for ptype, (x, y, z), hsml in self._read_particle_coords(chunks, ptf): + psize[ptype] += selector.count_points(x, y, z, hsml) + return dict(psize) diff --git a/yt/frontends/stream/api.py b/yt/frontends/stream/api.py index c2916790046..5b751ee8bc1 100644 --- a/yt/frontends/stream/api.py +++ b/yt/frontends/stream/api.py @@ -1,18 +1,3 @@ -""" -API for yt.frontends.stream - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ StreamGrid, \ StreamHierarchy, \ diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 039166d4afb..695b248aa0e 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -1,18 +1,3 @@ -""" -Data structures for Streaming, in-memory datasets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
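`IOHandlerSPH._count_particles_chunks` above takes a shortcut when the selector covers the whole domain: rather than testing every particle with `count_points(x, y, z, hsml)`, it simply sums the per-file totals. A minimal sketch of that fast path with fabricated bookkeeping (the file totals below are made up; the real ones come from `data_file.total_particles`):

>>> from collections import defaultdict
>>> file_totals = [{"io": 4096}, {"io": 4096}, {"io": 1808}]   # fabricated
>>> psize = defaultdict(int)
>>> for totals in file_totals:        # the is_all_data branch
...     psize["io"] += totals["io"]
>>> dict(psize)
{'io': 10000}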
-#----------------------------------------------------------------------------- - import os import time import weakref @@ -26,6 +11,7 @@ from numbers import Number as numeric_type +from yt.utilities.lib.cykdtree import PyKDTree from yt.funcs import \ iterable, \ ensure_list, \ @@ -39,6 +25,8 @@ AMRGridPatch from yt.data_objects.static_output import \ ParticleFile +from yt.frontends.sph.data_structures import \ + SPHParticleIndex from yt.geometry.geometry_handler import \ YTDataChunk from yt.geometry.grid_geometry_handler import \ @@ -47,8 +35,6 @@ OctreeSubset from yt.geometry.oct_geometry_handler import \ OctreeIndex -from yt.geometry.particle_geometry_handler import \ - ParticleIndex from yt.geometry.oct_container import \ OctreeContainer from yt.geometry.unstructured_mesh_handler import \ @@ -58,6 +44,9 @@ from yt.utilities.logger import ytLogger as mylog from yt.utilities.lib.misc_utilities import \ get_box_grids_level +from yt.utilities.lib.particle_kdtree_tools import \ + generate_smoothing_length, \ + estimate_density from yt.geometry.grid_container import \ GridTree, \ MatchPointsToGrids @@ -76,7 +65,6 @@ from yt.data_objects.unstructured_mesh import \ SemiStructuredMesh, \ UnstructuredMesh -from yt.extern.six import string_types from .fields import \ StreamFieldInfo from yt.frontends.exodus_ii.util import \ @@ -336,7 +324,7 @@ def _set_code_unit_attributes(self): attrs = ('length_unit', 'mass_unit', 'time_unit', 'velocity_unit', 'magnetic_unit') cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss') for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units): - if isinstance(unit, string_types): + if isinstance(unit, str): uq = self.quan(1.0, unit) elif isinstance(unit, numeric_type): uq = self.quan(unit, cgs_unit) @@ -510,11 +498,11 @@ def process_data(data, grid_dims=None): # val is a tuple of (data, units) elif isinstance(val, tuple) and len(val) == 2: try: - assert isinstance(field, (string_types, tuple)), \ + assert isinstance(field, (str, tuple)), \ "Field name is not a string!" assert isinstance(val[0], np.ndarray), \ "Field data is not an ndarray!" - assert isinstance(val[1], string_types), \ + assert isinstance(val[1], str), \ "Unit specification is not a string!" 
field_units[field] = val[1] new_data[field] = val[0] @@ -998,7 +986,7 @@ def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, if not isinstance(field, tuple): field = ("unknown", field) fi = base_ds._get_field_info(*field) - if fi.particle_type and field[0] in base_ds.particle_types_raw: + if fi.sampling_type == "particle" and field[0] in base_ds.particle_types_raw: pdata[field] = uconcatenate([grid[field] for grid in base_ds.index.grids]) pdata["number_of_particles"] = number_of_particles @@ -1024,7 +1012,7 @@ def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, if not isinstance(field, tuple): field = ("unknown", field) fi = ds._get_field_info(*field) - if not fi.particle_type: + if not fi.sampling_type == "particle": gd[field] = g[field] grid_data.append(gd) if g.Level < ds.index.max_level: continue @@ -1040,7 +1028,7 @@ def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, if not isinstance(field, tuple): field = ("unknown", field) fi = ds._get_field_info(*field) - if not fi.particle_type: + if not fi.sampling_type == "particle": gd[field] = grid[field] grid_data.append(gd) @@ -1058,7 +1046,7 @@ def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, return ds -class StreamParticleIndex(ParticleIndex): +class StreamParticleIndex(SPHParticleIndex): def __init__(self, ds, dataset_type = None): self.stream_handler = ds.stream_handler @@ -1070,6 +1058,49 @@ def _setup_data_io(self): else: self.io = io_registry[self.dataset_type](self.ds) + def update_data(self, data): + """ + Update the stream data with a new data dict. If fields already exist, + they will be replaced, but if they do not, they will be added. Fields + already in the stream but not part of the data dict will be left + alone. + """ + # Alias + ds = self.ds + handler = ds.stream_handler + + # Preprocess + field_units, data, _ = process_data(data) + pdata = {} + for key in data.keys(): + if not isinstance(key, tuple): + field = ("io", key) + mylog.debug("Reassigning '%s' to '%s'", key, field) + else: + field = key + pdata[field] = data[key] + data = pdata # Drop reference count + particle_types = set_particle_types(data) + + # Update particle types + handler.particle_types.update(particle_types) + ds._find_particle_types() + + # Update fields + handler.field_units.update(field_units) + fields = handler.fields + for field in data.keys(): + if field not in fields._additional_fields: + fields._additional_fields += (field,) + fields["stream_file"].update(data) + + # Update field list + for field in self.ds.field_list: + if field[0] in ["all", "nbody"]: + self.ds.field_list.remove(field) + self._detect_output_fields() + self.ds.create_field_info() + class StreamParticleFile(ParticleFile): pass @@ -1080,36 +1111,132 @@ class StreamParticlesDataset(StreamDataset): _dataset_type = "stream_particles" file_count = 1 filename_template = "stream_file" - n_ref = 64 - over_refine_factor = 1 + _proj_type = 'particle_proj' + + def __init__(self, stream_handler, storage_filename=None, + geometry='cartesian', unit_system='cgs'): + super(StreamParticlesDataset, self).__init__( + stream_handler, storage_filename=storage_filename, + geometry=geometry, unit_system=unit_system) + fields = list(stream_handler.fields['stream_file'].keys()) + # This is the current method of detecting SPH data. + # This should be made more flexible in the future. 
+ if ('io', 'density') in fields and ('io', 'smoothing_length') in fields: + self._sph_ptypes = ('io',) + + def add_sph_fields(self, n_neighbors=32, kernel="cubic", sph_ptype="io"): + """Add SPH fields for the specified particle type. + + For a particle type with "particle_position" and "particle_mass" already + defined, this method adds the "smoothing_length" and "density" fields. + "smoothing_length" is computed as the distance to the nth nearest + neighbor. "density" is computed as the SPH (gather) smoothed mass. The + SPH fields are added only if they don't already exist. + + Parameters + ---------- + n_neighbors : int + The number of neighbors to use in smoothing length computation. + kernel : str + The kernel function to use in density estimation. + sph_ptype : str + The SPH particle type. Each dataset has one sph_ptype only. This + method will overwrite existing sph_ptype of the dataset. -def load_particles(data, length_unit = None, bbox=None, - sim_time=0.0, mass_unit = None, time_unit = None, + """ + mylog.info("Generating SPH fields") + + # Unify units + l_unit = "code_length" + m_unit = "code_mass" + d_unit = "code_mass / code_length**3" + + # Read basic fields + ad = self.all_data() + pos = ad[sph_ptype, "particle_position"].to(l_unit).d + mass = ad[sph_ptype, "particle_mass"].to(m_unit).d + + # Construct k-d tree + kdtree = PyKDTree( + pos.astype("float64"), + left_edge=self.domain_left_edge.to_value(l_unit), + right_edge=self.domain_right_edge.to_value(l_unit), + periodic=self.periodicity, + leafsize=2*int(n_neighbors), + ) + order = np.argsort(kdtree.idx) + + def exists(fname): + if (sph_ptype, fname) in self.derived_field_list: + mylog.info("Field ('%s','%s') already exists. Skipping", + sph_ptype, fname) + return True + else: + mylog.info("Generating field ('%s','%s')", + sph_ptype, fname) + return False + data = {} + + # Add smoothing length field + fname = "smoothing_length" + if not exists(fname): + hsml = generate_smoothing_length( + pos[kdtree.idx], kdtree, n_neighbors + ) + hsml = hsml[order] + data[(sph_ptype, "smoothing_length")] = (hsml, l_unit) + else: + hsml = ad[sph_ptype, fname].to(l_unit).d + + # Add density field + fname = "density" + if not exists(fname): + dens = estimate_density( + pos[kdtree.idx], mass[kdtree.idx], hsml[kdtree.idx], + kdtree, kernel_name=kernel, + ) + dens = dens[order] + data[(sph_ptype, "density")] = (dens, d_unit) + + # Add fields + self._sph_ptypes = (sph_ptype,) + self.index.update_data(data) + self.num_neighbors = n_neighbors + +def load_particles(data, length_unit=None, bbox=None, + sim_time=None, mass_unit=None, time_unit=None, velocity_unit=None, magnetic_unit=None, periodicity=(True, True, True), - n_ref = 64, over_refine_factor = 1, geometry = "cartesian", - unit_system="cgs"): + geometry="cartesian", unit_system="cgs", + data_source=None): r"""Load a set of particles into yt as a :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`. - This should allow a collection of particle data to be loaded directly into + This will allow a collection of particle data to be loaded directly into yt and analyzed as would any others. This comes with several caveats: - * There must be sufficient space in memory to contain both the particle - data and the octree used to index the particles. + * There must be sufficient space in memory to contain all the particle + data. * Parallelism will be disappointing or non-existent in most cases. + * Fluid fields are not supported. - This will initialize an Octree of data. 
Note that fluid fields will not - work yet, or possibly ever. + Note: in order for the dataset to take advantage of SPH functionality, + the following two fields must be provided: + * ('io', 'density') + * ('io', 'smoothing_length') Parameters ---------- data : dict - This is a dict of numpy arrays or (numpy array, unit name) tuples, - where the keys are the field names. Particles positions must be named + This is a dict of numpy arrays or (numpy array, unit name) tuples, + where the keys are the field names. Particles positions must be named "particle_position_x", "particle_position_y", and "particle_position_z". length_unit : float Conversion factor from simulation length units to centimeters + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of the length_unit + sim_time : float, optional + The simulation time in seconds mass_unit : float Conversion factor from simulation mass units to grams time_unit : float @@ -1118,16 +1245,12 @@ def load_particles(data, length_unit = None, bbox=None, Conversion factor from simulation velocity units to cm/s magnetic_unit : float Conversion factor from simulation magnetic units to gauss - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of the length_unit - sim_time : float, optional - The simulation time in seconds periodicity : tuple of booleans Determines whether the data will be treated as periodic along each axis - n_ref : int - The number of particles that result in refining an oct used for - indexing the particles. + data_source : YTSelectionContainer, optional + If set, parameters like `bbox`, `sim_time`, and code units are derived + from it. Examples -------- @@ -1141,8 +1264,15 @@ def load_particles(data, length_unit = None, bbox=None, """ - domain_dimensions = np.ones(3, "int32") * (1 << over_refine_factor) + domain_dimensions = np.ones(3, "int32") nprocs = 1 + + # Parse bounding box + if data_source is not None: + le, re = data_source.get_bbox() + le = le.to_value("code_length") + re = re.to_value("code_length") + bbox = list(zip(le, re)) if bbox is None: bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64') else: @@ -1151,6 +1281,29 @@ def load_particles(data, length_unit = None, bbox=None, domain_right_edge = np.array(bbox[:, 1], 'float64') grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1)) + # Parse simulation time + if data_source is not None: + sim_time = data_source.ds.current_time + if sim_time is None: + sim_time = 0.0 + else: + sim_time = float(sim_time) + + # Parse units + def parse_unit(unit, dimension): + if unit is None: + unit = "code_" + dimension + if data_source is not None: + unit = getattr(data_source.ds, dimension + '_unit', unit) + return unit + + length_unit = parse_unit(length_unit, "length") + mass_unit = parse_unit(mass_unit, "mass") + time_unit = parse_unit(time_unit, "time") + velocity_unit = parse_unit(velocity_unit, "velocity") + magnetic_unit = parse_unit(magnetic_unit, "magnetic") + + # Preprocess data field_units, data, _ = process_data(data) sfh = StreamDictFieldHandler() @@ -1170,17 +1323,6 @@ def load_particles(data, length_unit = None, bbox=None, grid_right_edges = domain_right_edge grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32") - if length_unit is None: - length_unit = 'code_length' - if mass_unit is None: - mass_unit = 'code_mass' - if time_unit is None: - time_unit = 'code_time' - if velocity_unit is None: - velocity_unit = 'code_velocity' - if magnetic_unit is None: - 
magnetic_unit = 'code_magnetic' - # I'm not sure we need any of this. handler = StreamHandler( grid_left_edges, @@ -1207,8 +1349,6 @@ def load_particles(data, length_unit = None, bbox=None, handler.cosmology_simulation = 0 sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system) - sds.n_ref = n_ref - sds.over_refine_factor = over_refine_factor return sds @@ -1505,8 +1645,6 @@ def _chunk_all(self, dobj): def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None): sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info) - # We actually do not really use the data files except as input to the - # ParticleOctreeSubset. # This is where we will perform cutting of the Octree and # load-balancing. That may require a specialized selector object to # cut based on some space-filling curve index. diff --git a/yt/frontends/stream/definitions.py b/yt/frontends/stream/definitions.py index 63753b61341..e69de29bb2d 100644 --- a/yt/frontends/stream/definitions.py +++ b/yt/frontends/stream/definitions.py @@ -1,15 +0,0 @@ -""" -Definitions specific to the Streaming API - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - diff --git a/yt/frontends/stream/fields.py b/yt/frontends/stream/fields.py index 115709011b9..8c43a8027e1 100644 --- a/yt/frontends/stream/fields.py +++ b/yt/frontends/stream/fields.py @@ -1,18 +1,3 @@ -""" -Fields specific to Streaming data - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer @@ -78,11 +63,14 @@ def setup_fluid_fields(self): if field[0] in self.ds.particle_types: continue units = self.ds.stream_handler.field_units[field] - if units != '': - self.add_output_field(field, sampling_type="cell", units=units) + if units != '': + self.add_output_field(field, + sampling_type="cell", + units=units) setup_magnetic_field_aliases(self, "stream", ["magnetic_field_%s" % ax for ax in "xyz"]) def add_output_field(self, name, sampling_type, **kwargs): if name in self.ds.stream_handler.field_units: kwargs['units'] = self.ds.stream_handler.field_units[name] - super(StreamFieldInfo, self).add_output_field(name, sampling_type, **kwargs) + super(StreamFieldInfo, self).add_output_field( + name, sampling_type, **kwargs) diff --git a/yt/frontends/stream/io.py b/yt/frontends/stream/io.py index 75c25c97cb0..70717a4d142 100644 --- a/yt/frontends/stream/io.py +++ b/yt/frontends/stream/io.py @@ -1,24 +1,8 @@ -""" -Enzo-specific IO functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
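Putting the stream pieces together: `load_particles` no longer builds an octree, and `add_sph_fields` can turn a plain N-body stream dataset into an SPH one by generating `smoothing_length` and `density` from a k-d tree. A hedged end-to-end sketch (the particle count and unit strings are arbitrary choices of mine, not part of this change):

>>> import numpy as np
>>> import yt
>>> n = 10000
>>> data = {
...     "particle_position_x": np.random.random(n),
...     "particle_position_y": np.random.random(n),
...     "particle_position_z": np.random.random(n),
...     "particle_mass": (np.ones(n), "Msun"),   # (array, unit) tuples are accepted
... }
>>> bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
>>> ds = yt.load_particles(data, length_unit="Mpc", mass_unit="Msun", bbox=bbox)
>>> ds.add_sph_fields(n_neighbors=32, kernel="cubic", sph_ptype="io")
>>> ("io", "smoothing_length") in ds.field_list
True
>>> dens = ds.all_data()["io", "density"]    # per-particle SPH density estimate

The new `data_source` keyword of `load_particles` can alternatively be used to inherit the bounding box, simulation time, and code units from an existing selection, as exercised by the stream tests later in this patch.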
-#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.io_handler import \ BaseIOHandler from yt.utilities.logger import ytLogger as mylog -from yt.utilities.lib.geometry_utils import compute_morton from yt.utilities.exceptions import YTDomainOverflow class IOHandlerStream(BaseIOHandler): @@ -109,67 +93,69 @@ def __init__(self, ds): super(StreamParticleIOHandler, self).__init__(ds) def _read_particle_coords(self, chunks, ptf): - chunks = list(chunks) - data_files = set([]) - for chunk in chunks: - for obj in chunk.objs: - data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(self._get_data_files(chunks), + key=lambda x: (x.filename, + x.start)): f = self.fields[data_file.filename] # This double-reads for ptype, field_list in sorted(ptf.items()): yield ptype, (f[ptype, "particle_position_x"], f[ptype, "particle_position_y"], f[ptype, "particle_position_z"]) + + def _read_smoothing_length(self, chunks, ptf, ptype): + for data_file in sorted(self._get_data_files(chunks), + key=lambda x: (x.filename, + x.start)): + f = self.fields[data_file.filename] + return f[ptype, 'smoothing_length'] - def __count_particles_chunks(self, chunks, ptf, selector): - # DISABLED - # I have left this in here, but disabled, because of two competing - # problems: - # * The IndexedOctreeSubsetSelector currently selects *all* particles - # * Slicing a deposited field thus throws an error, since the octree - # estimate fails. - # * BUT, it provides considerable speedup in some situations for - # stream datasets. - # So, pending its re-enabling, we'll leave it here. - # - # This is allowed to over-estimate. We probably *will*, too, because - # we're going to count *all* of the particles, not just individual - # types. 
- count = 0 - psize = {} + def _get_data_files(self, chunks): + data_files = set([]) for chunk in chunks: for obj in chunk.objs: - count += selector.count_octs(obj.oct_handler, obj.domain_id) - for ptype in ptf: - psize[ptype] = self.ds.n_ref * count + data_files.update(obj.data_files) + return data_files + + def _count_particles_chunks(self, psize, chunks, ptf, selector): + for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf): + if (ptype, 'smoothing_length') in self.ds.field_list: + hsml = self._read_smoothing_length(chunks, ptf, ptype) + else: + hsml = 0.0 + psize[ptype] += selector.count_points(x, y, z, hsml) return psize def _read_particle_fields(self, chunks, ptf, selector): - data_files = set([]) - for chunk in chunks: - for obj in chunk.objs: - data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(self._get_data_files(chunks), + key=lambda x: (x.filename, + x.start)): f = self.fields[data_file.filename] for ptype, field_list in sorted(ptf.items()): if (ptype, "particle_position") in f: - x = f[ptype, "particle_position"][:,0] - y = f[ptype, "particle_position"][:,1] - z = f[ptype, "particle_position"][:,2] + ppos = f[ptype, "particle_position"] + x = ppos[:,0] + y = ppos[:,1] + z = ppos[:,2] else: x, y, z = (f[ptype, "particle_position_%s" % ax] for ax in 'xyz') - mask = selector.select_points(x, y, z, 0.0) - if mask is None: continue + if (ptype, 'smoothing_length') in self.ds.field_list: + hsml = f[ptype, 'smoothing_length'] + else: + hsml = 0.0 + mask = selector.select_points(x, y, z, hsml) + if mask is None: + continue for field in field_list: data = f[ptype, field][mask] yield (ptype, field), data - def _initialize_index(self, data_file, regions): + def _yield_coordinates(self, data_file, needed_ptype=None): # self.fields[g.id][fname] is the pattern here - morton = [] for ptype in self.ds.particle_types_raw: + if needed_ptype is not None and needed_ptype is not ptype: + continue try: pos = np.column_stack([self.fields[data_file.filename][ (ptype, "particle_position_%s" % ax)] for ax in 'xyz']) @@ -180,15 +166,19 @@ def _initialize_index(self, data_file, regions): raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), data_file.ds.domain_left_edge, data_file.ds.domain_right_edge) - regions.add_data_file(pos, data_file.file_id) - morton.append(compute_morton( - pos[:,0], pos[:,1], pos[:,2], - data_file.ds.domain_left_edge, - data_file.ds.domain_right_edge)) - return np.concatenate(morton) + yield ptype, pos + + def _get_smoothing_length(self, data_file, dtype, shape): + ptype = self.ds._sph_ptypes[0] + return self.fields[data_file.filename][ptype, 'smoothing_length'] def _count_particles(self, data_file): pcount = {} + for ptype in self.ds.particle_types_raw: + pcount[ptype] = 0 + # stream datasets only have one "file" + if data_file.file_id > 0: + return pcount for ptype in self.ds.particle_types_raw: d = self.fields[data_file.filename] try: diff --git a/yt/frontends/stream/misc.py b/yt/frontends/stream/misc.py index a1cd631b85e..e69de29bb2d 100644 --- a/yt/frontends/stream/misc.py +++ b/yt/frontends/stream/misc.py @@ -1,15 +0,0 @@ -""" -Miscellaneous functions that are Streaming-specific - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
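The new `StreamParticleIndex.update_data` lets fields be added to, or replaced in, an existing stream particle dataset after it has been created. A hedged usage sketch built on the `fake_particle_ds` testing helper (the `temperature` field name is arbitrary; plain string keys are reassigned to the "io" particle type, as in the method above):

>>> import numpy as np
>>> from yt.testing import fake_particle_ds
>>> npart = 100
>>> ds = fake_particle_ds(npart=npart)
>>> ds.index.update_data({"temperature": np.random.rand(npart)})
>>> ("io", "temperature") in ds.field_list
True
>>> temps = ds.all_data()["io", "temperature"]   # selectable like any other field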
-#----------------------------------------------------------------------------- - diff --git a/yt/frontends/stream/tests/test_outputs.py b/yt/frontends/stream/tests/test_outputs.py index 9a7f0f4d38c..a3c561aaceb 100644 --- a/yt/frontends/stream/tests/test_outputs.py +++ b/yt/frontends/stream/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -Tests for loading in-memory datasets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import os import shutil @@ -63,7 +48,7 @@ def test_dimensionless_field_units(): dd = ds.all_data() - assert_equal(Z.max(), dd["metallicity"].max()) + assert_equal(Z.max(), float(dd["stream", "metallicity"].max())) def test_inconsistent_field_shape(): @@ -95,4 +80,4 @@ def load_particle_fields_mismatch(): load_particles(data) assert_raises(YTInconsistentParticleFieldShape, - load_particle_fields_mismatch) \ No newline at end of file + load_particle_fields_mismatch) diff --git a/yt/frontends/stream/tests/test_stream_particles.py b/yt/frontends/stream/tests/test_stream_particles.py index 8b1c124b253..cfc5336eebe 100644 --- a/yt/frontends/stream/tests/test_stream_particles.py +++ b/yt/frontends/stream/tests/test_stream_particles.py @@ -1,7 +1,9 @@ import numpy as np from yt.testing import \ - assert_equal + assert_equal, \ + fake_particle_ds, \ + fake_sph_orientation_ds from yt.frontends.stream.api import load_uniform_grid, \ refine_amr, \ load_amr_grids, \ @@ -77,18 +79,18 @@ def test_stream_particles(): # Check to make sure the fields have been defined correctly for ptype in ("all", "io"): - assert ug1._get_field_info(ptype, "particle_position_x").particle_type - assert ug1._get_field_info(ptype, "particle_position_y").particle_type - assert ug1._get_field_info(ptype, "particle_position_z").particle_type - assert ug1._get_field_info(ptype, "particle_mass").particle_type - assert not ug1._get_field_info("gas", "density").particle_type + assert ug1._get_field_info(ptype, "particle_position_x").sampling_type == "particle" + assert ug1._get_field_info(ptype, "particle_position_y").sampling_type == "particle" + assert ug1._get_field_info(ptype, "particle_position_z").sampling_type == "particle" + assert ug1._get_field_info(ptype, "particle_mass").sampling_type == "particle" + assert not ug1._get_field_info("gas", "density").sampling_type == "particle" for ptype in ("all", "io"): - assert ug2._get_field_info(ptype, "particle_position_x").particle_type - assert ug2._get_field_info(ptype, "particle_position_y").particle_type - assert ug2._get_field_info(ptype, "particle_position_z").particle_type - assert ug2._get_field_info(ptype, "particle_mass").particle_type - assert not ug2._get_field_info("gas", "density").particle_type + assert ug2._get_field_info(ptype, "particle_position_x").sampling_type == "particle" + assert ug2._get_field_info(ptype, "particle_position_y").sampling_type == "particle" + assert ug2._get_field_info(ptype, "particle_position_z").sampling_type == "particle" + assert ug2._get_field_info(ptype, "particle_mass").sampling_type == "particle" + assert not ug2._get_field_info("gas", "density").sampling_type == "particle" # Now refine this @@ -106,7 +108,7 @@ def test_stream_particles(): dimensions=grid.ActiveDimensions) for field in 
amr1.field_list: - if field[0] != "all": + if field[0] not in ("all", "nbody"): data[field] = grid[field] grid_data.append(data) @@ -133,19 +135,19 @@ def test_stream_particles(): assert tot_parts == grid.NumberOfParticles assert tot_all_parts == grid.NumberOfParticles - assert amr1._get_field_info("all", "particle_position_x").particle_type - assert amr1._get_field_info("all", "particle_position_y").particle_type - assert amr1._get_field_info("all", "particle_position_z").particle_type - assert amr1._get_field_info("all", "particle_mass").particle_type - assert not amr1._get_field_info("gas", "density").particle_type + assert amr1._get_field_info("all", "particle_position_x").sampling_type == "particle" + assert amr1._get_field_info("all", "particle_position_y").sampling_type == "particle" + assert amr1._get_field_info("all", "particle_position_z").sampling_type == "particle" + assert amr1._get_field_info("all", "particle_mass").sampling_type == "particle" + assert not amr1._get_field_info("gas", "density").sampling_type == "particle" - assert amr2._get_field_info("all", "particle_position_x").particle_type - assert amr2._get_field_info("all", "particle_position_y").particle_type - assert amr2._get_field_info("all", "particle_position_z").particle_type - assert amr2._get_field_info("all", "particle_mass").particle_type - assert not amr2._get_field_info("gas", "density").particle_type + assert amr2._get_field_info("all", "particle_position_x").sampling_type == "particle" + assert amr2._get_field_info("all", "particle_position_y").sampling_type == "particle" + assert amr2._get_field_info("all", "particle_position_z").sampling_type == "particle" + assert amr2._get_field_info("all", "particle_mass").sampling_type == "particle" + assert not amr2._get_field_info("gas", "density").sampling_type == "particle" - # Now perform similar checks, but with multiple particle types +# Now perform similar checks, but with multiple particle types num_dm_particles = 30000 xd = np.random.uniform(size=num_dm_particles) @@ -194,14 +196,14 @@ def test_stream_particles(): # Check to make sure the fields have been defined correctly for ptype in ("dm", "star"): - assert ug3._get_field_info(ptype, "particle_position_x").particle_type - assert ug3._get_field_info(ptype, "particle_position_y").particle_type - assert ug3._get_field_info(ptype, "particle_position_z").particle_type - assert ug3._get_field_info(ptype, "particle_mass").particle_type - assert ug4._get_field_info(ptype, "particle_position_x").particle_type - assert ug4._get_field_info(ptype, "particle_position_y").particle_type - assert ug4._get_field_info(ptype, "particle_position_z").particle_type - assert ug4._get_field_info(ptype, "particle_mass").particle_type + assert ug3._get_field_info(ptype, "particle_position_x").sampling_type == "particle" + assert ug3._get_field_info(ptype, "particle_position_y").sampling_type == "particle" + assert ug3._get_field_info(ptype, "particle_position_z").sampling_type == "particle" + assert ug3._get_field_info(ptype, "particle_mass").sampling_type == "particle" + assert ug4._get_field_info(ptype, "particle_position_x").sampling_type == "particle" + assert ug4._get_field_info(ptype, "particle_position_y").sampling_type == "particle" + assert ug4._get_field_info(ptype, "particle_position_z").sampling_type == "particle" + assert ug4._get_field_info(ptype, "particle_mass").sampling_type == "particle" # Now refine this @@ -219,7 +221,7 @@ def test_stream_particles(): dimensions=grid.ActiveDimensions) for field in 
amr3.field_list: - if field[0] != "all": + if field[0] not in ("all", "nbody"): data[field] = grid[field] grid_data.append(data) @@ -235,14 +237,14 @@ def test_stream_particles(): assert_equal(number_of_particles3, number_of_particles4) for ptype in ("dm", "star"): - assert amr3._get_field_info(ptype, "particle_position_x").particle_type - assert amr3._get_field_info(ptype, "particle_position_y").particle_type - assert amr3._get_field_info(ptype, "particle_position_z").particle_type - assert amr3._get_field_info(ptype, "particle_mass").particle_type - assert amr4._get_field_info(ptype, "particle_position_x").particle_type - assert amr4._get_field_info(ptype, "particle_position_y").particle_type - assert amr4._get_field_info(ptype, "particle_position_z").particle_type - assert amr4._get_field_info(ptype, "particle_mass").particle_type + assert amr3._get_field_info(ptype, "particle_position_x").sampling_type == "particle" + assert amr3._get_field_info(ptype, "particle_position_y").sampling_type == "particle" + assert amr3._get_field_info(ptype, "particle_position_z").sampling_type == "particle" + assert amr3._get_field_info(ptype, "particle_mass").sampling_type == "particle" + assert amr4._get_field_info(ptype, "particle_position_x").sampling_type == "particle" + assert amr4._get_field_info(ptype, "particle_position_y").sampling_type == "particle" + assert amr4._get_field_info(ptype, "particle_position_z").sampling_type == "particle" + assert amr4._get_field_info(ptype, "particle_mass").sampling_type == "particle" for grid in amr3.index.grids: tot_parts = grid["dm", "particle_position_x"].size @@ -270,13 +272,14 @@ def test_load_particles_types(): ds1 = load_particles(data1) ds1.index - assert set(ds1.particle_types) == {"all", "io"} + assert set(ds1.particle_types) == {"all", "io", "nbody"} dd = ds1.all_data() for ax in "xyz": assert dd["io", "particle_position_%s" % ax].size == num_particles assert dd["all", "particle_position_%s" % ax].size == num_particles + assert dd["nbody", "particle_position_%s" % ax].size == num_particles num_dm_particles = 10000 num_star_particles = 50000 @@ -294,7 +297,7 @@ def test_load_particles_types(): ds2 = load_particles(data2) ds2.index - assert set(ds2.particle_types) == {"all", "star", "dm"} + assert set(ds2.particle_types) == {"all", "star", "dm", "nbody"} dd = ds2.all_data() @@ -305,6 +308,45 @@ def test_load_particles_types(): assert npart == num_tot_particles assert dd["all", "particle_position_%s" % ax].size == num_tot_particles +def test_load_particles_with_data_source(): + ds1 = fake_particle_ds() + + # Load from dataset + ad = ds1.all_data() + fields = ['particle_mass'] + fields += ['particle_position_{}'.format(ax) for ax in 'xyz'] + data = {field: ad[field] for field in fields} + ds2 = load_particles(data, data_source=ad) + + def in_cgs(quan): + return quan.in_cgs().v + + # Test bbox is parsed correctly + for attr in ['domain_left_edge', 'domain_right_edge']: + assert np.allclose( + in_cgs(getattr(ds1, attr)), + in_cgs(getattr(ds2, attr)) + ) + + # Test sim_time is parsed correctly + assert in_cgs(ds1.current_time) == in_cgs(ds2.current_time) + + # Test code units are parsed correctly + def get_cu(ds, dim): + return ds.quan(1, 'code_' + dim) + for dim in ['length', 'mass', 'time', 'velocity', 'magnetic']: + assert in_cgs(get_cu(ds1, dim)) == in_cgs(get_cu(ds2, dim)) + +def test_add_sph_fields(): + ds = fake_particle_ds() + ds.index + assert set(ds.particle_types) == {'io', 'all', 'nbody'} + + ds.add_sph_fields() + assert set(ds.particle_types) 
== {'io', 'all'} + assert ('io', 'smoothing_length') in ds.field_list + assert ('io', 'density') in ds.field_list + def test_particles_outside_domain(): np.random.seed(0x4d3d3d3) posx_arr = np.random.uniform(low=-1.6, high=1.5, size=1000) @@ -322,3 +364,11 @@ def test_particles_outside_domain(): assert wh.size == 1000 - ds.particle_type_counts['io'] ad = ds.all_data() assert ds.particle_type_counts['io'] == ad['particle_position_x'].size + +def test_stream_sph_projection(): + ds = fake_sph_orientation_ds() + proj = ds.proj(('gas', 'density'), 2) + frb = proj.to_frb(ds.domain_width[0], (256, 256)) + image = frb['gas', 'density'] + assert image.max() > 0 + assert image.shape == (256, 256) diff --git a/yt/frontends/stream/tests/test_update_data.py b/yt/frontends/stream/tests/test_update_data.py index 842a5e0314c..195031ab9f4 100644 --- a/yt/frontends/stream/tests/test_update_data.py +++ b/yt/frontends/stream/tests/test_update_data.py @@ -1,12 +1,14 @@ -from yt.testing import fake_random_ds +import numpy as np +from yt.testing import \ + fake_particle_ds, \ + fake_random_ds from yt.data_objects.profiles import create_profile -from numpy.random import uniform -def test_update_data(): +def test_update_data_grid(): ds = fake_random_ds(64, nprocs=8) ds.index dims = (32,32,32) - grid_data = [{"temperature": uniform(size=dims)} + grid_data = [{"temperature": np.random.uniform(size=dims)} for i in range(ds.index.num_grids)] ds.index.update_data(grid_data) prj = ds.proj("temperature", 2) @@ -14,3 +16,12 @@ def test_update_data(): dd = ds.all_data() profile = create_profile(dd, "density", "temperature", 10) profile["temperature"] + +def test_update_data_particle(): + npart = 100 + ds = fake_particle_ds(npart=npart) + part_data = {"temperature": np.random.rand(npart)} + ds.index.update_data(part_data) + assert ("io", "temperature") in ds.field_list + dd = ds.all_data() + dd["temperature"] diff --git a/yt/analysis_modules/cosmological_observation/light_ray/__init__.py b/yt/frontends/swift/__init__.py similarity index 100% rename from yt/analysis_modules/cosmological_observation/light_ray/__init__.py rename to yt/frontends/swift/__init__.py diff --git a/yt/frontends/swift/api.py b/yt/frontends/swift/api.py new file mode 100644 index 00000000000..960d56cdf62 --- /dev/null +++ b/yt/frontends/swift/api.py @@ -0,0 +1,10 @@ +from .data_structures import \ + SwiftDataset + +from yt.frontends.sph.fields import \ + SPHFieldInfo + +from .io import \ + IOHandlerSwift + +from . 
import tests diff --git a/yt/frontends/swift/data_structures.py b/yt/frontends/swift/data_structures.py new file mode 100644 index 00000000000..d56360dc157 --- /dev/null +++ b/yt/frontends/swift/data_structures.py @@ -0,0 +1,183 @@ +import numpy as np +from yt.utilities.on_demand_imports import _h5py as h5py +from uuid import uuid4 + +from yt.utilities.logger import ytLogger as mylog +from yt.frontends.sph.data_structures import \ + SPHDataset, \ + SPHParticleIndex +from yt.frontends.sph.fields import SPHFieldInfo +from yt.data_objects.static_output import \ + ParticleFile +from yt.funcs import only_on_root + +class SwiftDataset(SPHDataset): + _index_class = SPHParticleIndex + _field_info_class = SPHFieldInfo + _file_class = ParticleFile + + _particle_mass_name = "Masses" + _particle_coordinates_name = "Coordinates" + _particle_velocity_name = "Velocities" + _sph_ptypes = ("PartType0",) + _suffix = ".hdf5" + + def __init__(self, filename, dataset_type='swift', + storage_filename=None, + units_override=None): + + self.filename = filename + + super().__init__(filename, dataset_type, units_override=units_override) + self.storage_filename = storage_filename + + def _set_code_unit_attributes(self): + """ + Sets the units from the SWIFT internal unit system. + + Currently sets length, mass, time, and temperature. + + SWIFT uses comoving co-ordinates without the usual h-factors. + """ + units = self._get_info_attributes("Units") + + if self.cosmological_simulation == 1: + msg = "Assuming length units are in comoving centimetres" + only_on_root(mylog.info, msg) + self.length_unit = self.quan( + float(units["Unit length in cgs (U_L)"]), "cmcm") + else: + msg = "Assuming length units are in physical centimetres" + only_on_root(mylog.info, msg) + self.length_unit = self.quan( + float(units["Unit length in cgs (U_L)"]), "cm") + + self.mass_unit = self.quan( + float(units["Unit mass in cgs (U_M)"]), "g") + self.time_unit = self.quan( + float(units["Unit time in cgs (U_t)"]), "s") + self.temperature_unit = self.quan( + float(units["Unit temperature in cgs (U_T)"]), "K") + + return + + def _get_info_attributes(self, dataset): + """ + Gets the information from a header-style dataset and returns it as a + python dictionary. + + Example: self._get_info_attributes(header) returns a dictionary of all + of the information in the Header.attrs. + """ + + with h5py.File(self.filename, "r") as handle: + header = dict(handle[dataset].attrs) + + return header + + def _parse_parameter_file(self): + """ + Parse the SWIFT "parameter file" -- really this actually reads info + from the main HDF5 file as everything is replicated there and usually + parameterfiles are not transported. + + The header information from the HDF5 file is stored in an un-parsed + format in self.parameters should users wish to use it. + """ + + self.unique_identifier = uuid4() + + # Read from the HDF5 file, this gives us all the info we need. The rest + # of this function is just parsing. 
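# Illustrative aside (not part of the patch): the ``_get_info_attributes`` helper
# defined above is a thin wrapper around h5py attribute access, so the calls below
# boil down to the standalone sketch that follows.  The snapshot filename is
# hypothetical; the group and attribute names are the ones this frontend relies on.
import h5py

def read_swift_group_attrs(filename, group):
    # Read every attribute of an HDF5 group (e.g. "Header", "Units") into a dict.
    with h5py.File(filename, "r") as handle:
        return dict(handle[group].attrs)

# Hypothetical usage mirroring the parsing below:
# header = read_swift_group_attrs("snapshot_0000.hdf5", "Header")
# box_size = header["BoxSize"]           # code-length box extent
# current_time = float(header["Time"])   # simulation time in code units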
+ header = self._get_info_attributes("Header") + runtime_parameters = self._get_info_attributes("RuntimePars") + + policy = self._get_info_attributes("Policy") + # These are the parameter file parameters from the *.yml file at runtime + parameters = self._get_info_attributes("Parameters") + + # Not used in this function, but passed to parameters + hydro = self._get_info_attributes("HydroScheme") + subgrid = self._get_info_attributes("SubgridScheme") + + self.domain_right_edge = header["BoxSize"] + self.domain_left_edge = np.zeros_like(self.domain_right_edge) + + self.dimensionality = int(header["Dimension"]) + + # SWIFT is either all periodic, or not periodic at all + periodic = int(runtime_parameters["PeriodicBoundariesOn"]) + + if periodic: + self.periodicity = [True] * self.dimensionality + else: + self.periodicity = [False] * self.dimensionality + + # Units get attached to this + self.current_time = float(header["Time"]) + + # Now cosmology enters the fray, as a runtime parameter. + self.cosmological_simulation = int(policy["cosmological integration"]) + + if self.cosmological_simulation: + try: + self.current_redshift = float(header["Redshift"]) + # These won't be present if self.cosmological_simulation is false + self.omega_lambda = float(parameters["Cosmology:Omega_lambda"]) + self.omega_matter = float(parameters["Cosmology:Omega_m"]) + # This is "little h" + self.hubble_constant = float(parameters["Cosmology:h"]) + except KeyError: + mylog.warn( + ("Could not find cosmology information in Parameters," + + " despite having run with -c, signifying a cosmological" + + " run.") + ) + mylog.info( + "Setting up as a non-cosmological run. Check this!" + ) + self.cosmological_simulation = 0 + self.current_redshift = 0.0 + self.omega_lambda = 0.0 + self.omega_matter = 0.0 + self.hubble_constant = 0.0 + else: + self.current_redshift = 0.0 + self.omega_lambda = 0.0 + self.omega_matter = 0.0 + self.hubble_constant = 0.0 + + + # Store the un-parsed information, should people want it. + self.parameters = dict( + header=header, + runtime_parameters=runtime_parameters, + policy=policy, + parameters=parameters, + hydro=hydro, + subgrid=subgrid) + + # SWIFT never has multi-file snapshots + self.file_count = 1 + self.filename_template = self.parameter_filename + + return + + @classmethod + def _is_valid(self, *args, **kwargs): + """ + Checks to see if the file is a valid output from SWIFT. + This requires the file to have the Code attribute set in the + Header dataset to "SWIFT". + """ + filename = args[0] + valid = True + # Attempt to open the file; if it's not an HDF5 file then this will fail: + try: + handle = h5py.File(filename, "r") + valid = handle["Header"].attrs["Code"].decode("utf-8") == "SWIFT" + handle.close() + except (IOError, KeyError, ImportError): + valid = False + + return valid diff --git a/yt/frontends/swift/io.py b/yt/frontends/swift/io.py new file mode 100644 index 00000000000..54a529d85d6 --- /dev/null +++ b/yt/frontends/swift/io.py @@ -0,0 +1,148 @@ +from yt.utilities.on_demand_imports import _h5py as h5py +import numpy as np + +from yt.frontends.sph.io import \ + IOHandlerSPH + +class IOHandlerSwift(IOHandlerSPH): + _dataset_type = "swift" + + def __init__(self, ds, *args, **kwargs): + super(IOHandlerSwift, self).__init__(ds, *args, **kwargs) + + def _read_fluid_selection(self, chunks, selector, fields, size): + raise NotImplementedError + + # NOTE: we refer to sub_files in the next sections; these sub_files may + # actually be full data_files.
+ # In the event data_files are too big, yt breaks them up into sub_files and + # we sort of treat them as files in the chunking system + def _read_particle_coords(self, chunks, ptf): + # This will read chunks and yield the results. + # yt has the concept of sub_files, i.e, we break up big files into + # virtual sub_files to deal with the chunking system + chunks = list(chunks) + sub_files = set([]) + for chunk in chunks: + for obj in chunk.objs: + sub_files.update(obj.data_files) + for sub_file in sorted(sub_files, key=lambda x: x.filename): + si, ei = sub_file.start, sub_file.end + f = h5py.File(sub_file.filename, "r") + # This double-reads + for ptype, field_list in sorted(ptf.items()): + if sub_file.total_particles[ptype] == 0: + continue + pos = f["/%s/Coordinates" % ptype][si:ei, :] + pos = pos.astype("float64", copy=False) + if ptype == self.ds._sph_ptypes[0]: + hsml = self._get_smoothing_length(sub_file) + else: + hsml = 0.0 + yield ptype, (pos[:, 0], pos[:, 1], pos[:, 2]), hsml + f.close() + + def _yield_coordinates(self, sub_file, needed_ptype=None): + si, ei = sub_file.start, sub_file.end + f = h5py.File(sub_file.filename, "r") + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") + np.clip(pcount - si, 0, ei - si, out=pcount) + pcount = pcount.sum() + for key in f.keys(): + if (not key.startswith("PartType") or "Coordinates" not in f[key] + or needed_ptype and key != needed_ptype): + continue + pos = f[key]["Coordinates"][si:ei,...] + pos = pos.astype("float64", copy=False) + yield key, pos + f.close() + + def _get_smoothing_length(self, sub_file, pdtype=None, pshape=None): + # We do not need the pdtype and the pshape, but some frontends do so we + # accept them and then just ignore them + ptype = self.ds._sph_ptypes[0] + ind = int(ptype[-1]) + si, ei = sub_file.start, sub_file.end + with h5py.File(sub_file.filename, "r") as f: + pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") + pcount = np.clip(pcount - si, 0, ei - si) + # we upscale to float64 + hsml = f[ptype]["SmoothingLength"][si:ei,...] + hsml = hsml.astype("float64", copy=False) + return hsml + + def _read_particle_fields(self, chunks, ptf, selector): + # Now we have all the sizes, and we can allocate + sub_files = set([]) + for chunk in chunks: + for obj in chunk.objs: + sub_files.update(obj.data_files) + + for sub_file in sorted(sub_files, key=lambda x: x.filename): + si, ei = sub_file.start, sub_file.end + f = h5py.File(sub_file.filename, "r") + for ptype, field_list in sorted(ptf.items()): + if sub_file.total_particles[ptype] == 0: + continue + g = f["/%s" % ptype] + # this should load as float64 + coords = g["Coordinates"][si:ei] + if ptype == 'PartType0': + hsmls = self._get_smoothing_length(sub_file) + else: + hsmls = 0.0 + mask = selector.select_points( + coords[:,0], coords[:,1], coords[:,2], hsmls) + del coords + if mask is None: + continue + for field in field_list: + if field in ("Mass", "Masses"): + data = g[self.ds._particle_mass_name][si:ei][mask, ...] + else: + data = g[field][si:ei][mask, ...] 
+ + data = data.astype("float64", copy=False) + yield (ptype, field), data + f.close() + + def _count_particles(self, data_file): + si, ei = data_file.start, data_file.end + f = h5py.File(data_file.filename, "r") + pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") + f.close() + # if this data_file was a sub_file, then we just extract the region + # defined by the subfile + if None not in (si, ei): + np.clip(pcount - si, 0, ei - si, out=pcount) + npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) + return npart + + def _identify_fields(self, data_file): + f = h5py.File(data_file.filename, "r") + fields = [] + cname = self.ds._particle_coordinates_name # Coordinates + mname = self.ds._particle_mass_name # Masses + + for key in f.keys(): + if not key.startswith("PartType"): + continue + + g = f[key] + if cname not in g: + continue + + ptype = str(key) + for k in g.keys(): + kk = k + if str(kk) == mname: + fields.append((ptype, "Mass")) + continue + if not hasattr(g[kk], "shape"): + continue + if len(g[kk].shape) > 1: + self._vector_fields[kk] = g[kk].shape[1] + fields.append((ptype, str(kk))) + + f.close() + return fields, {} diff --git a/yt/analysis_modules/cosmological_observation/light_ray/tests/__init__.py b/yt/frontends/swift/tests/__init__.py similarity index 100% rename from yt/analysis_modules/cosmological_observation/light_ray/tests/__init__.py rename to yt/frontends/swift/tests/__init__.py diff --git a/yt/frontends/swift/tests/test_outputs.py b/yt/frontends/swift/tests/test_outputs.py new file mode 100644 index 00000000000..0e0ff6126e1 --- /dev/null +++ b/yt/frontends/swift/tests/test_outputs.py @@ -0,0 +1,108 @@ +from yt import load + +import numpy as np + +from yt.testing import requires_file +from yt.frontends.swift.api import SwiftDataset +from yt.testing import assert_almost_equal, ParticleSelectionComparison +from yt.utilities.on_demand_imports import _h5py as h5py + +keplerian_ring = "KeplerianRing/keplerian_ring_0020.hdf5" +EAGLE_6 = "EAGLE_6/eagle_0005.hdf5" + +# Combined the tests for loading a file and ensuring the units have been +# implemented correctly to save time on re-loading a dataset +@requires_file(keplerian_ring) +def test_non_cosmo_dataset(): + ds = load(keplerian_ring) + assert(type(ds) == SwiftDataset) + + field = ('gas', 'density') + ad = ds.all_data() + yt_density = ad[field] + yt_coords = ad[(field[0], 'position')] + + # load some data the old-fashioned way + fh = h5py.File(ds.parameter_filename, "r") + part_data = fh['PartType0'] + + # set up a conversion factor by loading the unit mass and unit length in cm, + # and then converting to proper coordinates + units = fh["Units"] + units = dict(units.attrs) + density_factor = float(units["Unit mass in cgs (U_M)"]) + density_factor /= float(units["Unit length in cgs (U_L)"])**3 + + # now load the raw density and coordinates + raw_density = part_data["Density"][:].astype("float64") * density_factor + raw_coords = part_data["Coordinates"][:].astype("float64") + fh.close() + + # sort by the positions - yt often loads in a different order + ind_raw = np.lexsort((raw_coords[:, 2], raw_coords[:, 1], + raw_coords[:, 0])) + ind_yt = np.lexsort((yt_coords[:, 2], yt_coords[:, 1], yt_coords[:, 0])) + raw_density = raw_density[ind_raw] + yt_density = yt_density[ind_yt] + + # make sure we are comparing fair units + assert(str(yt_density.units) == 'g/cm**3') + + # make sure the actual values are the same + assert_almost_equal(yt_density.d, raw_density) + +@requires_file(keplerian_ring) +def
test_non_cosmo_dataset_selection(): + ds = load(keplerian_ring) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() + +@requires_file(EAGLE_6) +def test_cosmo_dataset(): + ds = load(EAGLE_6) + assert(type(ds) == SwiftDataset) + + field = ('gas', 'density') + ad = ds.all_data() + yt_density = ad[field] + yt_coords = ad[(field[0], 'position')] + + # load some data the old-fashioned way + fh = h5py.File(ds.parameter_filename, "r") + part_data = fh['PartType0'] + + # set up a conversion factor by loading the unit mass and unit length in cm, + # and then converting to proper coordinates + units = fh["Units"] + units = dict(units.attrs) + density_factor = float(units["Unit mass in cgs (U_M)"]) + density_factor /= float(units["Unit length in cgs (U_L)"])**3 + + # add the redshift factor + header = fh["Header"] + header = dict(header.attrs) + density_factor *= (1.0 + float(header["Redshift"]))**3 + + # now load the raw density and coordinates + raw_density = part_data["Density"][:].astype("float64") * density_factor + raw_coords = part_data["Coordinates"][:].astype("float64") + fh.close() + + # sort by the positions - yt often loads in a different order + ind_raw = np.lexsort((raw_coords[:, 2], raw_coords[:, 1], + raw_coords[:, 0])) + ind_yt = np.lexsort((yt_coords[:, 2], yt_coords[:, 1], yt_coords[:, 0])) + raw_density = raw_density[ind_raw] + yt_density = yt_density[ind_yt] + + # make sure we are comparing fair units + assert(str(yt_density.units) == 'g/cm**3') + + # make sure the actual values are the same + assert_almost_equal(yt_density.d, raw_density) + +@requires_file(EAGLE_6) +def test_cosmo_dataset_selection(): + ds = load(EAGLE_6) + psc = ParticleSelectionComparison(ds) + psc.run_defaults() diff --git a/yt/frontends/tipsy/api.py b/yt/frontends/tipsy/api.py index e56d280f98d..a0fd0ad2986 100644 --- a/yt/frontends/tipsy/api.py +++ b/yt/frontends/tipsy/api.py @@ -1,19 +1,3 @@ -""" -API for Tipsy frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ TipsyDataset diff --git a/yt/frontends/tipsy/data_structures.py b/yt/frontends/tipsy/data_structures.py index 2c5e8c584ae..5eb230d577e 100644 --- a/yt/frontends/tipsy/data_structures.py +++ b/yt/frontends/tipsy/data_structures.py @@ -1,20 +1,3 @@ -""" -Data structures for Tipsy frontend - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software.
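# Illustrative aside (not part of the patch): the unit conversion exercised by
# the two SWIFT answer tests above, written out on its own.  A code-unit density
# becomes a proper CGS density via U_M / U_L**3, and the cosmological snapshot
# picks up an extra (1 + z)**3 factor when going from comoving to proper units.
# The numbers in the commented usage line are made up for the example.
import numpy as np

def swift_density_to_cgs(rho_code, u_mass_cgs, u_length_cgs, redshift=0.0):
    # Convert a (comoving) code density to a proper density in g/cm**3.
    factor = u_mass_cgs / u_length_cgs**3
    factor *= (1.0 + redshift)**3
    return np.asarray(rho_code, dtype="float64") * factor

# example_rho = swift_density_to_cgs(1.0e-4, 1.989e43, 3.086e24, redshift=0.1)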
-#----------------------------------------------------------------------------- - import numpy as np import stat import struct @@ -22,10 +5,9 @@ import os from yt.frontends.sph.data_structures import \ - SPHDataset + SPHDataset, \ + SPHParticleIndex from yt.funcs import deprecate -from yt.geometry.particle_geometry_handler import \ - ParticleIndex from yt.data_objects.static_output import \ ParticleFile from yt.utilities.cosmology import \ @@ -43,23 +25,24 @@ long = int class TipsyFile(ParticleFile): - def __init__(self, ds, io, filename, file_id): - # To go above 1 domain, we need to include an indexing step in the - # IOHandler, rather than simply reading from a single file. - assert file_id == 0 - super(TipsyFile, self).__init__(ds, io, filename, file_id) - io._create_dtypes(self) - io._update_domain(self)#Check automatically what the domain size is + def __init__(self, ds, io, filename, file_id, range=None): + super(TipsyFile, self).__init__(ds, io, filename, file_id, range) + if not hasattr(io, '_field_list'): + io._create_dtypes(self) + # Check automatically what the domain size is + io._update_domain(self) + self._calculate_offsets(io._field_list) - def _calculate_offsets(self, field_list): - self.field_offsets = self.io._calculate_particle_offsets(self) + def _calculate_offsets(self, field_list, pcounts=None): + self.field_offsets = self.io._calculate_particle_offsets(self, None) class TipsyDataset(SPHDataset): - _index_class = ParticleIndex + _index_class = SPHParticleIndex _file_class = TipsyFile _field_info_class = TipsyFieldInfo _particle_mass_name = "Mass" _particle_coordinates_name = "Coordinates" + _sph_ptypes = ("Gas",) _header_spec = (('time', 'd'), ('nbodies', 'i'), ('ndim', 'i'), @@ -73,7 +56,9 @@ def __init__(self, filename, dataset_type="tipsy", unit_base=None, parameter_file=None, cosmology_parameters=None, - n_ref=64, over_refine_factor=1, + index_order=None, + index_filename=None, + kdtree_filename=None, kernel_name=None, bounding_box=None, units_override=None, @@ -114,8 +99,8 @@ def __init__(self, filename, dataset_type="tipsy", "Use unit_base instead.") super(TipsyDataset, self).__init__( filename, dataset_type=dataset_type, unit_system=unit_system, - n_ref=n_ref, over_refine_factor=over_refine_factor, - kernel_name=kernel_name) + index_order=index_order, index_filename=index_filename, + kdtree_filename=kdtree_filename, kernel_name=kernel_name) def __repr__(self): return os.path.basename(self.parameter_filename) @@ -171,8 +156,7 @@ def _parse_parameter_file(self): self.parameters[param] = val self.current_time = hvals["time"] - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + self.domain_dimensions = np.ones(3, "int32") periodic = self.parameters.get('bPeriodic', True) period = self.parameters.get('dPeriod', None) self.periodicity = (periodic, periodic, periodic) @@ -190,6 +174,8 @@ def _parse_parameter_file(self): self.domain_left_edge = None self.domain_right_edge = None else: + # This ensures that we know a bounding box has been applied + self._domain_override = True bbox = np.array(self.bounding_box, dtype="float64") if bbox.shape == (2, 3): bbox = bbox.transpose() @@ -248,7 +234,7 @@ def _set_code_unit_attributes(self): self.hubble_constant /= self.quan(100, 'km/s/Mpc') # If we leave it as a YTQuantity, the cosmology object # used below will add units back on. 
- self.hubble_constant = self.hubble_constant.in_units("").d + self.hubble_constant = self.hubble_constant.to_value("") else: mu = self.parameters.get('dMsolUnit', 1.0) self.mass_unit = self.quan(mu, 'Msun') @@ -282,7 +268,7 @@ def _set_code_unit_attributes(self): density_unit = self.mass_unit / self.length_unit**3 if not hasattr(self, "time_unit"): - self.time_unit = 1.0 / np.sqrt(G * density_unit) + self.time_unit = 1.0 / np.sqrt(density_unit * G) @staticmethod def _validate_header(filename): @@ -330,3 +316,8 @@ def _is_valid(self, *args, **kwargs): @deprecate(replacement='cosmological_simulation') def comoving(self): return self.cosmological_simulation == 1.0 + + # _instantiated_index = None + # @property + # def index(self): + # index_nosoft = super(TipsyDataset, self).index diff --git a/yt/frontends/tipsy/definitions.py b/yt/frontends/tipsy/definitions.py new file mode 100644 index 00000000000..ae9b2a4446d --- /dev/null +++ b/yt/frontends/tipsy/definitions.py @@ -0,0 +1,5 @@ +npart_mapping = { + 'Gas': 'nsph', + 'DarkMatter': 'ndark', + 'Stars': 'nstar' +} diff --git a/yt/frontends/tipsy/fields.py b/yt/frontends/tipsy/fields.py index 4614d9746a0..2cfd0c04a20 100644 --- a/yt/frontends/tipsy/fields.py +++ b/yt/frontends/tipsy/fields.py @@ -1,24 +1,8 @@ -""" -Tipsy fields - - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.frontends.sph.fields import SPHFieldInfo -from yt.fields.particle_fields import add_nearest_neighbor_field class TipsyFieldInfo(SPHFieldInfo): + known_particle_fields = SPHFieldInfo.known_particle_fields + \ + (("smoothing_length", ("code_length", [], None)),) aux_particle_fields = { 'uDotFB':("uDotFB", ("code_mass * code_velocity**2", [""], None)), 'uDotAV':("uDotAV", ("code_mass * code_velocity**2", [""], None)), @@ -46,33 +30,3 @@ def __init__(self, ds, field_list, slice_info = None): self.aux_particle_fields[field[1]] not in self.known_particle_fields: self.known_particle_fields += (self.aux_particle_fields[field[1]],) super(TipsyFieldInfo,self).__init__(ds, field_list, slice_info) - - def setup_particle_fields(self, ptype, *args, **kwargs): - - # setup some special fields that only make sense for SPH particles - - if ptype in ("PartType0", "Gas"): - self.setup_gas_particle_fields(ptype) - - super(TipsyFieldInfo, self).setup_particle_fields( - ptype, *args, **kwargs) - - - def setup_gas_particle_fields(self, ptype): - - num_neighbors = 65 - fn, = add_nearest_neighbor_field(ptype, "particle_position", self, num_neighbors) - def _func(): - def _smoothing_length(field, data): - # For now, we hardcode num_neighbors. We should make this configurable - # in the future. 
- rv = data[ptype, 'nearest_neighbor_distance_%d' % num_neighbors] - #np.maximum(rv, 0.5*data[ptype, "Epsilon"], rv) - return rv - return _smoothing_length - - self.add_field( - (ptype, "smoothing_length"), - sampling_type="particle", - function=_func(), - units="code_length") diff --git a/yt/frontends/tipsy/io.py b/yt/frontends/tipsy/io.py index 8fb419871ab..11dcceffce8 100644 --- a/yt/frontends/tipsy/io.py +++ b/yt/frontends/tipsy/io.py @@ -1,36 +1,22 @@ -""" -Tipsy data-file handling function - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import glob import numpy as np from numpy.lib.recfunctions import append_fields import os - -from yt.utilities.io_handler import \ - BaseIOHandler -from yt.utilities.lib.geometry_utils import \ - compute_morton +import struct + +from yt.geometry.particle_geometry_handler import \ + CHUNKSIZE +from yt.frontends.sph.io import \ + IOHandlerSPH +from yt.frontends.tipsy.definitions import \ + npart_mapping +from yt.utilities.lib.particle_kdtree_tools import \ + generate_smoothing_length from yt.utilities.logger import ytLogger as \ mylog -CHUNKSIZE = 10000000 - -class IOHandlerTipsyBinary(BaseIOHandler): +class IOHandlerTipsyBinary(IOHandlerSPH): _dataset_type = "tipsy" _vector_fields = ("Coordinates", "Velocity", "Velocities") @@ -40,7 +26,7 @@ class IOHandlerTipsyBinary(BaseIOHandler): _ptypes = ("Gas", "DarkMatter", "Stars") - _chunksize = 64 * 64 * 64 + _chunksize = CHUNKSIZE _aux_fields = None _fields = (("Gas", "Mass"), @@ -72,9 +58,11 @@ def __init__(self, *args, **kwargs): def _read_fluid_selection(self, chunks, selector, fields, size): raise NotImplementedError - def _fill_fields(self, fields, vals, mask, data_file): + def _fill_fields(self, fields, vals, hsml, mask, data_file): if mask is None: size = 0 + elif isinstance(mask, slice): + size = vals[fields[0]].size else: size = mask.sum() rv = {} @@ -87,6 +75,8 @@ def _fill_fields(self, fields, vals, mask, data_file): rv[field][:, 0] = vals[field]['x'][mask] rv[field][:, 1] = vals[field]['y'][mask] rv[field][:, 2] = vals[field]['z'][mask] + elif field == 'smoothing_length': + rv[field] = hsml[mask] else: rv[field] = np.empty(size, dtype="float64") if size == 0: @@ -95,9 +85,10 @@ def _fill_fields(self, fields, vals, mask, data_file): if field == "Coordinates": eps = np.finfo(rv[field].dtype).eps for i in range(3): - rv[field][:, i] = np.clip(rv[field][:, i], - self.domain_left_edge[i] + eps, - self.domain_right_edge[i] - eps) + rv[field][:, i] = np.clip( + rv[field][:, i], + self.ds.domain_left_edge[i].v + eps, + self.ds.domain_right_edge[i].v - eps) return rv def _read_particle_coords(self, chunks, ptf): @@ -105,13 +96,15 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): poff = data_file.field_offsets tp = data_file.total_particles f = open(data_file.filename, "rb") for ptype, field_list in sorted(ptf.items(), - key=lambda a: poff[a[0]]): - f.seek(poff[ptype], os.SEEK_SET) + key=lambda a: poff.get(a[0], -1)): + if data_file.total_particles[ptype] == 0: + 
continue + f.seek(poff[ptype]) total = 0 while total < tp[ptype]: count = min(self._chunksize, tp[ptype] - total) @@ -120,7 +113,49 @@ def _read_particle_coords(self, chunks, ptf): d = [p["Coordinates"][ax].astype("float64") for ax in 'xyz'] del p - yield ptype, d + if ptype == self.ds._sph_ptypes[0]: + hsml = self._read_smoothing_length(data_file, count) + else: + hsml = 0.0 + yield ptype, d, hsml + + @property + def hsml_filename(self): + return '%s-%s' % (self.ds.parameter_filename, 'hsml') + + def _generate_smoothing_length(self, data_files, kdtree): + if os.path.exists(self.hsml_filename): + with open(self.hsml_filename, 'rb') as f: + file_hash = struct.unpack('q', f.read(struct.calcsize('q')))[0] + if file_hash != self.ds._file_hash: + os.remove(self.hsml_filename) + else: + return + positions = [] + for data_file in data_files: + for _, ppos in self._yield_coordinates( + data_file, needed_ptype=self.ds._sph_ptypes[0]): + positions.append(ppos) + if positions == []: + return + positions = np.concatenate(positions)[kdtree.idx] + hsml = generate_smoothing_length( + positions, kdtree, self.ds._num_neighbors) + hsml = hsml[np.argsort(kdtree.idx)] + dtype = self._pdtypes['Gas']['Coordinates'][0] + with open(self.hsml_filename, 'wb') as f: + f.write(struct.pack('q', self.ds._file_hash)) + f.write(hsml.astype(dtype).tostring()) + + def _read_smoothing_length(self, data_file, count): + dtype = self._pdtypes['Gas']['Coordinates'][0] + with open(self.hsml_filename, 'rb') as f: + f.seek(struct.calcsize('q') + data_file.start*dtype.itemsize) + hsmls = np.fromfile(f, dtype, count=count) + return hsmls.astype('float64') + + def _get_smoothing_length(self, data_file, dtype, shape): + return self._read_smoothing_length(data_file, shape[0]) def _read_particle_fields(self, chunks, ptf, selector): chunks = list(chunks) @@ -128,10 +163,9 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): poff = data_file.field_offsets - aux_fields_offsets = \ - self._calculate_particle_offsets_aux(data_file) + aux_fields_offsets = self._calculate_particle_offsets_aux(data_file) tp = data_file.total_particles f = open(data_file.filename, "rb") @@ -141,52 +175,55 @@ def _read_particle_fields(self, chunks, ptf, selector): aux_fh[afield] = open(data_file.filename + '.' 
+ afield, 'rb') for ptype, field_list in sorted(ptf.items(), - key=lambda a: poff[a[0]]): - f.seek(poff[ptype], os.SEEK_SET) + key=lambda a: poff.get(a[0], -1)): + if data_file.total_particles[ptype] == 0: + continue + f.seek(poff[ptype]) afields = list(set(field_list).intersection(self._aux_fields)) + count = min(self._chunksize, tp[ptype]) + p = np.fromfile(f, self._pdtypes[ptype], count=count) + auxdata = [] for afield in afields: - aux_fh[afield].seek( - aux_fields_offsets[afield][ptype][0], os.SEEK_SET) - - total = 0 - while total < tp[ptype]: - count = min(self._chunksize, tp[ptype] - total) - p = np.fromfile(f, self._pdtypes[ptype], count=count) - - auxdata = [] - for afield in afields: - if isinstance(self._aux_pdtypes[afield], np.dtype): - auxdata.append( - np.fromfile(aux_fh[afield], - self._aux_pdtypes[afield], - count=count) + aux_fh[afield].seek(aux_fields_offsets[afield][ptype]) + if isinstance(self._aux_pdtypes[afield], np.dtype): + auxdata.append(np.fromfile( + aux_fh[afield], + self._aux_pdtypes[afield], + count=count) + ) + else: + par = self.ds.parameters + nlines = 1 + par['nsph'] + par['ndark'] + par['nstar'] + aux_fh[afield].seek(0) + sh = aux_fields_offsets[afield][ptype] + sf = nlines - count - sh + if tp[ptype] > 0: + aux = np.genfromtxt( + aux_fh[afield], skip_header=sh, + skip_footer=sf ) - else: - aux_fh[afield].seek(0, os.SEEK_SET) - sh = aux_fields_offsets[afield][ptype][0] + total - sf = aux_fields_offsets[afield][ptype][1] + \ - tp[ptype] - count - total - if tp[ptype] > 0: - aux = np.genfromtxt( - aux_fh[afield], skip_header=sh, - skip_footer=sf - ) - if aux.ndim < 1: - aux = np.array([aux]) - auxdata.append(aux) - - total += p.size - if afields: - p = append_fields(p, afields, auxdata) - mask = selector.select_points( - p["Coordinates"]['x'].astype("float64"), - p["Coordinates"]['y'].astype("float64"), - p["Coordinates"]['z'].astype("float64"), 0.0) - if mask is None: - continue - tf = self._fill_fields(field_list, p, mask, data_file) - for field in field_list: - yield (ptype, field), tf.pop(field) + if aux.ndim < 1: + aux = np.array([aux]) + auxdata.append(aux) + if afields: + p = append_fields(p, afields, auxdata) + if ptype == 'Gas': + hsml = self._read_smoothing_length(data_file, count) + else: + hsml = 0. 
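# Illustrative aside (not part of the patch): the ``np.fromfile(f,
# self._pdtypes[ptype], count=count)`` reads above work because Tipsy stores
# fixed-size packed records that map directly onto NumPy structured dtypes.
# The record layout below is an assumed example, not the authoritative Tipsy
# layout used by this frontend.
import numpy as np

example_gas_dtype = np.dtype([
    ("Mass", ">f4"),
    ("Coordinates", [("x", ">f4"), ("y", ">f4"), ("z", ">f4")]),
    ("Velocities", [("x", ">f4"), ("y", ">f4"), ("z", ">f4")]),
    ("Density", ">f4"),
    ("Temperature", ">f4"),
    ("Epsilon", ">f4"),
    ("Metals", ">f4"),
    ("Phi", ">f4"),
])

def read_tipsy_records(filename, byte_offset, count, dtype=example_gas_dtype):
    # Seek to a particle type's byte offset and read `count` fixed-size records.
    with open(filename, "rb") as f:
        f.seek(byte_offset)
        return np.fromfile(f, dtype=dtype, count=count)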
+ if getattr(selector, 'is_all_data', False): + mask = slice(None, None, None) + else: + x = p["Coordinates"]['x'].astype("float64") + y = p["Coordinates"]['y'].astype("float64") + z = p["Coordinates"]['z'].astype("float64") + mask = selector.select_points(x, y, z, hsml) + del x, y, z + if mask is None: + continue + tf = self._fill_fields(field_list, p, hsml, mask, data_file) + for field in field_list: + yield (ptype, field), tf.pop(field) # close all file handles f.close() @@ -239,53 +276,44 @@ def _update_domain(self, data_file): ds.unit_registry.add("unitary", float(DW.max() * DW.units.base_value), DW.units.dimensions) - def _initialize_index(self, data_file, regions): - ds = data_file.ds - morton = np.empty(sum(list(data_file.total_particles.values())), - dtype="uint64") - ind = 0 - DLE, DRE = ds.domain_left_edge, ds.domain_right_edge - self.domain_left_edge = DLE.in_units("code_length").ndarray_view() - self.domain_right_edge = DRE.in_units("code_length").ndarray_view() + def _yield_coordinates(self, data_file, needed_ptype=None): with open(data_file.filename, "rb") as f: - f.seek(ds._header_offset) + poff = data_file.field_offsets for iptype, ptype in enumerate(self._ptypes): + if ptype not in poff: + continue + f.seek(poff[ptype]) + if needed_ptype is not None and ptype != needed_ptype: + continue # We'll just add the individual types separately count = data_file.total_particles[ptype] if count == 0: continue - stop = ind + count - while ind < stop: - c = min(CHUNKSIZE, stop - ind) - pp = np.fromfile(f, dtype=self._pdtypes[ptype], - count=c) - mis = np.empty(3, dtype="float64") - mas = np.empty(3, dtype="float64") - for axi, ax in enumerate('xyz'): - mi = pp["Coordinates"][ax].min() - ma = pp["Coordinates"][ax].max() - mylog.debug( - "Spanning: %0.3e .. %0.3e in %s", mi, ma, ax) - mis[axi] = mi - mas[axi] = ma - pos = np.empty((pp.size, 3), dtype="float64") - for i, ax in enumerate("xyz"): - pos[:, i] = pp["Coordinates"][ax] - regions.add_data_file(pos, data_file.file_id, - data_file.ds.filter_bbox) - morton[ind:ind + c] = compute_morton( - pos[:, 0], pos[:, 1], pos[:, 2], - DLE, DRE, data_file.ds.filter_bbox) - ind += c - mylog.info("Adding %0.3e particles", morton.size) - return morton + pp = np.fromfile(f, dtype=self._pdtypes[ptype], + count=count) + mis = np.empty(3, dtype="float64") + mas = np.empty(3, dtype="float64") + for axi, ax in enumerate('xyz'): + mi = pp["Coordinates"][ax].min() + ma = pp["Coordinates"][ax].max() + mylog.debug( + "Spanning: %0.3e .. 
%0.3e in %s", mi, ma, ax) + mis[axi] = mi + mas[axi] = ma + pos = np.empty((pp.size, 3), dtype="float64") + for i, ax in enumerate("xyz"): + pos[:,i] = pp["Coordinates"][ax] + yield ptype, pos def _count_particles(self, data_file): - npart = { - "Gas": data_file.ds.parameters['nsph'], - "Stars": data_file.ds.parameters['nstar'], - "DarkMatter": data_file.ds.parameters['ndark'] - } + pcount = np.array([data_file.ds.parameters['nsph'], + data_file.ds.parameters['nstar'], + data_file.ds.parameters['ndark']]) + si, ei = data_file.start, data_file.end + if None not in (si, ei): + np.clip(pcount - si, 0, ei - si, out=pcount) + ptypes = ['Gas', 'Stars', 'DarkMatter'] + npart = dict((ptype, v) for ptype, v in zip(ptypes, pcount)) return npart @classmethod @@ -317,8 +345,14 @@ def _create_dtypes(self, data_file): continue self._field_list.append((ptype, field)) + if 'Gas' in self._pdtypes.keys(): + self._field_list.append(('Gas', 'smoothing_length')) + # Find out which auxiliaries we have and what is their format - tot_parts = np.sum(list(data_file.total_particles.values())) + tot_parts = np.sum( + [data_file.ds.parameters['nsph'], + data_file.ds.parameters['nstar'], + data_file.ds.parameters['ndark']]) endian = data_file.ds.endian self._aux_pdtypes = {} self._aux_fields = [] @@ -327,6 +361,9 @@ def _create_dtypes(self, data_file): filename = data_file.filename + '.' + afield if not os.path.exists(filename): continue + if afield in ['log', 'parameter', 'kdtree']: + # Amiga halo finder makes files like this we need to ignore + continue self._aux_fields.append(afield) skip_afields = [] for afield in self._aux_fields: @@ -336,8 +373,9 @@ def _create_dtypes(self, data_file): # the binary files can be either floats, ints, or doubles. We're # going to use a try-catch cascade to determine the format. filesize = os.stat(filename).st_size - if np.fromfile(filename, np.dtype(endian + 'i4'), - count=1) != tot_parts: + dtype = np.dtype(endian + 'i4') + tot_parts_from_file = np.fromfile(filename, dtype, count=1) + if tot_parts_from_file != tot_parts: with open(filename, 'rb') as f: header_nparts = f.readline() try: @@ -369,34 +407,59 @@ def _create_dtypes(self, data_file): def _identify_fields(self, data_file): return self._field_list, {} - def _calculate_particle_offsets(self, data_file): + def _calculate_particle_offsets(self, data_file, pcounts): + # This computes the offsets for each particle type into a "data_file." Note that + # the term "data_file" here is a bit overloaded, and also refers to a + # "chunk" of particles inside a data file. + # data_file.start represents the *particle count* that we should start at. + # + # At this point, data_file will have the total number of particles + # that this chunk represents located in the property total_particles. + # Because in tipsy files the particles are stored sequentially, we can + # figure out where each one starts. + # We first figure out the global offsets, then offset them by the count + # and size of each individual particle type. field_offsets = {} + # Initialize pos to the point the first particle type would start pos = data_file.ds._header_offset - for ptype in self._ptypes: - field_offsets[ptype] = pos - if data_file.total_particles[ptype] == 0: + global_offsets = {} + field_offsets = {} + for i, ptype in enumerate(self._ptypes): + if ptype not in self._pdtypes: + # This means we don't have any, I think, and so we shouldn't + # stick it in the offsets. 
continue + # Note that much of this will be computed redundantly; future + # refactorings could fix this. + global_offsets[ptype] = pos size = self._pdtypes[ptype].itemsize - pos += data_file.total_particles[ptype] * size + npart = self.ds.parameters[npart_mapping[ptype]] + # Get the offset into just this particle type, and start at data_file.start + if npart > data_file.start: + field_offsets[ptype] = pos + size * data_file.start + pos += npart * size return field_offsets def _calculate_particle_offsets_aux(self, data_file): aux_fields_offsets = {} - tp = data_file.total_particles + params = self.ds.parameters for afield in self._aux_fields: aux_fields_offsets[afield] = {} if isinstance(self._aux_pdtypes[afield], np.dtype): pos = 4 # i4 - for ptype in self._ptypes: - aux_fields_offsets[afield][ptype] = (pos, 0) - if data_file.total_particles[ptype] == 0: - continue - size = np.dtype(self._aux_pdtypes[afield]).itemsize - pos += data_file.total_particles[ptype] * size + size = np.dtype(self._aux_pdtypes[afield]).itemsize else: - aux_fields_offsets[afield].update( - {'Gas': (1, tp["DarkMatter"] + tp["Stars"]), - 'DarkMatter': (1 + tp["Gas"], tp["Stars"]), - 'Stars': (1 + tp["DarkMatter"] + tp["Gas"], 0)} - ) + pos = 1 + size = 1 + for i, ptype in enumerate(self._ptypes): + if data_file.total_particles[ptype] == 0: + continue + elif params[npart_mapping[ptype]] > CHUNKSIZE: + for j in range(i): + npart = params[npart_mapping[self._ptypes[j]]] + if npart > CHUNKSIZE: + pos += npart*size + pos += data_file.start*size + aux_fields_offsets[afield][ptype] = pos + pos += data_file.total_particles[ptype] * size return aux_fields_offsets diff --git a/yt/frontends/tipsy/tests/test_outputs.py b/yt/frontends/tipsy/tests/test_outputs.py index 1f43a95ed60..146615c8cd0 100644 --- a/yt/frontends/tipsy/tests/test_outputs.py +++ b/yt/frontends/tipsy/tests/test_outputs.py @@ -1,69 +1,42 @@ -""" -Tipsy tests using the AGORA dataset - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from collections import OrderedDict from yt.testing import \ - assert_equal, \ - requires_file + requires_file, \ + ParticleSelectionComparison from yt.utilities.answer_testing.framework import \ requires_ds, \ data_dir_load, \ sph_answer, \ - create_obj, \ - FieldValuesTest, \ - PixelizedProjectionValuesTest + nbody_answer from yt.frontends.tipsy.api import TipsyDataset -_fields = (("deposit", "all_density"), - ("deposit", "all_count"), - ("deposit", "DarkMatter_density"), +_fields = OrderedDict( + [ + (("all", "particle_mass"), None), + (("all", "particle_ones"), None), + (("all", "particle_velocity_x"), ("all", "particle_mass")), + (("all", "particle_velocity_y"), ("all", "particle_mass")), + (("all", "particle_velocity_z"), ("all", "particle_mass")), + ] ) pkdgrav = "halo1e11_run1.00400/halo1e11_run1.00400" -@requires_ds(pkdgrav, big_data = True, file_check = True) -def test_pkdgrav(): - cosmology_parameters = dict(current_redshift = 0.0, +pkdgrav_cosmology_parameters = dict(current_redshift = 0.0, omega_lambda = 0.728, omega_matter = 0.272, hubble_constant = 0.702) - kwargs = dict(field_dtypes = {"Coordinates": "d"}, - cosmology_parameters = cosmology_parameters, - unit_base = {'length': (60.0, "Mpccm/h")}, - n_ref = 64) - ds = data_dir_load(pkdgrav, TipsyDataset, (), kwargs) - assert_equal(str(ds), "halo1e11_run1.00400") - dso = [ None, ("sphere", ("c", (0.3, 'unitary')))] - dd = ds.all_data() - assert_equal(dd["Coordinates"].shape, (26847360, 3)) - tot = sum(dd[ptype,"Coordinates"].shape[0] - for ptype in ds.particle_types if ptype != "all") - assert_equal(tot, 26847360) - for dobj_name in dso: - for field in _fields: - for axis in [0, 1, 2]: - for weight_field in [None]: - yield PixelizedProjectionValuesTest( - ds, axis, field, weight_field, - dobj_name) - yield FieldValuesTest(ds, field, dobj_name) - dobj = create_obj(ds, dobj_name) - s1 = dobj["ones"].sum() - s2 = sum(mask.sum() for block, mask in dobj.blocks) - assert_equal(s1, s2) +pkdgrav_kwargs = dict( + field_dtypes = {"Coordinates": "d"}, + cosmology_parameters = pkdgrav_cosmology_parameters, + unit_base = {'length': (60.0, "Mpccm/h")} +) +@requires_ds(pkdgrav, big_data = True, file_check = True) +def test_pkdgrav(): + ds = data_dir_load(pkdgrav, TipsyDataset, (), kwargs = pkdgrav_kwargs) + for test in nbody_answer(ds, "halo1e11_run1.00400", 26847360, _fields): + yield test + psc = ParticleSelectionComparison(ds) + psc.run_defaults() gasoline_dmonly = "agora_1e11.00400/agora_1e11.00400" @requires_ds(gasoline_dmonly, big_data = True, file_check = True) @@ -73,36 +46,25 @@ def test_gasoline_dmonly(): omega_matter = 0.272, hubble_constant = 0.702) kwargs = dict(cosmology_parameters = cosmology_parameters, - unit_base = {'length': (60.0, "Mpccm/h")}, - n_ref = 64) + unit_base = {'length': (60.0, "Mpccm/h")}) ds = data_dir_load(gasoline_dmonly, TipsyDataset, (), kwargs) - assert_equal(str(ds), "agora_1e11.00400") - dso = [ None, ("sphere", ("c", (0.3, 'unitary')))] - dd = ds.all_data() - assert_equal(dd["Coordinates"].shape, (10550576, 3)) - tot = sum(dd[ptype,"Coordinates"].shape[0] - for ptype in ds.particle_types if ptype != "all") - assert_equal(tot, 10550576) - for dobj_name in dso: - for field in _fields: - for axis in [0, 1, 2]: - for weight_field in [None]: - yield PixelizedProjectionValuesTest( - ds, axis, field, weight_field, - dobj_name) - yield FieldValuesTest(ds, field, dobj_name) - dobj = create_obj(ds, dobj_name) - s1 = 
dobj["ones"].sum() - s2 = sum(mask.sum() for block, mask in dobj.blocks) - assert_equal(s1, s2) + for test in nbody_answer(ds, "agora_1e11.00400", 10550576, _fields): + yield test + psc = ParticleSelectionComparison(ds) + psc.run_defaults() -tg_fields = OrderedDict( +tg_sph_fields = OrderedDict( [ (('gas', 'density'), None), (('gas', 'temperature'), None), (('gas', 'temperature'), ('gas', 'density')), (('gas', 'velocity_magnitude'), None), (('gas', 'Fe_fraction'), None), + ] +) + +tg_nbody_fields = OrderedDict( + [ (('Stars', 'Metals'), None), ] ) @@ -110,13 +72,30 @@ def test_gasoline_dmonly(): tipsy_gal = 'TipsyGalaxy/galaxy.00300' @requires_ds(tipsy_gal) def test_tipsy_galaxy(): - ds = data_dir_load(tipsy_gal) - for test in sph_answer(ds, 'galaxy.00300', 315372, tg_fields): + ds = data_dir_load(tipsy_gal, kwargs = {'bounding_box': [[-2000, 2000], + [-2000, 2000], + [-2000, 2000]]}) + # These tests should be re-enabled. But the holdup is that the region + # selector does not offset by domain_left_edge, and we have inelegant + # selection using bboxes. + #psc = ParticleSelectionComparison(ds) + #psc.run_defaults() + for test in sph_answer(ds, 'galaxy.00300', 315372, tg_sph_fields): + test_tipsy_galaxy.__name__ = test.description + yield test + for test in nbody_answer(ds, 'galaxy.00300', 315372, tg_nbody_fields): test_tipsy_galaxy.__name__ = test.description yield test @requires_file(gasoline_dmonly) @requires_file(pkdgrav) def test_TipsyDataset(): - assert isinstance(data_dir_load(pkdgrav), TipsyDataset) + assert isinstance(data_dir_load(pkdgrav, kwargs = pkdgrav_kwargs), TipsyDataset) assert isinstance(data_dir_load(gasoline_dmonly), TipsyDataset) + + +@requires_file(tipsy_gal) +def test_tipsy_index(): + ds = data_dir_load(tipsy_gal) + sl = ds.slice('z', 0.0) + assert sl['gas', 'density'].shape[0]!=0 diff --git a/yt/frontends/ytdata/__init__.py b/yt/frontends/ytdata/__init__.py index a7c3f22f595..7cccc849a5c 100644 --- a/yt/frontends/ytdata/__init__.py +++ b/yt/frontends/ytdata/__init__.py @@ -6,10 +6,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/frontends/ytdata/api.py b/yt/frontends/ytdata/api.py index 6d7cfcbfcfe..a7fdf936854 100644 --- a/yt/frontends/ytdata/api.py +++ b/yt/frontends/ytdata/api.py @@ -1,19 +1,3 @@ -""" -API for ytData frontend - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .data_structures import \ YTDataContainerDataset, \ YTSpatialPlotDataset, \ diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index 22c03a27afa..579ab4b654c 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -1,19 +1,3 @@ -""" -Data structures for YTData frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from collections import \ defaultdict from numbers import \ @@ -37,9 +21,8 @@ Profile3DFromDataset from yt.data_objects.static_output import \ Dataset, \ - ParticleFile -from yt.extern.six import \ - string_types + ParticleFile, \ + validate_index_order from yt.funcs import \ is_root, \ parse_h5_attr @@ -85,7 +68,11 @@ def _parse_parameter_file(self): for key in f.attrs.keys(): v = parse_h5_attr(f, key) if key == "con_args": - v = v.astype("str") + try: + v = eval(v) + except ValueError: + # support older ytdata outputs + v = v.astype('str') self.parameters[key] = v self._with_parameter_file_open(f) @@ -162,7 +149,7 @@ def _set_code_unit_attributes(self): self.parameters["%s_units" % attr]) del self.parameters[attr] del self.parameters["%s_units" % attr] - elif isinstance(unit, string_types): + elif isinstance(unit, str): uq = self.quan(1.0, unit) elif isinstance(unit, numeric_type): uq = self.quan(unit, cgs_unit) @@ -220,12 +207,12 @@ def _setup_override_fields(self): pass class YTDataHDF5File(ParticleFile): - def __init__(self, ds, io, filename, file_id): + def __init__(self, ds, io, filename, file_id, range): with h5py.File(filename, mode="r") as f: self.header = dict((field, parse_h5_attr(f, field)) \ for field in f.attrs.keys()) - super(YTDataHDF5File, self).__init__(ds, io, filename, file_id) + super(YTDataHDF5File, self).__init__(ds, io, filename, file_id, range) class YTDataContainerDataset(YTDataset): """Dataset for saved geometric data containers.""" @@ -236,10 +223,10 @@ class YTDataContainerDataset(YTDataset): fluid_types = ("grid", "gas", "deposit", "index") def __init__(self, filename, dataset_type="ytdatacontainer_hdf5", - n_ref = 16, over_refine_factor = 1, units_override=None, + index_order=None, index_filename=None, units_override=None, unit_system="cgs"): - self.n_ref = n_ref - self.over_refine_factor = over_refine_factor + self.index_order = validate_index_order(index_order) + self.index_filename=index_filename super(YTDataContainerDataset, self).__init__(filename, dataset_type, units_override=units_override, unit_system=unit_system) @@ -249,8 +236,7 @@ def _parse_parameter_file(self): self.particle_types = self.particle_types_raw self.filename_template = self.parameter_filename self.file_count = 1 - nz = 1 << self.over_refine_factor - self.domain_dimensions = np.ones(3, "int32") * nz + self.domain_dimensions = np.ones(3, "int32") def _setup_gas_alias(self): "Alias the grid type to gas by making a particle union." @@ -276,7 +262,7 @@ def data(self): # since this is now particle-like data. 
data_type = self.parameters.get("data_type") container_type = self.parameters.get("container_type") - ex_container_type = ["cutting", "proj", "ray", "slice", "cut_region"] + ex_container_type = ["cutting", "quad_proj", "ray", "slice", "cut_region"] if data_type == "yt_light_ray" or container_type in ex_container_type: mylog.info("Returning an all_data data container.") return self.all_data() @@ -351,7 +337,7 @@ def __init__(self, *args, **kwargs): def _parse_parameter_file(self): super(YTSpatialPlotDataset, self)._parse_parameter_file() if self.parameters["container_type"] == "proj": - if isinstance(self.parameters["weight_field"], string_types) and \ + if isinstance(self.parameters["weight_field"], str) and \ self.parameters["weight_field"] == "None": self.parameters["weight_field"] = None elif isinstance(self.parameters["weight_field"], np.ndarray): @@ -365,7 +351,7 @@ def _is_valid(self, *args, **kwargs): data_type = parse_h5_attr(f, "data_type") cont_type = parse_h5_attr(f, "container_type") if data_type == "yt_data_container" and \ - cont_type in ["cutting", "proj", "slice"]: + cont_type in ["cutting", "proj", "slice", "quad_proj"]: return True return False @@ -386,7 +372,7 @@ def __getitem__(self, key): except YTFieldTypeNotFound: return tr finfo = self.ds._get_field_info(*fields[0]) - if not finfo.particle_type: + if not finfo.sampling_type == "particle": return tr.reshape(self.ActiveDimensions[:self.ds.dimensionality]) return tr @@ -602,7 +588,7 @@ def get_data(self, fields=None): for ftype, fname in fields_to_get: finfo = self.ds._get_field_info(ftype, fname) finfos[ftype, fname] = finfo - if finfo.particle_type: + if finfo.sampling_type == "particle": particles.append((ftype, fname)) elif (ftype, fname) not in fluids: fluids.append((ftype, fname)) @@ -621,7 +607,7 @@ def get_data(self, fields=None): else: v = v.astype(np.float64) if convert: - self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units) + self.field_data[f] = self.ds.arr(v, units = finfos[f].units) self.field_data[f].convert_to_units(finfos[f].output_units) read_particles, gen_particles = self.index._read_fluid_fields( @@ -635,7 +621,7 @@ def get_data(self, fields=None): else: v = v.astype(np.float64) if convert: - self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units) + self.field_data[f] = self.ds.arr(v, units = finfos[f].units) self.field_data[f].convert_to_units(finfos[f].output_units) fields_to_generate += gen_fluids + gen_particles @@ -755,7 +741,7 @@ def profile(self): def _parse_parameter_file(self): super(YTGridDataset, self)._parse_parameter_file() - if isinstance(self.parameters["weight_field"], string_types) and \ + if isinstance(self.parameters["weight_field"], str) and \ self.parameters["weight_field"] == "None": self.parameters["weight_field"] = None elif isinstance(self.parameters["weight_field"], np.ndarray): @@ -788,7 +774,7 @@ def _parse_parameter_file(self): setattr(self, range_name, self.parameters[range_name]) bin_field = "%s_field" % ax - if isinstance(self.parameters[bin_field], string_types) and \ + if isinstance(self.parameters[bin_field], str) and \ self.parameters[bin_field] == "None": self.parameters[bin_field] = None elif isinstance(self.parameters[bin_field], np.ndarray): diff --git a/yt/frontends/ytdata/fields.py b/yt/frontends/ytdata/fields.py index 6b1753df3bf..2ecb400e2ed 100644 --- a/yt/frontends/ytdata/fields.py +++ b/yt/frontends/ytdata/fields.py @@ -1,19 +1,3 @@ -""" -YTData-specific fields - - - - -""" - 
-#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.fields.field_info_container import \ FieldInfoContainer @@ -48,7 +32,9 @@ def _cell_volume(field, data): return data["grid", "dx"] * \ data["grid", "dy"] * \ data["grid", "dz"] - self.add_field(("grid", "cell_volume"), sampling_type="particle", function=_cell_volume, + self.add_field(("grid", "cell_volume"), + sampling_type="particle", + function=_cell_volume, units="cm**3") class YTGridFieldInfo(FieldInfoContainer): diff --git a/yt/frontends/ytdata/io.py b/yt/frontends/ytdata/io.py index ebd666a1567..2ad30b9ef3d 100644 --- a/yt/frontends/ytdata/io.py +++ b/yt/frontends/ytdata/io.py @@ -1,28 +1,12 @@ -""" -YTData data-file handling function - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np -from yt.extern.six import \ - u from yt.funcs import \ mylog, \ parse_h5_attr from yt.geometry.selection_routines import \ GridSelector +from yt.units.yt_array import \ + uvstack from yt.utilities.exceptions import \ YTDomainOverflow from yt.utilities.io_handler import \ @@ -44,7 +28,7 @@ def _read_fluid_selection(self, g, selector, fields): gf = self._cached_fields[g.id] rv.update(gf) if len(rv) == len(fields): return rv - f = h5py.File(u(g.filename), mode="r") + f = h5py.File(g.filename, mode="r") for field in fields: if field in rv: self._hits += 1 @@ -79,7 +63,7 @@ def _read_fluid_selection(self, chunks, selector, fields, size): gf = self._cached_fields[g.id] rv.update(gf) if len(rv) == len(fields): return rv - f = h5py.File(u(g.filename), mode="r") + f = h5py.File(g.filename, mode="r") gds = f[self.ds.default_fluid_type] for field in fields: if field in rv: @@ -183,6 +167,18 @@ class IOHandlerYTDataContainerHDF5(BaseIOHandler): def _read_fluid_selection(self, chunks, selector, fields, size): raise NotImplementedError + def _yield_coordinates(self, data_file): + with h5py.File(data_file.filename, 'r') as f: + for ptype in f.keys(): + if 'x' not in f[ptype].keys(): + continue + units = _get_position_array_units(ptype, f, "x") + x, y, z = (self.ds.arr(_get_position_array(ptype, f, ax), units) + for ax in "xyz") + pos = uvstack([x, y, z]).T + pos.convert_to_units('code_length') + yield ptype, pos + def _read_particle_coords(self, chunks, ptf): # This will read chunks and yield the results. 
chunks = list(chunks) @@ -190,7 +186,7 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): pcount = data_file.total_particles[ptype] @@ -208,7 +204,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): units = _get_position_array_units(ptype, f, "x") @@ -264,7 +260,14 @@ def _initialize_index(self, data_file, regions): return morton def _count_particles(self, data_file): - return self.ds.num_particles + si, ei = data_file.start, data_file.end + if None not in (si, ei): + pcount = {} + for ptype, npart in self.ds.num_particles.items(): + pcount[ptype] = np.clip(npart - si, 0, ei - si) + else: + pcount = self.ds.num_particles + return pcount def _identify_fields(self, data_file): fields = [] @@ -287,7 +290,7 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): pcount = data_file.total_particles[ptype] @@ -305,7 +308,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for chunk in chunks: for obj in chunk.objs: data_files.update(obj.data_files) - for data_file in sorted(data_files): + for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): all_count = self._count_particles(data_file) with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(ptf.items()): diff --git a/yt/frontends/ytdata/tests/test_old_outputs.py b/yt/frontends/ytdata/tests/test_old_outputs.py index 1fd7a283425..60c384ffdb6 100644 --- a/yt/frontends/ytdata/tests/test_old_outputs.py +++ b/yt/frontends/ytdata/tests/test_old_outputs.py @@ -50,7 +50,8 @@ ytdata_dir = "ytdata_test" @requires_ds(enzotiny) -@requires_ds(ytdata_dir) +@requires_file(os.path.join(ytdata_dir, "DD0046_sphere.h5")) +@requires_file(os.path.join(ytdata_dir, "DD0046_cut_region.h5")) def test_old_datacontainer_data(): ds = data_dir_load(enzotiny) sphere = ds.sphere(ds.domain_center, (10, "Mpc")) @@ -69,7 +70,9 @@ def test_old_datacontainer_data(): assert (cr["temperature"] == cr_ds.data["temperature"]).all() @requires_ds(enzotiny) -@requires_ds(ytdata_dir) +@requires_file(os.path.join(ytdata_dir, "DD0046_covering_grid.h5")) +@requires_file(os.path.join(ytdata_dir, "DD0046_arbitrary_grid.h5")) +@requires_file(os.path.join(ytdata_dir, "DD0046_proj_frb.h5")) def test_old_grid_datacontainer_data(): ds = data_dir_load(enzotiny) @@ -100,7 +103,7 @@ def test_old_grid_datacontainer_data(): yield YTDataFieldTest(full_fn, "density", geometric=False) @requires_ds(enzotiny) -@requires_ds(ytdata_dir) +@requires_file(os.path.join(ytdata_dir, "DD0046_proj.h5")) def test_old_spatial_data(): ds = data_dir_load(enzotiny) fn = "DD0046_proj.h5" @@ -111,7 +114,8 @@ def test_old_spatial_data(): yield YTDataFieldTest(full_fn, ("grid", "density"), geometric=False) 
@requires_ds(enzotiny) -@requires_ds(ytdata_dir) +@requires_file(os.path.join(ytdata_dir, "DD0046_Profile1D.h5")) +@requires_file(os.path.join(ytdata_dir, "DD0046_Profile2D.h5")) def test_old_profile_data(): tmpdir = tempfile.mkdtemp() curdir = os.getcwd() @@ -157,7 +161,8 @@ def test_old_profile_data(): shutil.rmtree(tmpdir) @requires_ds(enzotiny) -@requires_ds(ytdata_dir) +@requires_file(os.path.join(ytdata_dir, "test_data.h5")) +@requires_file(os.path.join(ytdata_dir, "random_data.h5")) def test_old_nonspatial_data(): ds = data_dir_load(enzotiny) region = ds.box([0.25]*3, [0.75]*3) @@ -181,7 +186,9 @@ def test_old_nonspatial_data(): yield YTDataFieldTest(full_fn, "density", geometric=False) @requires_module('h5py') -@requires_file(ytdata_dir) +@requires_file(os.path.join(ytdata_dir, "slice.h5")) +@requires_file(os.path.join(ytdata_dir, "proj.h5")) +@requires_file(os.path.join(ytdata_dir, "oas.h5")) def test_old_plot_data(): tmpdir = tempfile.mkdtemp() curdir = os.getcwd() diff --git a/yt/frontends/ytdata/tests/test_outputs.py b/yt/frontends/ytdata/tests/test_outputs.py index 382ec43cd69..311720d1b4e 100644 --- a/yt/frontends/ytdata/tests/test_outputs.py +++ b/yt/frontends/ytdata/tests/test_outputs.py @@ -1,18 +1,3 @@ -""" -ytdata frontend tests using enzo_tiny_cosmology - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) yt Development Team. All rights reserved. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.convenience import \ load from yt.data_objects.api import \ diff --git a/yt/frontends/ytdata/utilities.py b/yt/frontends/ytdata/utilities.py index 0855bff7835..c72e464ed70 100644 --- a/yt/frontends/ytdata/utilities.py +++ b/yt/frontends/ytdata/utilities.py @@ -1,22 +1,3 @@ -""" -Utility functions for ytdata frontend. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import numpy as np - -from yt.funcs import iterable from yt.units.yt_array import \ YTArray from yt.utilities.logger import \ @@ -233,15 +214,9 @@ def _yt_array_hdf5_attr(fh, attr, val): if val is None: val = "None" if hasattr(val, "units"): fh.attrs["%s_units" % attr] = str(val.units) - # The following is a crappy workaround for getting - # Unicode strings into HDF5 attributes in Python 3 - if iterable(val): - val = np.array(val) - if val.dtype.kind == 'U': - val = val.astype('|S') try: fh.attrs[str(attr)] = val # This is raised if no HDF5 equivalent exists. # In that case, save its string representation. except TypeError: - fh.attrs[str(attr)] = str(val) + fh.attrs[str(attr)] = repr(val) diff --git a/yt/funcs.py b/yt/funcs.py index db7304f9fbc..e7401c5bff2 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -1,22 +1,6 @@ -""" -Useful functions. If non-original, see function for citation. - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import errno -from yt.extern.six import string_types -from yt.extern.six.moves import input, builtins +import builtins +import copy import time import inspect import traceback @@ -38,14 +22,13 @@ from math import floor, ceil from numbers import Number as numeric_type -from yt.extern.six.moves import urllib +import urllib.request +import urllib.parse from yt.utilities.logger import ytLogger as mylog from yt.utilities.lru_cache import lru_cache -from yt.utilities.exceptions import \ - YTInvalidWidthError, \ - YTEquivalentDimsError +from yt.utilities.exceptions import YTInvalidWidthError from yt.extern.tqdm import tqdm -from yt.units.yt_array import YTArray, YTQuantity +from yt.units import YTArray, YTQuantity from functools import wraps # Some functions for handling sequences and other types @@ -237,6 +220,15 @@ def rootloginfo(*args): if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: return mylog.info(*args) +class VisibleDeprecationWarning(UserWarning): + """Visible deprecation warning, adapted from NumPy + + By default python does not show users deprecation warnings. + This ensures that a deprecation warning is visible to users + if that is desired. + """ + pass + def deprecate(replacement): def real_deprecate(func): """ @@ -255,7 +247,7 @@ def run_func(*args, **kwargs): message = "%s has been deprecated and may be removed without notice!" if replacement is not None: message += " Use %s instead." % replacement - warnings.warn(message % func.__name__, DeprecationWarning, + warnings.warn(message % func.__name__, VisibleDeprecationWarning, stacklevel=2) func(*args, **kwargs) return run_func @@ -447,8 +439,9 @@ def paste_traceback(exc_type, exc, tb): Should only be used in sys.excepthook. """ sys.__excepthook__(exc_type, exc, tb) - from yt.extern.six.moves import StringIO, xmlrpc_client - p = xmlrpc_client.ServerProxy( + import xmlrpc.client + from io import StringIO + p = xmlrpc.client.ServerProxy( "http://paste.yt-project.org/xmlrpc/", allow_none=True) s = StringIO() @@ -465,13 +458,14 @@ def paste_traceback_detailed(exc_type, exc, tb): Should only be used in sys.excepthook. 
""" import cgitb - from yt.extern.six.moves import StringIO, xmlrpc_client + from io import StringIO + import xmlrpc.client s = StringIO() handler = cgitb.Hook(format="text", file = s) handler(exc_type, exc, tb) s = s.getvalue() print(s) - p = xmlrpc_client.ServerProxy( + p = xmlrpc.client.ServerProxy( "http://paste.yt-project.org/xmlrpc/", allow_none=True) ret = p.pastes.newPaste('text', s, None, '', '', True) @@ -765,7 +759,7 @@ def fix_length(length, ds): if isinstance(length, numeric_type): return YTArray(length, 'code_length', registry=registry) length_valid_tuple = isinstance(length, (list, tuple)) and len(length) == 2 - unit_is_string = isinstance(length[1], string_types) + unit_is_string = isinstance(length[1], str) length_is_number = (isinstance(length[0], numeric_type) and not isinstance(length[0], YTArray)) if length_valid_tuple and unit_is_string and length_is_number: @@ -848,11 +842,11 @@ def get_output_filename(name, keyword, suffix): Examples -------- - >>> print get_output_filename(None, "Projection_x", ".png") + >>> print(get_output_filename(None, "Projection_x", ".png")) Projection_x.png - >>> print get_output_filename("my_file", "Projection_x", ".png") + >>> print(get_output_filename("my_file", "Projection_x", ".png")) my_file.png - >>> print get_output_filename("my_file/", "Projection_x", ".png") + >>> print(get_output_filename("my_file/", "Projection_x", ".png")) my_file/Projection_x.png """ @@ -896,7 +890,7 @@ def validate_width_tuple(width): "width (%s) is not a two element tuple" % width) is_numeric = isinstance(width[0], numeric_type) length_has_units = isinstance(width[0], YTArray) - unit_is_string = isinstance(width[1], string_types) + unit_is_string = isinstance(width[1], str) if not is_numeric or length_has_units and unit_is_string: msg = "width (%s) is invalid. " % str(width) msg += "Valid widths look like this: (12, 'au')" @@ -1025,6 +1019,13 @@ def enable_plugins(pluginfilename=None): if callable(execdict[k]): setattr(yt, k, execdict[k]) +def subchunk_count(n_total, chunk_size): + handled = 0 + while handled < n_total: + tr = min(n_total - handled, chunk_size) + yield tr + handled += tr + def fix_unitary(u): if u == '1': return 'unitary' @@ -1188,12 +1189,24 @@ def obj_length(v): # to signify zero length (aka a scalar). return 0 -def handle_mks_cgs(values, field_units): - try: - values = values.to(field_units) - except YTEquivalentDimsError as e: - values = values.to_equivalent(e.new_units, e.base) - return values +def array_like_field(data, x, field): + field = data._determine_fields(field)[0] + if isinstance(field, tuple): + finfo = data.ds._get_field_info(field[0],field[1]) + else: + finfo = data.ds._get_field_info(field) + if finfo.sampling_type == 'particle': + units = finfo.output_units + else: + units = finfo.units + if isinstance(x, YTArray): + arr = copy.deepcopy(x) + arr.convert_to_units(units) + return arr + if isinstance(x, np.ndarray): + return data.ds.arr(x, units) + else: + return data.ds.quan(x, units) def validate_3d_array(obj): if not iterable(obj) or len(obj) != 3: @@ -1204,7 +1217,7 @@ def validate_float(obj): """Validates if the passed argument is a float value. Raises an exception if `obj` is a single float value - or a YTQunatity of size 1. + or a YTQuantity of size 1. 
Parameters ---------- @@ -1234,7 +1247,7 @@ def validate_float(obj): """ if isinstance(obj, tuple): if len(obj) != 2 or not isinstance(obj[0], numeric_type)\ - or not isinstance(obj[1], string_types): + or not isinstance(obj[1], str): raise TypeError("Expected a numeric value (or tuple of format " "(float, String)), received an inconsistent tuple " "'%s'." % str(obj)) @@ -1267,7 +1280,7 @@ def validate_axis(ds, axis): "received '%s'." % (list(valid_axis), axis)) def validate_center(center): - if isinstance(center, string_types): + if isinstance(center, str): c = center.lower() if c not in ["c", "center", "m", "max", "min"] \ and not c.startswith("max_") and not c.startswith("min_"): diff --git a/yt/geometry/api.py b/yt/geometry/api.py index 67aaa020f8c..4adeee23510 100644 --- a/yt/geometry/api.py +++ b/yt/geometry/api.py @@ -1,20 +1,3 @@ -""" -API for Geometry Handlers - - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .geometry_handler import \ Index diff --git a/yt/geometry/coordinates/api.py b/yt/geometry/coordinates/api.py index 7da4bddf072..5a3c0f9c6b9 100644 --- a/yt/geometry/coordinates/api.py +++ b/yt/geometry/coordinates/api.py @@ -1,16 +1,3 @@ -""" -API for coordinate handlers - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .coordinate_handler import \ CoordinateHandler diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 6da1eeb1934..40e3364e780 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -1,19 +1,3 @@ -""" -Definitions for cartesian coordinate systems - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from .coordinate_handler import \ CoordinateHandler, \ @@ -25,10 +9,17 @@ from yt.units.yt_array import uvstack, YTArray from yt.utilities.lib.pixelization_routines import \ pixelize_element_mesh, pixelize_off_axis_cartesian, \ - pixelize_cartesian, pixelize_cartesian_nodal, \ - pixelize_element_mesh_line + pixelize_cartesian, \ + pixelize_cartesian_nodal, \ + pixelize_sph_kernel_slice, \ + pixelize_sph_kernel_projection, \ + pixelize_element_mesh_line, \ + interpolate_sph_grid_gather, \ + normalization_2d_utility from yt.data_objects.unstructured_mesh import SemiStructuredMesh from yt.utilities.nodal_data_utils import get_nodal_data +from yt.units.yt_array import uconcatenate + def _sample_ray(ray, npoints, field): """ @@ -65,6 +56,30 @@ def _sample_ray(ray, npoints, field): x = np.arange(npoints)/(npoints-1)*(dr*npoints) return x, field_values +def all_data(data, ptype, fields, kdtree=False): + field_data = {} + fields = set(fields) + for field in fields: + field_data[field] = [] + + for chunk in data.all_data().chunks([], "io"): + for field in fields: + field_data[field].append(chunk[ptype, + field].in_base("code")) + + for field in fields: + field_data[field] = uconcatenate(field_data[field]) + + if kdtree is True: + kdtree = data.index.kdtree + for field in fields: + if len(field_data[field].shape) == 1: + field_data[field] = field_data[field][kdtree.idx] + else: + field_data[field] = field_data[field][kdtree.idx, :] + + return field_data + class CartesianCoordinateHandler(CoordinateHandler): name = "cartesian" @@ -74,17 +89,28 @@ def __init__(self, ds, ordering = ('x','y','z')): def setup_fields(self, registry): for axi, ax in enumerate(self.axis_order): f1, f2 = _get_coord_fields(axi) - registry.add_field(("index", "d%s" % ax), sampling_type="cell", function = f1, + registry.add_field(("index", "d%s" % ax), + sampling_type="cell", + function = f1, display_field = False, units = "code_length") - registry.add_field(("index", "path_element_%s" % ax), sampling_type="cell", function = f1, + + registry.add_field(("index", "path_element_%s" % ax), + sampling_type="cell", + function = f1, display_field = False, units = "code_length") - registry.add_field(("index", "%s" % ax), sampling_type="cell", function = f2, + + registry.add_field(("index", "%s" % ax), + sampling_type="cell", + function = f2, display_field = False, units = "code_length") + f3 = _get_vert_fields(axi) - registry.add_field(("index", "vertex_%s" % ax), sampling_type="cell", function = f3, + registry.add_field(("index", "vertex_%s" % ax), + sampling_type="cell", + function = f3, display_field = False, units = "code_length") def _cell_volume(field, data): @@ -92,8 +118,14 @@ def _cell_volume(field, data): rv *= data["index", "dy"] rv *= data["index", "dz"] return rv - registry.add_field(("index", "cell_volume"), sampling_type="cell", function=_cell_volume, - display_field=False, units = "code_length**3") + + registry.add_field(("index", "cell_volume"), + sampling_type="cell", + function=_cell_volume, + display_field=False, + units = "code_length**3") + registry.alias(('index', 'volume'), ('index', 'cell_volume')) + registry.check_derived_fields( [("index", "dx"), ("index", "dy"), ("index", "dz"), ("index", "x"), ("index", "y"), ("index", "z"), @@ -213,7 +245,15 @@ def pixelize_line(self, field, start_point, end_point, npoints): def _ortho_pixelize(self, data_source, field, bounds, size, antialias, dim, periodic): + from 
yt.frontends.sph.data_structures import ParticleDataset + from yt.frontends.stream.data_structures import StreamParticlesDataset + from yt.data_objects.selection_data_containers import \ + YTSlice + from yt.data_objects.construction_data_containers import \ + YTParticleProj # We should be using fcoords + field = data_source._determine_fields(field)[0] + finfo = data_source.ds.field_info[field] period = self.period[:2].copy() # dummy here period[0] = self.period[self.x_axis[dim]] period[1] = self.period[self.y_axis[dim]] @@ -221,10 +261,11 @@ def _ortho_pixelize(self, data_source, field, bounds, size, antialias, period = period.in_units("code_length").d buff = np.zeros((size[1], size[0]), dtype="f8") + particle_datasets = (ParticleDataset, StreamParticlesDataset) + is_sph_field = finfo.is_sph_field finfo = self.ds._get_field_info(field) - nodal_flag = finfo.nodal_flag - if np.any(nodal_flag): + if np.any(finfo.nodal_flag): nodal_data = get_nodal_data(data_source, field) coord = data_source.coord.d pixelize_cartesian_nodal(buff, @@ -232,8 +273,187 @@ def _ortho_pixelize(self, data_source, field, bounds, size, antialias, data_source['pdx'], data_source['pdy'], data_source['pdz'], nodal_data, coord, bounds, int(antialias), period, int(periodic)) + elif isinstance(data_source.ds, particle_datasets) and is_sph_field: + ptype = field[0] + if ptype == 'gas': + ptype = data_source.ds._sph_ptypes[0] + px_name = self.axis_name[self.x_axis[dim]] + py_name = self.axis_name[self.y_axis[dim]] + ounits = data_source.ds.field_info[field].output_units + bnds = data_source.ds.arr(bounds, 'code_length').tolist() + if isinstance(data_source, YTParticleProj): + weight = data_source.weight_field + le, re = data_source.data_source.get_bbox() + xa = self.x_axis[dim] + ya = self.y_axis[dim] + # If we're not periodic, we need to clip to the boundary edges + # or we get errors about extending off the edge of the region. 
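+                # That is: along each image axis, the requested bounds are
+                # clipped to the domain edges when that axis is not periodic,
+                # and used as-is when it is.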
+ if not self.ds.periodicity[xa]: + le[xa] = max(bounds[0], self.ds.domain_left_edge[xa]) + re[xa] = min(bounds[1], self.ds.domain_right_edge[xa]) + else: + le[xa] = bounds[0] + re[xa] = bounds[1] + if not self.ds.periodicity[ya]: + le[ya] = max(bounds[2], self.ds.domain_left_edge[ya]) + re[ya] = min(bounds[3], self.ds.domain_right_edge[ya]) + else: + le[ya] = bounds[2] + re[ya] = bounds[3] + # We actually need to clip these + proj_reg = data_source.ds.region( + left_edge=le, right_edge=re, center=data_source.center, + data_source=data_source.data_source + ) + proj_reg.set_field_parameter("axis", data_source.axis) + buff = np.zeros(size, dtype='float64') + if weight is None: + for chunk in proj_reg.chunks([], 'io'): + data_source._initialize_projected_units([field], chunk) + pixelize_sph_kernel_projection( + buff, + chunk[ptype, px_name].to('code_length'), + chunk[ptype, py_name].to('code_length'), + chunk[ptype, 'smoothing_length'].to('code_length'), + chunk[ptype, 'mass'].to('code_mass'), + chunk[ptype, 'density'].to("code_density"), + chunk[field].in_units(ounits), + bnds) + # We use code length here, but to get the path length right + # we need to multiply by the conversion factor between + # code length and the unit system's length unit + default_path_length_unit = data_source.ds.unit_system['length'] + dl_conv = data_source.ds.quan(1.0, "code_length").to( + default_path_length_unit) + buff *= dl_conv.v + # if there is a weight field, take two projections: + # one of field*weight, the other of just weight, and divide them + else: + weight_buff = np.zeros(size, dtype='float64') + buff = np.zeros(size, dtype='float64') + wounits = data_source.ds.field_info[weight].output_units + for chunk in proj_reg.chunks([], 'io'): + data_source._initialize_projected_units([field], chunk) + data_source._initialize_projected_units([weight], chunk) + pixelize_sph_kernel_projection( + buff, + chunk[ptype, px_name].to('code_length'), + chunk[ptype, py_name].to('code_length'), + chunk[ptype, 'smoothing_length'].to('code_length'), + chunk[ptype, 'mass'].to('code_mass'), + chunk[ptype, 'density'].to('code_density'), + chunk[field].in_units(ounits), + bnds, + weight_field=chunk[weight].in_units(wounits)) + mylog.info("Making a fixed resolution buffer of (%s) %d by %d" % \ + (weight, size[0], size[1])) + for chunk in proj_reg.chunks([], 'io'): + data_source._initialize_projected_units([weight], chunk) + pixelize_sph_kernel_projection( + weight_buff, + chunk[ptype, px_name].to('code_length'), + chunk[ptype, py_name].to('code_length'), + chunk[ptype, 'smoothing_length'].to('code_length'), + chunk[ptype, 'mass'].to('code_mass'), + chunk[ptype, 'density'].to('code_density'), + chunk[weight].in_units(wounits), + bnds) + normalization_2d_utility(buff, weight_buff) + elif isinstance(data_source, YTSlice): + smoothing_style = getattr(self.ds, 'sph_smoothing_style', + 'scatter') + normalize = getattr(self.ds, 'use_sph_normalization', True) + + if smoothing_style == 'scatter': + buff = np.zeros(size, dtype='float64') + if normalize: + buff_den = np.zeros(size, dtype='float64') + + for chunk in data_source.chunks([], 'io'): + pixelize_sph_kernel_slice( + buff, + chunk[ptype, px_name].to('code_length'), + chunk[ptype, py_name].to('code_length'), + chunk[ptype, 'smoothing_length'].to('code_length'), + chunk[ptype, 'mass'].to('code_mass'), + chunk[ptype, 'density'].to('code_density'), + chunk[field].in_units(ounits), + bnds) + if normalize: + pixelize_sph_kernel_slice( + buff_den, + chunk[ptype, px_name].to('code_length'), 
+ chunk[ptype, py_name].to('code_length'), + chunk[ptype, 'smoothing_length'].to('code_length'), + chunk[ptype, 'mass'].to('code_mass'), + chunk[ptype, 'density'].to('code_density'), + np.ones(chunk[ptype, 'density'].shape[0]), + bnds) + + if normalize: + normalization_2d_utility(buff, buff_den) + + if smoothing_style == "gather": + # Here we find out which axis are going to be the "x" and + # "y" axis for the actual visualisation and then we set the + # buffer size and bounds to match. The z axis of the plot + # is the axis we slice over and the buffer will be of size 1 + # in that dimension + x, y, z = self.x_axis[dim], self.y_axis[dim], dim + + buff_size = np.zeros(3, dtype="int64") + buff_size[x] = size[0] + buff_size[y] = size[1] + buff_size[z] = 1 + + buff_bounds = np.zeros(6, dtype="float64") + buff_bounds[2*x:2*x+2] = bounds[0:2] + buff_bounds[2*y:2*y+2] = bounds[2:4] + buff_bounds[2*z] = data_source.coord + buff_bounds[2*z+1] = data_source.coord + + # then we do the interpolation + buff_temp = np.zeros(buff_size, dtype="float64") + + fields_to_get = ['particle_position', 'density', 'mass', + 'smoothing_length', field[1]] + all_fields = all_data(self.ds, ptype, fields_to_get, kdtree=True) + + num_neighbors = getattr(self.ds, 'num_neighbors', 32) + interpolate_sph_grid_gather(buff_temp, + all_fields['particle_position'].to("code_length"), + buff_bounds, + all_fields['smoothing_length'].to("code_length"), + all_fields['mass'].to('code_mass'), + all_fields['density'].to('code_density'), + all_fields[field[1]].in_units(ounits), + self.ds.index.kdtree, + num_neigh=num_neighbors, + use_normalization=normalize) + + # We swap the axes back so the axis which was sliced over + # is the last axis, as this is the "z" axis of the plots. + if z != 2: + buff_temp = buff_temp.swapaxes(2, z) + if x == 2: + x = z + else: + y = z + + buff = buff_temp[:,:,0] + + # Then we just transpose if the buffer x and y are + # different than the plot x and y + if y < x: + buff = buff.transpose() + else: + raise NotImplementedError( + "A pixelization routine has not been implemented for %s " + "data objects" % str(type(data_source))) + buff = buff.transpose() else: - pixelize_cartesian(buff, data_source['px'], data_source['py'], + pixelize_cartesian(buff, + data_source['px'], data_source['py'], data_source['pdx'], data_source['pdy'], data_source[field], bounds, int(antialias), @@ -241,11 +461,15 @@ def _ortho_pixelize(self, data_source, field, bounds, size, antialias, return buff def _oblique_pixelize(self, data_source, field, bounds, size, antialias): + from yt.frontends.ytdata.data_structures import YTSpatialPlotDataset indices = np.argsort(data_source['pdx'])[::-1].astype(np.int_) buff = np.zeros((size[1], size[0]), dtype="f8") + ftype = 'index' + if isinstance(data_source.ds, YTSpatialPlotDataset): + ftype = 'gas' pixelize_off_axis_cartesian(buff, - data_source['index', 'x'], data_source['index', 'y'], - data_source['index', 'z'], data_source['px'], + data_source[ftype, 'x'], data_source[ftype, 'y'], + data_source[ftype, 'z'], data_source['px'], data_source['py'], data_source['pdx'], data_source['pdy'], data_source['pdz'], data_source.center, data_source._inv_mat, indices, diff --git a/yt/geometry/coordinates/coordinate_handler.py b/yt/geometry/coordinates/coordinate_handler.py index c9d55d5e03b..6e7f2f00bd0 100644 --- a/yt/geometry/coordinates/coordinate_handler.py +++ b/yt/geometry/coordinates/coordinate_handler.py @@ -1,24 +1,7 @@ -""" -Coordinate handler base class. 
- - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import weakref from numbers import Number -from yt.extern.six import string_types from yt.funcs import \ validate_width_tuple, \ fix_unitary, \ @@ -215,6 +198,8 @@ def sanitize_depth(self, depth): def sanitize_width(self, axis, width, depth): if width is None: + # initialize the index if it is not already initialized + self.ds.index # Default to code units if not iterable(axis): xax = self.x_axis[axis] @@ -241,18 +226,21 @@ def sanitize_width(self, axis, width, depth): return width def sanitize_center(self, center, axis): - if isinstance(center, string_types): + if isinstance(center, str): if center.lower() == "m" or center.lower() == "max": v, center = self.ds.find_max(("gas", "density")) center = self.ds.arr(center, 'code_length') elif center.lower() == "c" or center.lower() == "center": + # domain_left_edge and domain_right_edge might not be + # initialized until we create the index, so create it + self.ds.index center = (self.ds.domain_left_edge + self.ds.domain_right_edge) / 2 else: raise RuntimeError('center keyword \"%s\" not recognized' % center) elif isinstance(center, YTArray): return self.ds.arr(center), self.convert_to_cartesian(center) elif iterable(center): - if isinstance(center[0], string_types) and isinstance(center[1], string_types): + if isinstance(center[0], str) and isinstance(center[1], str): if center[0].lower() == "min": v, center = self.ds.find_min(center[1]) elif center[0].lower() == "max": @@ -260,7 +248,7 @@ def sanitize_center(self, center, axis): else: raise RuntimeError("center keyword \"%s\" not recognized" % center) center = self.ds.arr(center, 'code_length') - elif iterable(center[0]) and isinstance(center[1], string_types): + elif iterable(center[0]) and isinstance(center[1], str): center = self.ds.arr(center[0], center[1]) else: center = self.ds.arr(center, 'code_length') diff --git a/yt/geometry/coordinates/cylindrical_coordinates.py b/yt/geometry/coordinates/cylindrical_coordinates.py index 286b9539e32..ac5471fa07f 100644 --- a/yt/geometry/coordinates/cylindrical_coordinates.py +++ b/yt/geometry/coordinates/cylindrical_coordinates.py @@ -1,19 +1,3 @@ -""" -Definitions for cylindrical coordinate systems - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np from .coordinate_handler import \ CoordinateHandler, \ @@ -39,31 +23,57 @@ def __init__(self, ds, ordering = ('r', 'z', 'theta')): def setup_fields(self, registry): # return the fields for r, z, theta - registry.add_field(("index", "dx"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "dy"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "x"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "y"), sampling_type="cell", function=_unknown_coord) + registry.add_field(("index", "dx"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "dy"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "x"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "y"), + sampling_type="cell", + function=_unknown_coord) f1, f2 = _get_coord_fields(self.axis_id['r']) - registry.add_field(("index", "dr"), sampling_type="cell", function = f1, + registry.add_field(("index", "dr"), + sampling_type="cell", + function = f1, display_field = False, units = "code_length") - registry.add_field(("index", "r"), sampling_type="cell", function = f2, + + registry.add_field(("index", "r"), + sampling_type="cell", + function = f2, display_field = False, units = "code_length") f1, f2 = _get_coord_fields(self.axis_id['z']) - registry.add_field(("index", "dz"), sampling_type="cell", function = f1, + registry.add_field(("index", "dz"), + sampling_type="cell", + function = f1, display_field = False, units = "code_length") - registry.add_field(("index", "z"), sampling_type="cell", function = f2, + + registry.add_field(("index", "z"), + sampling_type="cell", + function = f2, display_field = False, units = "code_length") f1, f2 = _get_coord_fields(self.axis_id['theta'], "") - registry.add_field(("index", "dtheta"), sampling_type="cell", function = f1, + registry.add_field(("index", "dtheta"), + sampling_type="cell", + function = f1, display_field = False, units = "") - registry.add_field(("index", "theta"), sampling_type="cell", function = f2, + + registry.add_field(("index", "theta"), + sampling_type="cell", + function = f2, display_field = False, units = "") @@ -74,26 +84,34 @@ def _CylindricalVolume(field, data): vol *= data["index", "dtheta"] vol *= data["index", "dz"] return vol - registry.add_field(("index", "cell_volume"), sampling_type="cell", - function=_CylindricalVolume, - units = "code_length**3") + registry.add_field(("index", "cell_volume"), + sampling_type="cell", + function=_CylindricalVolume, + units = "code_length**3") + registry.alias(('index', 'volume'), ('index', 'cell_volume')) def _path_r(field, data): return data["index", "dr"] - registry.add_field(("index", "path_element_r"), sampling_type="cell", - function = _path_r, - units = "code_length") + + registry.add_field(("index", "path_element_r"), + sampling_type="cell", + function = _path_r, + units = "code_length") + def _path_theta(field, data): # Note: this already assumes cell-centered return data["index", "r"] * data["index", "dtheta"] - registry.add_field(("index", "path_element_theta"), sampling_type="cell", - function = _path_theta, - units = "code_length") + registry.add_field(("index", "path_element_theta"), + sampling_type="cell", + function = _path_theta, + units = "code_length") + def _path_z(field, data): return data["index", "dz"] - registry.add_field(("index", 
"path_element_z"), sampling_type="cell", - function = _path_z, - units = "code_length") + registry.add_field(("index", "path_element_z"), + sampling_type="cell", + function = _path_z, + units = "code_length") def pixelize(self, dimension, data_source, field, bounds, size, antialias = True, periodic = False): diff --git a/yt/geometry/coordinates/geographic_coordinates.py b/yt/geometry/coordinates/geographic_coordinates.py index e48d9905bf7..0413d7d3ebe 100644 --- a/yt/geometry/coordinates/geographic_coordinates.py +++ b/yt/geometry/coordinates/geographic_coordinates.py @@ -1,19 +1,3 @@ -""" -Definitions for geographic coordinate systems - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from .coordinate_handler import \ CoordinateHandler, \ @@ -37,35 +21,68 @@ def __init__(self, ds, ordering = None): def setup_fields(self, registry): # return the fields for r, z, theta - registry.add_field(("index", "dx"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "dy"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "dz"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "x"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "y"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "z"), sampling_type="cell", function=_unknown_coord) + registry.add_field(("index", "dx"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "dy"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "dz"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "x"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "y"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "z"), + sampling_type="cell", + function=_unknown_coord) + f1, f2 = _get_coord_fields(self.axis_id['latitude'], "") - registry.add_field(("index", "dlatitude"), sampling_type="cell", function = f1, + registry.add_field(("index", "dlatitude"), + sampling_type="cell", + function = f1, display_field = False, units = "") - registry.add_field(("index", "latitude"), sampling_type="cell", function = f2, + + registry.add_field(("index", "latitude"), + sampling_type="cell", + function = f2, display_field = False, units = "") f1, f2 = _get_coord_fields(self.axis_id['longitude'], "") - registry.add_field(("index", "dlongitude"), sampling_type="cell", function = f1, - display_field = False, - units = "") - registry.add_field(("index", "longitude"), sampling_type="cell", function = f2, - display_field = False, - units = "") + registry.add_field(("index", "dlongitude"), + sampling_type="cell", + function=f1, + display_field=False, + units="") + + registry.add_field(("index", "longitude"), + sampling_type="cell", + function=f2, + display_field=False, + units="") f1, f2 = _get_coord_fields(self.axis_id[self.radial_axis]) - registry.add_field(("index", "d%s" % (self.radial_axis,)), sampling_type="cell", function = f1, - display_field = False, - units = "code_length") - registry.add_field(("index", self.radial_axis), sampling_type="cell", 
function = f2, - display_field = False, - units = "code_length") + registry.add_field(("index", "d%s" % (self.radial_axis,)), + sampling_type="cell", + function=f1, + display_field=False, + units="code_length") + + registry.add_field(("index", self.radial_axis), + sampling_type="cell", + function=f2, + display_field=False, + units="code_length") def _SphericalVolume(field, data): # We can use the transformed coordinates here. @@ -78,54 +95,75 @@ def _SphericalVolume(field, data): vol *= np.cos(theta-0.5*dtheta)-np.cos(theta+0.5*dtheta) vol *= data["index", "dphi"] return vol - registry.add_field(("index", "cell_volume"), sampling_type="cell", - function=_SphericalVolume, - units = "code_length**3") + + registry.add_field(("index", "cell_volume"), + sampling_type="cell", + function=_SphericalVolume, + units = "code_length**3") + registry.alias(('index', 'volume'), ('index', 'cell_volume')) def _path_radial_axis(field, data): return data["index", "d%s" % self.radial_axis] - registry.add_field(("index", "path_element_%s" % self.radial_axis), sampling_type="cell", - function = _path_radial_axis, - units = "code_length") + + registry.add_field(("index", "path_element_%s" % self.radial_axis), + sampling_type="cell", + function = _path_radial_axis, + units = "code_length") + def _path_latitude(field, data): # We use r here explicitly return data["index", "r"] * \ data["index", "dlatitude"] * np.pi/180.0 - registry.add_field(("index", "path_element_latitude"), sampling_type="cell", - function = _path_latitude, - units = "code_length") + + registry.add_field(("index", "path_element_latitude"), + sampling_type="cell", + function = _path_latitude, + units = "code_length") + def _path_longitude(field, data): # We use r here explicitly return data["index", "r"] \ * data["index", "dlongitude"] * np.pi/180.0 \ * np.sin((data["index", "latitude"] + 90.0) * np.pi/180.0) - registry.add_field(("index", "path_element_longitude"), sampling_type="cell", - function = _path_longitude, - units = "code_length") + + registry.add_field(("index", "path_element_longitude"), + sampling_type="cell", + function=_path_longitude, + units="code_length") def _latitude_to_theta(field, data): # latitude runs from -90 to 90 return (data["latitude"] + 90) * np.pi/180.0 - registry.add_field(("index", "theta"), sampling_type="cell", - function = _latitude_to_theta, - units = "") + + registry.add_field(("index", "theta"), + sampling_type="cell", + function=_latitude_to_theta, + units="") + def _dlatitude_to_dtheta(field, data): return data["dlatitude"] * np.pi/180.0 - registry.add_field(("index", "dtheta"), sampling_type="cell", - function = _dlatitude_to_dtheta, - units = "") + + registry.add_field(("index", "dtheta"), + sampling_type="cell", + function=_dlatitude_to_dtheta, + units="") def _longitude_to_phi(field, data): # longitude runs from -180 to 180 return (data["longitude"] + 180) * np.pi/180.0 - registry.add_field(("index", "phi"), sampling_type="cell", - function = _longitude_to_phi, - units = "") + + registry.add_field(("index", "phi"), + sampling_type="cell", + function=_longitude_to_phi, + units="") + def _dlongitude_to_dphi(field, data): return data["dlongitude"] * np.pi/180.0 - registry.add_field(("index", "dphi"), sampling_type="cell", - function = _dlongitude_to_dphi, - units = "") + + registry.add_field(("index", "dphi"), + sampling_type="cell", + function=_dlongitude_to_dphi, + units="") self._setup_radial_fields(registry) @@ -140,9 +178,11 @@ def _altitude_to_radius(field, data): else: surface_height = 
data.ds.quan(0.0, "code_length") return data["altitude"] + surface_height - registry.add_field(("index", "r"), sampling_type="cell", - function=_altitude_to_radius, - units = "code_length") + + registry.add_field(("index", "r"), + sampling_type="cell", + function=_altitude_to_radius, + units = "code_length") registry.alias(("index", "dr"), ("index", "daltitude")) def _retrieve_radial_offset(self, data_source = None): @@ -380,9 +420,11 @@ def _depth_to_radius(field, data): rax = self.axis_id[self.radial_axis] outer_radius = data.ds.domain_right_edge[rax] return -1.0 * data["depth"] + outer_radius - registry.add_field(("index", "r"), sampling_type="cell", - function=_depth_to_radius, - units = "code_length") + + registry.add_field(("index", "r"), + sampling_type="cell", + function=_depth_to_radius, + units="code_length") registry.alias(("index", "dr"), ("index", "ddepth")) def _retrieve_radial_offset(self, data_source = None): diff --git a/yt/geometry/coordinates/polar_coordinates.py b/yt/geometry/coordinates/polar_coordinates.py index a0f09c2359c..9c8adb6dc9d 100644 --- a/yt/geometry/coordinates/polar_coordinates.py +++ b/yt/geometry/coordinates/polar_coordinates.py @@ -1,19 +1,3 @@ -""" -Definitions for polar coordinate systems - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .cylindrical_coordinates import CylindricalCoordinateHandler diff --git a/yt/geometry/coordinates/spec_cube_coordinates.py b/yt/geometry/coordinates/spec_cube_coordinates.py index 0d0213bfbc3..c162cdab079 100644 --- a/yt/geometry/coordinates/spec_cube_coordinates.py +++ b/yt/geometry/coordinates/spec_cube_coordinates.py @@ -1,19 +1,3 @@ -""" -Definitions for spectral cube coordinate systems - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .cartesian_coordinates import \ CartesianCoordinateHandler from .coordinate_handler import \ @@ -67,23 +51,38 @@ def _length_func(field, data): rv[:] = 1.0 return rv return _length_func - registry.add_field(("index", "d%s" % ax), sampling_type="cell", function = f1, - display_field = False, - units = "code_length") - registry.add_field(("index", "path_element_%s" % ax), sampling_type="cell", - function = _get_length_func(), - display_field = False, - units = "") - registry.add_field(("index", "%s" % ax), sampling_type="cell", function = f2, - display_field = False, - units = "code_length") + + registry.add_field(("index", "d%s" % ax), + sampling_type="cell", + function=f1, + display_field=False, + units="code_length") + + registry.add_field(("index", "path_element_%s" % ax), + sampling_type="cell", + function=_get_length_func(), + display_field=False, + units="") + + registry.add_field(("index", "%s" % ax), + sampling_type="cell", + function=f2, + display_field=False, + units="code_length") + def _cell_volume(field, data): rv = data["index", "dx"].copy(order='K') rv *= data["index", "dy"] rv *= data["index", "dz"] return rv - registry.add_field(("index", "cell_volume"), sampling_type="cell", function=_cell_volume, - display_field=False, units = "code_length**3") + + registry.add_field(("index", "cell_volume"), + sampling_type="cell", + function=_cell_volume, + display_field=False, + units="code_length**3") + registry.alias(('index', 'volume'), ('index', 'cell_volume')) + registry.check_derived_fields( [("index", "dx"), ("index", "dy"), ("index", "dz"), ("index", "x"), ("index", "y"), ("index", "z"), diff --git a/yt/geometry/coordinates/spherical_coordinates.py b/yt/geometry/coordinates/spherical_coordinates.py index 7126ffabdbc..affd3f5ad00 100644 --- a/yt/geometry/coordinates/spherical_coordinates.py +++ b/yt/geometry/coordinates/spherical_coordinates.py @@ -1,18 +1,3 @@ -""" -Definitions for spherical coordinate systems - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- from __future__ import print_function import numpy as np from .coordinate_handler import \ @@ -35,35 +20,68 @@ def __init__(self, ds, ordering = ('r', 'theta', 'phi')): def setup_fields(self, registry): # return the fields for r, z, theta - registry.add_field(("index", "dx"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "dy"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "dz"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "x"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "y"), sampling_type="cell", function=_unknown_coord) - registry.add_field(("index", "z"), sampling_type="cell", function=_unknown_coord) + registry.add_field(("index", "dx"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "dy"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "dz"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "x"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "y"), + sampling_type="cell", + function=_unknown_coord) + + registry.add_field(("index", "z"), + sampling_type="cell", + function=_unknown_coord) + f1, f2 = _get_coord_fields(self.axis_id['r']) - registry.add_field(("index", "dr"), sampling_type="cell", function = f1, - display_field = False, - units = "code_length") - registry.add_field(("index", "r"), sampling_type="cell", function = f2, - display_field = False, - units = "code_length") + registry.add_field(("index", "dr"), + sampling_type="cell", + function=f1, + display_field=False, + units="code_length") + + registry.add_field(("index", "r"), + sampling_type="cell", + function=f2, + display_field=False, + units="code_length") f1, f2 = _get_coord_fields(self.axis_id['theta'], "") - registry.add_field(("index", "dtheta"), sampling_type="cell", function = f1, - display_field = False, - units = "") - registry.add_field(("index", "theta"), sampling_type="cell", function = f2, - display_field = False, - units = "") + registry.add_field(("index", "dtheta"), + sampling_type="cell", + function=f1, + display_field=False, + units="") + + registry.add_field(("index", "theta"), + sampling_type="cell", + function=f2, + display_field=False, + units="") f1, f2 = _get_coord_fields(self.axis_id['phi'], "") - registry.add_field(("index", "dphi"), sampling_type="cell", function = f1, - display_field = False, - units = "") - registry.add_field(("index", "phi"), sampling_type="cell", function = f2, - display_field = False, - units = "") + registry.add_field(("index", "dphi"), + sampling_type="cell", + function=f1, + display_field=False, + units="") + + registry.add_field(("index", "phi"), + sampling_type="cell", + function=f2, + display_field=False, + units="") def _SphericalVolume(field, data): # Here we compute the spherical volume element exactly @@ -75,29 +93,39 @@ def _SphericalVolume(field, data): vol *= np.cos(theta-0.5*dtheta)-np.cos(theta+0.5*dtheta) vol *= data["index", "dphi"] return vol - registry.add_field(("index", "cell_volume"), sampling_type="cell", - function=_SphericalVolume, - units = "code_length**3") + + registry.add_field(("index", "cell_volume"), + sampling_type="cell", + function=_SphericalVolume, + units = "code_length**3") + registry.alias(('index', 'volume'), ('index', 'cell_volume')) def _path_r(field, data): return data["index", 
"dr"] - registry.add_field(("index", "path_element_r"), sampling_type="cell", - function = _path_r, - units = "code_length") + + registry.add_field(("index", "path_element_r"), + sampling_type="cell", + function = _path_r, + units = "code_length") + def _path_theta(field, data): # Note: this already assumes cell-centered return data["index", "r"] * data["index", "dtheta"] - registry.add_field(("index", "path_element_theta"), sampling_type="cell", - function = _path_theta, - units = "code_length") + + registry.add_field(("index", "path_element_theta"), + sampling_type="cell", + function = _path_theta, + units = "code_length") def _path_phi(field, data): # Note: this already assumes cell-centered return data["index", "r"] \ * data["index", "dphi"] \ * np.sin(data["index", "theta"]) - registry.add_field(("index", "path_element_phi"), sampling_type="cell", - function = _path_phi, - units = "code_length") + + registry.add_field(("index", "path_element_phi"), + sampling_type="cell", + function = _path_phi, + units = "code_length") def pixelize(self, dimension, data_source, field, bounds, size, antialias = True, periodic = True): diff --git a/yt/analysis_modules/halo_analysis/tests/__init__.py b/yt/geometry/coordinates/tests/__init__.py similarity index 100% rename from yt/analysis_modules/halo_analysis/tests/__init__.py rename to yt/geometry/coordinates/tests/__init__.py diff --git a/yt/geometry/coordinates/tests/test_geographic_coordinates.py b/yt/geometry/coordinates/tests/test_geographic_coordinates.py index 3f5f3f3dc09..7b71dd35753 100644 --- a/yt/geometry/coordinates/tests/test_geographic_coordinates.py +++ b/yt/geometry/coordinates/tests/test_geographic_coordinates.py @@ -18,7 +18,7 @@ def test_geographic_coordinates(): # means our volume will be that of a shell 1000 wide, starting at r of # whatever our surface_height is set to. ds = fake_amr_ds(geometry="geographic") - ds.surface_height = ds.quan(5000, "code_length") + ds.surface_height = ds.quan(5000., "code_length") axes = ["latitude", "longitude", "altitude"] for i, axis in enumerate(axes): dd = ds.all_data() @@ -35,7 +35,7 @@ def test_geographic_coordinates(): assert_equal(dd["index","dphi"], dd["index","dlongitude"]*np.pi/180.0) # Note our terrible agreement here. assert_rel_equal(dd["cell_volume"].sum(dtype="float64"), - (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), 3) + (4.0/3.0) * np.pi * (outer_r**3 - inner_r**3), 10) assert_equal(dd["index", "path_element_altitude"], dd["index", "daltitude"]) assert_equal(dd["index", "path_element_altitude"], dd["index", "dr"]) # Note that latitude corresponds to theta, longitude to phi @@ -55,7 +55,7 @@ def test_internal_geographic_coordinates(): # means our volume will be that of a shell 1000 wide, starting at r of # outer_radius - 1000. ds = fake_amr_ds(geometry="internal_geographic") - ds.outer_radius = ds.quan(5000, "code_length") + ds.outer_radius = ds.quan(5000., "code_length") axes = ["latitude", "longitude", "depth"] for i, axis in enumerate(axes): dd = ds.all_data() diff --git a/yt/geometry/fake_octree.pyx b/yt/geometry/fake_octree.pyx index 7f35289b8d0..e69dc92165f 100644 --- a/yt/geometry/fake_octree.pyx +++ b/yt/geometry/fake_octree.pyx @@ -6,13 +6,6 @@ Make a fake octree, deposit particle at every leaf """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- from libc.stdlib cimport malloc, free, rand, RAND_MAX cimport numpy as np @@ -45,7 +38,7 @@ def create_fake_octree(RAMSESOctreeContainer oct_handler, cur_leaf = 8 #we've added one parent... mask = np.ones((max_noct,8),dtype='uint8') while oct_handler.domains[0].n_assigned < max_noct: - print "root: nocts ", oct_handler.domains[0].n_assigned + print("root: nocts ", oct_handler.domains[0].n_assigned) cur_leaf = subdivide(oct_handler, parent, ind, dd, cur_leaf, 0, max_noct, max_level, fsubdivide, mask) return cur_leaf @@ -57,7 +50,7 @@ cdef long subdivide(RAMSESOctreeContainer oct_handler, long cur_leaf, long cur_level, long max_noct, long max_level, float fsubdivide, np.ndarray[np.uint8_t, ndim=2] mask): - print "child", parent.file_ind, ind[0], ind[1], ind[2], cur_leaf, cur_level + print("child", parent.file_ind, ind[0], ind[1], ind[2], cur_leaf, cur_level) cdef int ddr[3] cdef int ii cdef long i diff --git a/yt/geometry/geometry_handler.py b/yt/geometry/geometry_handler.py index 6fdc84a7375..8d3c3703945 100644 --- a/yt/geometry/geometry_handler.py +++ b/yt/geometry/geometry_handler.py @@ -1,21 +1,5 @@ -""" -Geometry container base class. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os -from yt.extern.six.moves import cPickle +import pickle import weakref from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np @@ -32,7 +16,6 @@ class Index(ParallelAnalysisInterface): """The base index class""" - _global_mesh = True _unsupported_objects = () _index_properties = () @@ -149,7 +132,7 @@ def save_object(self, obj, name): Save an object (*obj*) to the data_file using the Pickle protocol, under the name *name* on the node /Objects. 
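+
+        For example, given a previously created data object ``sp``,
+        ``ds.index.save_object(sp, "sphere_to_analyze_later")`` stores it so
+        that ``load_object("sphere_to_analyze_later")`` can return it in a
+        later session.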
""" - s = cPickle.dumps(obj, protocol=-1) + s = pickle.dumps(obj, protocol=-1) self.save_data(np.array(s, dtype='c'), "/Objects", name, force = True) def load_object(self, name): @@ -160,7 +143,7 @@ def load_object(self, name): obj = self.get_data("/Objects", name) if obj is None: return - obj = cPickle.loads(obj.value) + obj = pickle.loads(obj.value) if iterable(obj) and len(obj) == 2: obj = obj[1] # Just the object, not the ds if hasattr(obj, '_fix_pickle'): obj._fix_pickle() @@ -220,8 +203,9 @@ def _read_particle_fields(self, fields, dobj, chunk = None): selector = dobj.selector if chunk is None: self._identify_base_chunk(dobj) + chunks = self._chunk_io(dobj, cache = False) fields_to_return = self.io._read_particle_selection( - self._chunk_io(dobj, cache = False), + chunks, selector, fields_to_read) return fields_to_return, fields_to_generate @@ -307,11 +291,11 @@ def fcoords(self): if self._fast_index is not None: ci = self._fast_index.select_fcoords( self.dobj.selector, self.data_size) - ci = YTArray(ci, input_units = "code_length", + ci = YTArray(ci, units = "code_length", registry = self.dobj.ds.unit_registry) return ci ci = np.empty((self.data_size, 3), dtype='float64') - ci = YTArray(ci, input_units = "code_length", + ci = YTArray(ci, units = "code_length", registry = self.dobj.ds.unit_registry) if self.data_size == 0: return ci ind = 0 @@ -343,11 +327,11 @@ def fwidth(self): if self._fast_index is not None: ci = self._fast_index.select_fwidth( self.dobj.selector, self.data_size) - ci = YTArray(ci, input_units = "code_length", + ci = YTArray(ci, units = "code_length", registry = self.dobj.ds.unit_registry) return ci ci = np.empty((self.data_size, 3), dtype='float64') - ci = YTArray(ci, input_units = "code_length", + ci = YTArray(ci, units = "code_length", registry = self.dobj.ds.unit_registry) if self.data_size == 0: return ci ind = 0 @@ -399,7 +383,7 @@ def fcoords_vertex(self): nodes_per_elem = self.dobj.index.meshes[0].connectivity_indices.shape[1] dim = self.dobj.ds.dimensionality ci = np.empty((self.data_size, nodes_per_elem, dim), dtype='float64') - ci = YTArray(ci, input_units = "code_length", + ci = YTArray(ci, units = "code_length", registry = self.dobj.ds.unit_registry) if self.data_size == 0: return ci ind = 0 diff --git a/yt/geometry/grid_container.pxd b/yt/geometry/grid_container.pxd index d8d685030d5..8c63731ab4b 100644 --- a/yt/geometry/grid_container.pxd +++ b/yt/geometry/grid_container.pxd @@ -5,13 +5,6 @@ Matching points on the grid to specific grids """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/geometry/grid_container.pyx b/yt/geometry/grid_container.pyx index 8fc6419d8ce..73f995a911e 100644 --- a/yt/geometry/grid_container.pyx +++ b/yt/geometry/grid_container.pyx @@ -5,13 +5,6 @@ Matching points on the grid to specific grids """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 3ff193cac19..f176e124e4f 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -1,19 +1,3 @@ -""" -AMR index container class - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np import weakref diff --git a/yt/geometry/grid_visitors.pxd b/yt/geometry/grid_visitors.pxd index d9f953b3b30..aa98e3b05e6 100644 --- a/yt/geometry/grid_visitors.pxd +++ b/yt/geometry/grid_visitors.pxd @@ -6,13 +6,6 @@ Grid visitor definitions file """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np diff --git a/yt/geometry/grid_visitors.pyx b/yt/geometry/grid_visitors.pyx index 5f282bb2a62..5c77bfacf57 100644 --- a/yt/geometry/grid_visitors.pyx +++ b/yt/geometry/grid_visitors.pyx @@ -6,13 +6,6 @@ Grid visitor functions """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np cimport cython diff --git a/yt/geometry/object_finding_mixin.py b/yt/geometry/object_finding_mixin.py index f8dfbc2acfa..e1bc96e8dad 100644 --- a/yt/geometry/object_finding_mixin.py +++ b/yt/geometry/object_finding_mixin.py @@ -1,18 +1,3 @@ -""" -AMR index container class - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.config import ytcfg diff --git a/yt/geometry/oct_container.pxd b/yt/geometry/oct_container.pxd index d318643bfd0..4f7c482f075 100644 --- a/yt/geometry/oct_container.pxd +++ b/yt/geometry/oct_container.pxd @@ -6,13 +6,6 @@ Oct definitions file """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport cython cimport numpy as np @@ -30,6 +23,9 @@ cdef int ORDER_MAX cdef struct OctKey: np.int64_t key Oct *node + # These next two are for particle sparse octrees. 
+ np.int64_t *indices + np.int64_t pcount cdef struct OctInfo: np.float64_t left_edge[3] @@ -76,14 +72,13 @@ cdef class OctreeContainer: cdef int get_root(self, int ind[3], Oct **o) nogil cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors, Oct *o, bint periodicity[3]) - cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *) # This function must return the offset from global-to-local domains; i.e., # AllocationContainer.offset if such a thing exists. cdef np.int64_t get_domain_offset(self, int domain_id) cdef void visit_all_octs(self, selection_routines.SelectorObject selector, OctVisitor visitor, - int vc = ?) + int vc = ?, np.int64_t *indices = ?) cdef Oct *next_root(self, int domain_id, int ind[3]) cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent) cdef void append_domain(self, np.int64_t domain_count) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index c6e094121ff..4f077a13712 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -6,19 +6,12 @@ Oct container """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport cython cimport numpy as np import numpy as np from selection_routines cimport SelectorObject -from libc.math cimport floor +from libc.math cimport floor, ceil cimport selection_routines from yt.geometry.oct_visitors cimport OctPadded @@ -133,7 +126,7 @@ cdef class OctreeContainer: obj.nocts = cur.n_assigned if obj.nocts * visitor.nz != ref_mask.size: raise KeyError(ref_mask.size, obj.nocts, obj.oref, - obj.partial_coverage) + obj.partial_coverage, visitor.nz) return obj def __dealloc__(self): @@ -149,7 +142,8 @@ cdef class OctreeContainer: @cython.cdivision(True) cdef void visit_all_octs(self, SelectorObject selector, - OctVisitor visitor, int vc = -1): + OctVisitor visitor, int vc = -1, + np.int64_t *indices = NULL): cdef int i, j, k, n if vc == -1: vc = self.partial_coverage @@ -179,12 +173,6 @@ cdef class OctreeContainer: pos[1] += dds[1] pos[0] += dds[0] - cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size): - cdef int i - #for i in range(3): - # size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level) - # corner[i] = o.pos[i] * size[i] + self.DLE[i] - cdef np.int64_t get_domain_offset(self, int domain_id): return 0 @@ -208,7 +196,6 @@ cdef class OctreeContainer: cdef np.int64_t ipos[3] cdef np.float64_t dds[3] cdef np.float64_t cp[3] - cdef np.float64_t pp[3] cdef Oct *cur cdef Oct *next cdef int i @@ -309,15 +296,13 @@ cdef class OctreeContainer: @cython.cdivision(True) cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors, Oct *o, bint periodicity[3]): - cdef Oct* candidate - nn = 0 # We are going to do a brute-force search here. # This is not the most efficient -- in fact, it's relatively bad. But # we will attempt to improve it in a future iteration, where we will # grow a stack of parent Octs. # Note that in the first iteration, we will just find the up-to-27 # neighbors, including the main oct. 
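[Annotation, not part of the patch] For reference on the brute-force search described in the comment above: the up-to-27 candidates are simply the 3x3x3 block of index offsets around the oct, wrapped where the domain is periodic and dropped where it is not. A minimal pure-Python sketch under those assumptions; the function and argument names here are illustrative, not the Cython API.

    import itertools

    def candidate_neighbor_indices(ind, dims, periodicity):
        # ind: (i, j, k) index of the oct; dims: mesh size along each axis;
        # periodicity: three booleans, as in the `periodicity[3]` argument above.
        found = set()
        for offsets in itertools.product((-1, 0, 1), repeat=3):
            nbr = []
            valid = True
            for x, dx, n, per in zip(ind, offsets, dims, periodicity):
                x = x + dx
                if per:
                    x %= n            # wrap across a periodic boundary
                elif not (0 <= x < n):
                    valid = False     # outside a non-periodic edge: not a neighbor
                    break
                nbr.append(x)
            if valid:
                found.add(tuple(nbr))
        return found                  # up to 27 entries, including the oct itself

    # A corner oct of a non-periodic 4^3 root mesh keeps only 8 of the 27 candidates.
    print(len(candidate_neighbor_indices((0, 0, 0), (4, 4, 4), (False, False, False))))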
- cdef np.int64_t i, j, k, n, level, ii, nfound = 0, dlevel + cdef np.int64_t i, j, k, n, level, ii, dlevel cdef int ind[3] cdef OctList *olist cdef OctList *my_list @@ -546,15 +531,15 @@ cdef class OctreeContainer: visitor.dims = dims self.visit_all_octs(selector, visitor) if (visitor.global_index + 1) * visitor.nz * visitor.dims > source.size: - print "GLOBAL INDEX RAN AHEAD.", + print("GLOBAL INDEX RAN AHEAD.",) print (visitor.global_index + 1) * visitor.nz * visitor.dims - source.size - print dest.size, source.size, num_cells + print(dest.size, source.size, num_cells) raise RuntimeError if visitor.index > dest.size: - print "DEST INDEX RAN AHEAD.", - print visitor.index - dest.size + print("DEST INDEX RAN AHEAD.",) + print(visitor.index - dest.size) print (visitor.global_index + 1) * visitor.nz * visitor.dims, source.size - print num_cells + print(num_cells) raise RuntimeError if num_cells >= 0: return dest @@ -577,11 +562,10 @@ cdef class OctreeContainer: np.ndarray[np.float64_t, ndim=2] pos, int skip_boundary = 1, int count_boundary = 0): - cdef int level, no, p, i, j, k + cdef int no, p, i cdef int ind[3] cdef int nb = 0 cdef Oct *cur - cdef Oct *next = NULL cdef np.float64_t pp[3] cdef np.float64_t cp[3] cdef np.float64_t dds[3] @@ -609,7 +593,7 @@ cdef class OctreeContainer: if cur == NULL: raise RuntimeError # Now we find the location we want # Note that RAMSES I think 1-findiceses levels, but we don't. - for level in range(curlevel): + for _ in range(curlevel): # At every level, find the cell this oct # lives inside for i in range(3): @@ -703,9 +687,27 @@ cdef class OctreeContainer: self.visit_all_octs(selector, visitor) return levels, cell_inds, file_inds + def morton_index_octs(self, SelectorObject selector, int domain_id, + num_cells = -1): + cdef np.int64_t i + cdef np.uint8_t[:] levels + cdef np.uint64_t[:] morton_inds + if num_cells < 0: + num_cells = selector.count_oct_cells(self, domain_id) + levels = np.zeros(num_cells, dtype="uint8") + morton_inds = np.zeros(num_cells, dtype="uint64") + for i in range(num_cells): + levels[i] = 100 + morton_inds[i] = 0 + cdef oct_visitors.MortonIndexOcts visitor + visitor = oct_visitors.MortonIndexOcts(self, domain_id) + visitor.level_arr = levels + visitor.morton_ind = morton_inds + self.visit_all_octs(selector, visitor) + return levels, morton_inds + def domain_count(self, SelectorObject selector): # We create oct arrays of the correct size - cdef np.int64_t i, num_octs cdef np.ndarray[np.int64_t, ndim=1] domain_counts domain_counts = np.zeros(self.num_domains, dtype="int64") cdef oct_visitors.CountByDomain visitor @@ -725,10 +727,8 @@ cdef class OctreeContainer: np.int64_t offset = 0): cdef np.ndarray[np.float64_t, ndim=2] source cdef np.ndarray[np.float64_t, ndim=1] dest - cdef int n - cdef int i, di - cdef np.int64_t local_pos, local_filled = 0 - cdef np.float64_t val + cdef int i + cdef np.int64_t local_filled = 0 for key in dest_fields: dest = dest_fields[key] source = source_fields[key] @@ -760,7 +760,7 @@ cdef class SparseOctreeContainer(OctreeContainer): def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge, over_refine = 1): - cdef int i, j, k, p + cdef int i self.partial_coverage = 1 self.oref = over_refine for i in range(3): @@ -781,7 +781,7 @@ cdef class SparseOctreeContainer(OctreeContainer): self.fill_style = "r" @classmethod - def load_octree(self, header): + def load_octree(cls, header): raise NotImplementedError def save_octree(self): @@ -789,7 +789,6 @@ cdef class 
SparseOctreeContainer(OctreeContainer): cdef int get_root(self, int ind[3], Oct **o) nogil: o[0] = NULL - cdef int i cdef np.int64_t key = self.ipos_to_key(ind) cdef OctKey okey cdef OctKey **oresult = NULL @@ -824,7 +823,8 @@ cdef class SparseOctreeContainer(OctreeContainer): @cython.cdivision(True) cdef void visit_all_octs(self, SelectorObject selector, OctVisitor visitor, - int vc = -1): + int vc = -1, + np.int64_t *indices = NULL): cdef int i, j, k, n cdef np.int64_t key, ukey visitor.global_index = -1 @@ -846,27 +846,27 @@ cdef class SparseOctreeContainer(OctreeContainer): pos[j] = self.DLE[j] + (visitor.pos[j] + 0.5) * dds[j] selector.recursively_visit_octs( o, pos, dds, 0, visitor, vc) + if indices != NULL: + indices[i] = visitor.index cdef np.int64_t get_domain_offset(self, int domain_id): return 0 # We no longer have a domain offset. cdef Oct* next_root(self, int domain_id, int ind[3]): - cdef int i cdef Oct *next = NULL self.get_root(ind, &next) if next != NULL: return next cdef OctAllocationContainer *cont = self.domains.get_cont(domain_id - 1) if cont.n_assigned >= cont.n: - print "Too many assigned." + print("Too many assigned.") return NULL if self.num_root >= self.max_root: - print "Too many roots." + print("Too many roots.") return NULL next = &cont.my_objs[cont.n_assigned] cont.n_assigned += 1 cdef np.int64_t key = 0 cdef OctKey *ikey = &self.root_nodes[self.num_root] - cdef np.int64_t okey = ikey.key key = self.ipos_to_key(ind) self.root_nodes[self.num_root].key = key self.root_nodes[self.num_root].node = next diff --git a/yt/geometry/oct_geometry_handler.py b/yt/geometry/oct_geometry_handler.py index f7516de2014..7289f7f87d6 100644 --- a/yt/geometry/oct_geometry_handler.py +++ b/yt/geometry/oct_geometry_handler.py @@ -1,21 +1,4 @@ -""" -Octree geometry handler - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np - from yt.utilities.logger import ytLogger as mylog from yt.geometry.geometry_handler import Index from yt.fields.field_detector import FieldDetector diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index e571eae9e7e..125c69a523e 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -6,13 +6,6 @@ Oct visitor definitions file """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- cimport numpy as np @@ -72,6 +65,16 @@ cdef class MaskOcts(OctVisitor): cdef class IndexOcts(OctVisitor): cdef np.int64_t[:] oct_index +cdef class MaskedIndexOcts(OctVisitor): + cdef np.int64_t[:] oct_index + cdef np.uint8_t[:] oct_mask + +cdef class IndexMaskMapOcts(OctVisitor): + cdef np.int64_t[:] oct_index + cdef np.uint8_t[:] oct_mask + cdef np.int64_t[:] map_domain_ind + cdef np.uint64_t map_index + cdef class ICoordsOcts(OctVisitor): cdef np.int64_t[:,:] icoords @@ -92,6 +95,11 @@ cdef class CopyArrayF64(OctVisitor): cdef np.float64_t[:,:,:,:,:] source cdef np.float64_t[:,:] dest +cdef class CopyFileIndArrayI8(OctVisitor): + cdef np.int64_t root + cdef np.uint8_t[:] source + cdef np.uint8_t[:] dest + cdef class IdentifyOcts(OctVisitor): cdef np.uint8_t[:] domain_mask @@ -120,6 +128,10 @@ cdef class LoadOctree(OctVisitor): cdef np.uint64_t *nocts cdef np.uint64_t *nfinest +cdef class MortonIndexOcts(OctVisitor): + cdef np.uint8_t[:] level_arr + cdef np.uint64_t[:] morton_ind + cdef inline int cind(int i, int j, int k) nogil: # THIS ONLY WORKS FOR CHILDREN. It is not general for zones. return (((i*2)+j)*2+k) diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index e71fda20144..f8212b6dff8 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -6,13 +6,6 @@ Oct visitor functions """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport cython cimport numpy @@ -20,6 +13,7 @@ import numpy from yt.utilities.lib.fp_utils cimport * from libc.stdlib cimport malloc, free from yt.geometry.oct_container cimport OctreeContainer +from yt.utilities.lib.geometry_utils cimport encode_morton_64bit # Now some visitor functions @@ -71,6 +65,21 @@ cdef class CopyArrayF64(OctVisitor): self.global_index, :] self.index += 1 +# This copies a bit array from source to the destination, based on file_ind +cdef class CopyFileIndArrayI8(OctVisitor): + def __init__(self, OctreeContainer octree, int domain_id = -1): + super(CopyFileIndArrayI8, self).__init__(octree, domain_id) + self.root = -1 + @cython.boundscheck(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + if self.level == 0: + self.root += 1 + if self.last != o.domain_ind: + self.last = o.domain_ind + self.dest[o.domain_ind] = self.source[self.root] + self.index += 1 + # This counts the number of octs, selected or not, that the selector hits. # Note that the selector will not recursively visit unselected octs, so this is # still useful. @@ -122,6 +131,34 @@ cdef class IndexOcts(OctVisitor): self.oct_index[o.domain_ind] = self.index self.index += 1 +# Compute a mapping from domain_ind to flattend index with some octs masked. +cdef class MaskedIndexOcts(OctVisitor): + @cython.boundscheck(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + # Note that we provide an index even if the cell is not selected. + if self.last != o.domain_ind: + self.last = o.domain_ind + if self.oct_mask[o.domain_ind] == 1: + self.oct_index[o.domain_ind] = self.index + self.index += 1 + +# Compute a mapping from domain_ind to flattened index checking mask. 
+cdef class IndexMaskMapOcts(OctVisitor): + def __init__(self, OctreeContainer octree, int domain_id = -1): + super(IndexMaskMapOcts, self).__init__(octree, domain_id) + self.map_index = 0 + @cython.boundscheck(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + if self.last != o.domain_ind: + self.last = o.domain_ind + if self.oct_mask[o.domain_ind] == 1: + if self.map_domain_ind[self.map_index] >= 0: + self.oct_index[self.map_domain_ind[self.map_index]] = self.index + self.map_index += 1 + self.index += 1 + # Integer coordinates cdef class ICoordsOcts(OctVisitor): @cython.boundscheck(False) @@ -263,7 +300,7 @@ cdef class LoadOctree(OctVisitor): self.nfinest[0] += 1 elif self.ref_mask[self.index] > 0: if self.ref_mask[self.index] != 1 and self.ref_mask[self.index] != 8: - print "ARRAY CLUE: ", self.ref_mask[self.index], "UNKNOWN" + print("ARRAY CLUE: ", self.ref_mask[self.index], "UNKNOWN") raise RuntimeError if o.children == NULL: o.children = malloc(sizeof(Oct *) * 8) @@ -277,6 +314,25 @@ cdef class LoadOctree(OctVisitor): o.children[ii + i].children = NULL self.nocts[0] += 1 else: - print "SOMETHING IS AMISS", self.index + print("SOMETHING IS AMISS", self.index) raise RuntimeError self.index += 1 + +cdef class MortonIndexOcts(OctVisitor): + @cython.boundscheck(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + if selected == 0: return + cdef np.int64_t coord[3] + cdef int i + for i in range(3): + coord[i] = (self.pos[i] << self.oref) + self.ind[i] + if (coord[i] < 0): + raise RuntimeError("Oct coordinate in dimension {} is ".format(i)+ + "negative. ({})".format(coord[i])) + self.level_arr[self.index] = self.level + self.morton_ind[self.index] = encode_morton_64bit( + np.uint64(coord[0]), + np.uint64(coord[1]), + np.uint64(coord[2])) + self.index += 1 diff --git a/yt/geometry/particle_deposit.pxd b/yt/geometry/particle_deposit.pxd index bdf9e1afc08..fb809b7d5c8 100644 --- a/yt/geometry/particle_deposit.pxd +++ b/yt/geometry/particle_deposit.pxd @@ -6,19 +6,13 @@ Particle Deposition onto Octs """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np import numpy as np from libc.stdlib cimport malloc, free cimport cython from libc.math cimport sqrt +from numpy.math cimport PI as NPY_PI from yt.utilities.lib.fp_utils cimport * from .oct_container cimport Oct, OctreeContainer @@ -40,6 +34,7 @@ cdef inline int gind(int i, int j, int k, int dims[3]): cdef inline np.float64_t sph_kernel_cubic(np.float64_t x) nogil: cdef np.float64_t kernel + # C is 8/pi cdef np.float64_t C = 2.5464790894703255 if x <= 0.5: kernel = 1.-6.*x*x*(1.-x) @@ -117,7 +112,7 @@ cdef inline np.float64_t sph_kernel_dummy(np.float64_t x) nogil: # I don't know the way to use a dict in a cdef class. # So in order to mimic a registry functionality, # I manually created a function to lookup the kernel functions. 
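[Annotation, not part of the patch] For context on the "C is 8/pi" comment added to `sph_kernel_cubic` above: 8/pi is the 3D normalization of the standard cubic spline with compact support at x = r/h = 1, which is where the hard-coded 2.5464790894703255 comes from. A rough pure-Python check of that claim (this is a sketch, not the compiled kernel):

    import math

    def sph_kernel_cubic(x):
        # Same piecewise form as the Cython version; C = 8/pi normalizes the
        # kernel so its integral over the unit-radius support sphere is 1.
        C = 8.0 / math.pi
        if x <= 0.5:
            kernel = 1.0 - 6.0 * x * x * (1.0 - x)
        elif x <= 1.0:
            kernel = 2.0 * (1.0 - x) ** 3
        else:
            kernel = 0.0
        return kernel * C

    print(8.0 / math.pi)   # 2.5464790894703255, matching the hard-coded constant

    # Crude midpoint quadrature of 4*pi*r^2*W(r) over [0, 1]; should be close to 1.
    dr = 1e-4
    total = sum(4.0 * math.pi * ((i + 0.5) * dr) ** 2
                * sph_kernel_cubic((i + 0.5) * dr) * dr
                for i in range(10000))
    print(round(total, 4))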
-ctypedef np.float64_t (*kernel_func) (np.float64_t) +ctypedef np.float64_t (*kernel_func) (np.float64_t) nogil cdef inline kernel_func get_kernel_func(str kernel_name) nogil: with gil: if kernel_name == 'cubic': diff --git a/yt/geometry/particle_deposit.pyx b/yt/geometry/particle_deposit.pyx index 6440e162dff..d79471e973d 100644 --- a/yt/geometry/particle_deposit.pyx +++ b/yt/geometry/particle_deposit.pyx @@ -6,13 +6,6 @@ Particle Deposition onto Cells """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np @@ -444,7 +437,8 @@ cdef class WeightedMeanParticleField(ParticleDepositOperation): def finalize(self): wf = np.asarray(self.wf) w = np.asarray(self.w) - rv = wf / w + with np.errstate(divide='ignore', invalid='ignore'): + rv = wf / w rv.shape = self.nvals return rv diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 81c528ffcc9..03ac22e961c 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -1,63 +1,40 @@ -""" -Particle-only geometry handler - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import collections +import errno import numpy as np import os import weakref +import struct -from yt.funcs import only_on_root +from yt.funcs import \ + get_pbar, \ + only_on_root from yt.utilities.logger import ytLogger as mylog -from yt.data_objects.octree_subset import ParticleOctreeSubset -from yt.geometry.geometry_handler import Index, YTDataChunk -from yt.geometry.particle_oct_container import \ - ParticleOctreeContainer, ParticleRegions +from yt.geometry.geometry_handler import \ + Index, \ + YTDataChunk +from yt.geometry.particle_oct_container import ParticleBitmap +from yt.data_objects.particle_container import ParticleContainer +from yt.utilities.lib.fnv_hash import fnv_hash + +CHUNKSIZE = 64**3 class ParticleIndex(Index): """The Index subclass for particle datasets""" - _global_mesh = False - def __init__(self, ds, dataset_type): self.dataset_type = dataset_type self.dataset = weakref.proxy(ds) - self.index_filename = self.dataset.parameter_filename - self.directory = os.path.dirname(self.index_filename) self.float_type = np.float64 super(ParticleIndex, self).__init__(ds, dataset_type) - - @property - def index_ptype(self): - if hasattr(self.dataset, "index_ptype"): - return self.dataset.index_ptype - else: - return "all" + self._initialize_index() def _setup_geometry(self): - mylog.debug("Initializing Particle Geometry Handler.") - self._initialize_particle_handler() + self.regions = None def get_smallest_dx(self): """ Returns (in code units) the smallest cell size in the simulation. 
""" - ML = self.oct_handler.max_level - dx = 1.0/(self.dataset.domain_dimensions*2**ML) - dx = dx * (self.dataset.domain_right_edge - - self.dataset.domain_left_edge) - return dx.min() + return self.ds.arr(0, 'code_length') def _get_particle_type_counts(self): result = collections.defaultdict(lambda: 0) @@ -73,78 +50,174 @@ def _setup_filenames(self): template = self.dataset.filename_template ndoms = self.dataset.file_count cls = self.dataset._file_class - self.data_files = \ - [cls(self.dataset, self.io, template % {'num':i}, i) - for i in range(int(ndoms))] + self.data_files = [] + fi = 0 + for i in range(int(ndoms)): + start = 0 + end = start + CHUNKSIZE + while 1: + df = cls(self.dataset, self.io, template % {'num':i}, fi, (start, end)) + if max(df.total_particles.values()) == 0: + break + fi += 1 + self.data_files.append(df) + start = end + end += CHUNKSIZE + self.total_particles = sum( + sum(d.total_particles.values()) for d in self.data_files) - def _initialize_particle_handler(self): - self._setup_data_io() - self._setup_filenames() + def _initialize_index(self): + ds = self.dataset + only_on_root(mylog.info, "Allocating for %0.3e particles", + self.total_particles, global_rootonly = True) + + # if we have not yet set domain_left_edge and domain_right_edge then do + # an I/O pass over the particle coordinates to determine a bounding box + if self.ds.domain_left_edge is None: + min_ppos = np.empty(3, dtype='float64') + min_ppos[:] = np.nan + max_ppos = np.empty(3, dtype='float64') + max_ppos[:] = np.nan + only_on_root( + mylog.info, + 'Bounding box cannot be inferred from metadata, reading ' + 'particle positions to infer bounding box') + for df in self.data_files: + for _, ppos in self.io._yield_coordinates(df): + min_ppos = np.nanmin(np.vstack([min_ppos, ppos]), axis=0) + max_ppos = np.nanmax(np.vstack([max_ppos, ppos]), axis=0) + only_on_root( + mylog.info, + 'Load this dataset with bounding_box=[%s, %s] to avoid I/O ' + 'overhead from inferring bounding_box.' % (min_ppos, max_ppos)) + ds.domain_left_edge = ds.arr(1.05*min_ppos, 'code_length') + ds.domain_right_edge = ds.arr(1.05*max_ppos, 'code_length') + ds.domain_width = ds.domain_right_edge - ds.domain_left_edge - index_ptype = self.index_ptype - if index_ptype == "all": - self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files) + # use a trivial morton index for datasets containing a single chunk + if len(self.data_files) == 1: + order1 = 1 + order2 = 1 else: - self.total_particles = sum( - d.total_particles[index_ptype] for d in self.data_files) - ds = self.dataset - self.oct_handler = ParticleOctreeContainer( - [1, 1, 1], ds.domain_left_edge, ds.domain_right_edge, - over_refine = ds.over_refine_factor) - self.oct_handler.n_ref = ds.n_ref - only_on_root(mylog.info, "Allocating for %0.3e particles " - "(index particle type '%s')", - self.total_particles, index_ptype) - # No more than 256^3 in the region finder. - N = min(len(self.data_files), 256) - self.regions = ParticleRegions( - ds.domain_left_edge, ds.domain_right_edge, - [N, N, N], len(self.data_files)) - self._initialize_indices() - self.oct_handler.finalize() - self.max_level = self.oct_handler.max_level - self.dataset.max_level = self.max_level - tot = sum(self.oct_handler.recursively_count().values()) - only_on_root(mylog.info, "Identified %0.3e octs", tot) - - def _initialize_indices(self): - # This will be replaced with a parallel-aware iteration step. 
- # Roughly outlined, what we will do is: - # * Generate Morton indices on each set of files that belong to - # an individual processor - # * Create a global, accumulated histogram - # * Cut based on estimated load balancing - # * Pass particles to specific processors, along with NREF buffer - # * Broadcast back a serialized octree to join - # - # For now we will do this in serial. - index_ptype = self.index_ptype - # Set the index_ptype attribute of self.io dynamically here, so we don't - # need to assume that the dataset has the attribute. - self.io.index_ptype = index_ptype - morton = np.empty(self.total_particles, dtype="uint64") - ind = 0 - for data_file in self.data_files: - if index_ptype == "all": - npart = sum(data_file.total_particles.values()) - else: - npart = data_file.total_particles[index_ptype] - morton[ind:ind + npart] = \ - self.io._initialize_index(data_file, self.regions) - ind += npart - morton.sort() - # Now we add them all at once. - self.oct_handler.add(morton) + order1 = ds.index_order[0] + order2 = ds.index_order[1] + + if order1 == 1 and order2 == 1: + dont_cache = True + else: + dont_cache = False + + # If we have applied a bounding box then we can't cache the + # ParticleBitmap because it is doman dependent + if getattr(ds, "_domain_override", False): + dont_cache = True + + if not hasattr(self.ds, '_file_hash'): + self.ds._file_hash = self._generate_hash() + + self.regions = ParticleBitmap( + ds.domain_left_edge, ds.domain_right_edge, + ds.periodicity, self.ds._file_hash, + len(self.data_files), + index_order1=order1, + index_order2=order2) + + # Load Morton index from file if provided + if getattr(ds, 'index_filename', None) is None: + fname = ds.parameter_filename + ".index{}_{}.ewah".format( + self.regions.index_order1, self.regions.index_order2) + else: + fname = ds.index_filename + + dont_load = dont_cache and not hasattr(ds, 'index_filename') + try: + if dont_load: + raise OSError + rflag = self.regions.load_bitmasks(fname) + rflag = self.regions.check_bitmasks() + self._initialize_frontend_specific() + if rflag == 0: + raise OSError + except (OSError, struct.error): + self.regions.reset_bitmasks() + self._initialize_coarse_index() + self._initialize_refined_index() + wdir = os.path.dirname(fname) + if not dont_cache and os.access(wdir, os.W_OK): + # Sometimes os mis-reports whether a directory is writable, + # So pass if writing the bitmask file fails. 
+ try: + self.regions.save_bitmasks(fname) + except OSError: + pass + rflag = self.regions.check_bitmasks() + + def _initialize_coarse_index(self): + pb = get_pbar("Initializing coarse index ", len(self.data_files)) + for i, data_file in enumerate(self.data_files): + pb.update(i) + for ptype, pos in self.io._yield_coordinates(data_file): + ds = self.ds + if hasattr(ds, '_sph_ptypes') and ptype == ds._sph_ptypes[0]: + hsml = self.io._get_smoothing_length( + data_file, pos.dtype, pos.shape) + else: + hsml = None + self.regions._coarse_index_data_file( + pos, hsml, data_file.file_id) + self.regions._set_coarse_index_data_file(data_file.file_id) + pb.finish() + self.regions.find_collisions_coarse() + + def _initialize_refined_index(self): + mask = self.regions.masks.sum(axis=1).astype('uint8') + max_npart = max(sum(d.total_particles.values()) + for d in self.data_files) * 28 + sub_mi1 = np.zeros(max_npart, "uint64") + sub_mi2 = np.zeros(max_npart, "uint64") + pb = get_pbar("Initializing refined index", len(self.data_files)) + mask_threshold = getattr(self, '_index_mask_threshold', 2) + count_threshold = getattr(self, '_index_count_threshold', 256) + mylog.debug("Using estimated thresholds of %s and %s for refinement", mask_threshold, count_threshold) + total_refined = 0 + total_coarse_refined = ((mask >= 2) & (self.regions.particle_counts > count_threshold)).sum() + mylog.debug("This should produce roughly %s zones, for %s of the domain", + total_coarse_refined, 100 * total_coarse_refined / mask.size) + for i, data_file in enumerate(self.data_files): + coll = None + pb.update(i) + nsub_mi = 0 + for ptype, pos in self.io._yield_coordinates(data_file): + if pos.size == 0: continue + if hasattr(self.ds, '_sph_ptypes') and ptype == self.ds._sph_ptypes[0]: + hsml = self.io._get_smoothing_length( + data_file, pos.dtype, pos.shape) + else: + hsml = None + nsub_mi, coll = self.regions._refined_index_data_file( + coll, pos, hsml, mask, sub_mi1, sub_mi2, + data_file.file_id, nsub_mi, count_threshold = count_threshold, + mask_threshold = mask_threshold) + total_refined += nsub_mi + self.regions.bitmasks.append(data_file.file_id, coll) + pb.finish() + self.regions.find_collisions_refined() def _detect_output_fields(self): # TODO: Add additional fields + self._setup_filenames() dsl = [] units = {} + pcounts = self._get_particle_type_counts() + field_cache = {} for dom in self.data_files: - fl, _units = self.io._identify_fields(dom) + if dom.filename in field_cache: + fl, _units = field_cache[dom.filename] + else: + fl, _units = self.io._identify_fields(dom) + field_cache[dom.filename] = fl, _units units.update(_units) - dom._calculate_offsets(fl) + dom._calculate_offsets(fl, pcounts) for f in fl: if f not in dsl: dsl.append(f) self.field_list = dsl @@ -156,17 +229,33 @@ def _detect_output_fields(self): ds.particle_types_raw = ds.particle_types def _identify_base_chunk(self, dobj): + # Must check that chunk_info contains the right number of ghost zones if getattr(dobj, "_chunk_info", None) is None: - data_files = getattr(dobj, "data_files", None) - if data_files is None: - data_files = [self.data_files[i] for i in - self.regions.identify_data_files(dobj.selector)] - base_region = getattr(dobj, "base_region", dobj) - oref = self.dataset.over_refine_factor - subset = [ParticleOctreeSubset(base_region, data_files, - self.dataset, over_refine_factor = oref)] - dobj._chunk_info = subset - dobj._current_chunk = list(self._chunk_all(dobj))[0] + if isinstance(dobj, ParticleContainer): + dobj._chunk_info = [dobj] + 
else: + # TODO: only return files + if getattr(dobj.selector, 'is_all_data', False): + nfiles = self.regions.nfiles + dfi = np.arange(nfiles) + else: + dfi, file_masks, addfi = self.regions.identify_file_masks( + dobj.selector) + nfiles = len(file_masks) + dobj._chunk_info = [None for _ in range(nfiles)] + for i in range(nfiles): + domain_id = i+1 + dobj._chunk_info[i] = ParticleContainer( + dobj, [self.data_files[dfi[i]]], + domain_id = domain_id) + # NOTE: One fun thing about the way IO works is that it + # consolidates things quite nicely. So we should feel free to + # create as many objects as part of the chunk as we want, since + # it'll take the set() of them. So if we break stuff up like + # this here, we end up in a situation where we have the ability + # to break things down further later on for buffer zones and the + # like. + dobj._current_chunk, = self._chunk_all(dobj) def _chunk_all(self, dobj): oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info) @@ -174,25 +263,56 @@ def _chunk_all(self, dobj): def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None): sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info) - # We actually do not really use the data files except as input to the - # ParticleOctreeSubset. - # This is where we will perform cutting of the Octree and - # load-balancing. That may require a specialized selector object to - # cut based on some space-filling curve index. - for i,og in enumerate(sobjs): - if ngz > 0: - g = og.retrieve_ghost_zones(ngz, [], smoothed=True) - else: - g = og - yield YTDataChunk(dobj, "spatial", [g]) + for og in sobjs: + with og._expand_data_files(): + if ngz > 0: + g = og.retrieve_ghost_zones(ngz, [], smoothed=True) + else: + g = og + yield YTDataChunk(dobj, "spatial", [g]) def _chunk_io(self, dobj, cache = True, local_only = False): oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info) - for subset in oobjs: - yield YTDataChunk(dobj, "io", [subset], None, cache = cache) - -class ParticleDataChunk(YTDataChunk): - def __init__(self, oct_handler, regions, *args, **kwargs): - self.oct_handler = oct_handler - self.regions = regions - super(ParticleDataChunk, self).__init__(*args, **kwargs) + for container in oobjs: + yield YTDataChunk(dobj, "io", [container], None, cache = cache) + + def _generate_hash(self): + # Generate an FNV hash by creating a byte array containing the + # modification time of as well as the first and last 1 MB of data in + # every output file + ret = bytearray() + for pfile in self.data_files: + + # only look at "real" files, not "fake" files generated by the + # chunking system + if pfile.start not in (0, None): + continue + try: + mtime = os.path.getmtime(pfile.filename) + except OSError as e: + if e.errno == errno.ENOENT: + # this is an in-memory file so we return with a dummy + # value + return -1 + else: + raise + ret.extend(str(mtime).encode('utf-8')) + size = os.path.getsize(pfile.filename) + if size > 1e6: + size = int(1e6) + with open(pfile.filename, 'rb') as fh: + # read in first and last 1 MB of data + data = fh.read(size) + fh.seek(-size, os.SEEK_END) + data = fh.read(size) + ret.extend(data) + return fnv_hash(ret) + + def _initialize_frontend_specific(self): + """This is for frontend-specific initialization code + + If there are frontend-specific things that need to be set while + creating the index, this function forces these operations to happen + in cases where we are reloading the index from a sidecar file. 
+ """ + pass diff --git a/yt/geometry/particle_oct_container.pyx b/yt/geometry/particle_oct_container.pyx index e1a4392719b..4b6b7bc7f0f 100644 --- a/yt/geometry/particle_oct_container.pyx +++ b/yt/geometry/particle_oct_container.pyx @@ -6,24 +6,55 @@ Oct container tuned for Particles """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX -from oct_visitors cimport cind + from libc.stdlib cimport malloc, free, qsort -from libc.math cimport floor -from yt.utilities.lib.fp_utils cimport * -cimport numpy as np +from libc.string cimport memset +from libc.math cimport floor, ceil, fmod +from libcpp.map cimport map +from libcpp.vector cimport vector +from yt.utilities.lib.ewah_bool_array cimport \ + ewah_bool_array, ewah_bool_iterator, ewah_map, bool_array, ewah_word_type import numpy as np -from selection_routines cimport SelectorObject +cimport numpy as np + +from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX, \ + SparseOctreeContainer, OctKey, OctAllocationContainer +cimport oct_visitors +from oct_visitors cimport cind, OctVisitor +from yt.utilities.lib.fp_utils cimport * +from yt.utilities.lib.geometry_utils cimport bounded_morton, \ + bounded_morton_dds, bounded_morton_relative_dds, \ + bounded_morton_split_dds, bounded_morton_split_relative_dds, \ + encode_morton_64bit, decode_morton_64bit, \ + morton_neighbors_coarse, morton_neighbors_refined +from selection_routines cimport SelectorObject, AlwaysSelector cimport cython from cython cimport floating +from cython.operator cimport dereference, preincrement +from cpython.exc cimport PyErr_CheckSignals +from collections import defaultdict +from yt.funcs import get_pbar + +from particle_deposit cimport gind +#from yt.utilities.lib.ewah_bool_wrap cimport \ +from ..utilities.lib.ewah_bool_wrap cimport BoolArrayCollection +import struct +import os + +# If set to 1, ghost cells are added at the refined level reguardless of if the +# coarse cell containing it is refined in the selector. +# If set to 0, ghost cells are only added at the refined level of the coarse +# index for the ghost cell is refined in the selector. 
+DEF RefinedExternalGhosts = 1 + +_bitmask_version = np.uint64(5) + +from ..utilities.lib.ewah_bool_wrap cimport SparseUnorderedBitmaskSet as SparseUnorderedBitmask +from ..utilities.lib.ewah_bool_wrap cimport SparseUnorderedRefinedBitmaskSet as SparseUnorderedRefinedBitmask +from ..utilities.lib.ewah_bool_wrap cimport BoolArrayCollectionUncompressed as BoolArrayColl +from ..utilities.lib.ewah_bool_wrap cimport FileBitmasks + +ctypedef map[np.uint64_t, bool_array] CoarseRefinedSets cdef class ParticleOctreeContainer(OctreeContainer): cdef Oct** oct_list @@ -45,7 +76,7 @@ cdef class ParticleOctreeContainer(OctreeContainer): def __dealloc__(self): #Call the freemem ops on every ocy #of the root mesh recursively - cdef i, j, k + cdef int i, j, k if self.root_mesh == NULL: return for i in range(self.nn[0]): if self.root_mesh[i] == NULL: continue @@ -70,7 +101,7 @@ cdef class ParticleOctreeContainer(OctreeContainer): free(o) def clear_fileind(self): - cdef i, j, k + cdef int i, j, k for i in range(self.nn[0]): for j in range(self.nn[1]): for k in range(self.nn[2]): @@ -100,7 +131,7 @@ cdef class ParticleOctreeContainer(OctreeContainer): def allocate_domains(self, domain_counts): pass - def finalize(self): + def finalize(self, int domain_id = 0): #This will sort the octs in the oct list #so that domains appear consecutively #And then find the oct index/offset for @@ -108,7 +139,6 @@ cdef class ParticleOctreeContainer(OctreeContainer): cdef int max_level = 0 self.oct_list = malloc(sizeof(Oct*)*self.nocts) cdef np.int64_t i = 0, lpos = 0 - cdef int cur_dom = -1 # Note that we now assign them in the same order they will be visited # by recursive visitors. for i in range(self.nn[0]): @@ -119,8 +149,7 @@ cdef class ParticleOctreeContainer(OctreeContainer): assert(lpos == self.nocts) for i in range(self.nocts): self.oct_list[i].domain_ind = i - self.oct_list[i].domain = 0 - self.oct_list[i].file_ind = -1 + self.oct_list[i].domain = domain_id self.max_level = max_level cdef visit_assign(self, Oct *o, np.int64_t *lpos, int level, int *max_level): @@ -146,7 +175,6 @@ cdef class ParticleOctreeContainer(OctreeContainer): #track of how many are used with np initially 0 self.nocts += 1 cdef Oct *my_oct = malloc(sizeof(Oct)) - cdef int i, j, k my_oct.domain = -1 my_oct.file_ind = 0 my_oct.domain_ind = self.nocts - 1 @@ -156,16 +184,17 @@ cdef class ParticleOctreeContainer(OctreeContainer): @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - def add(self, np.ndarray[np.uint64_t, ndim=1] indices): + def add(self, np.ndarray[np.uint64_t, ndim=1] indices, + np.uint8_t order = ORDER_MAX): #Add this particle to the root oct #Then if that oct has children, add it to them recursively #If the child needs to be refined because of max particles, do so - cdef np.int64_t no = indices.shape[0], p, index + cdef np.int64_t no = indices.shape[0], p + cdef np.uint64_t index cdef int i, level cdef int ind[3] if self.root_mesh[0][0][0] == NULL: self.allocate_root() cdef np.uint64_t *data = indices.data - cdef np.uint64_t FLAG = ~(0) for p in range(no): # We have morton indices, which means we choose left and right by # looking at (MAX_ORDER - level) & with the values 1, 2, 4. @@ -175,35 +204,45 @@ cdef class ParticleOctreeContainer(OctreeContainer): # This is a marker for the index not being inside the domain # we're interested in. 
continue + # Convert morton index to 3D index of octree root for i in range(3): - ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1 + ind[i] = (index >> ((order - level)*3 + (2 - i))) & 1 cur = self.root_mesh[ind[0]][ind[1]][ind[2]] if cur == NULL: raise RuntimeError + # Continue refining the octree until you reach the level of the + # morton indexing order. Along the way, use prefix to count + # previous indices at levels in the octree? while (cur.file_ind + 1) > self.n_ref: - if level >= ORDER_MAX: break # Just dump it here. + if level >= order: break # Just dump it here. level += 1 for i in range(3): - ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1 + ind[i] = (index >> ((order - level)*3 + (2 - i))) & 1 if cur.children == NULL or \ cur.children[cind(ind[0],ind[1],ind[2])] == NULL: - cur = self.refine_oct(cur, index, level) - self.filter_particles(cur, data, p, level) + cur = self.refine_oct(cur, index, level, order) + self.filter_particles(cur, data, p, level, order) else: cur = cur.children[cind(ind[0],ind[1],ind[2])] - cur.file_ind += 1 + # If our n_ref is 1, we are always refining, which means we're an + # index octree. In this case, we should store the index for fast + # lookup later on when we find neighbors and the like. + if self.n_ref == 1: + cur.file_ind = index + else: + cur.file_ind += 1 @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef Oct *refine_oct(self, Oct *o, np.uint64_t index, int level): + cdef Oct *refine_oct(self, Oct *o, np.uint64_t index, int level, + np.uint8_t order): #Allocate and initialize child octs #Attach particles to child octs #Remove particles from this oct entirely - cdef int i, j, k, m, n + cdef int i, j, k cdef int ind[3] cdef Oct *noct - cdef np.uint64_t prefix1, prefix2 # TODO: This does not need to be changed. o.children = malloc(sizeof(Oct *)*8) for i in range(2): @@ -215,25 +254,26 @@ cdef class ParticleOctreeContainer(OctreeContainer): o.children[cind(i,j,k)] = noct o.file_ind = self.n_ref + 1 for i in range(3): - ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1 + ind[i] = (index >> ((order - level)*3 + (2 - i))) & 1 noct = o.children[cind(ind[0],ind[1],ind[2])] return noct cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p, - int level): + int level, np.uint8_t order): # Now we look at the last nref particles to decide where they go. + # If p: Loops over all previous morton indices + # If n_ref: Loops over n_ref previous morton indices cdef int n = imin(p, self.n_ref) cdef np.uint64_t *arr = data + imax(p - self.n_ref, 0) cdef np.uint64_t prefix1, prefix2 # Now we figure out our prefix, which is the oct address at this level. # As long as we're actually in Morton order, we do not need to worry # about *any* of the other children of the oct. - prefix1 = data[p] >> (ORDER_MAX - level)*3 + prefix1 = data[p] >> (order - level)*3 for i in range(n): - prefix2 = arr[i] >> (ORDER_MAX - level)*3 + prefix2 = arr[i] >> (order - level)*3 if (prefix1 == prefix2): - o.file_ind += 1 - #print ind[0], ind[1], ind[2], o.file_ind, level + o.file_ind += 1 # Says how many morton indices are in this octant? 
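[Annotation, not part of the patch] To make the bit arithmetic in `add`/`refine_oct`/`filter_particles` above concrete: at each level the next octant is read from one interleaved 3-bit group of the Morton key, via `(index >> ((order - level)*3 + (2 - i))) & 1` for axis i. A small illustrative sketch, written in pure Python and independent of the Cython container (the level convention here is the sketch's own: level 1 reads the most significant bit triplet, level == order the least significant).

    def child_index(morton_key, level, order=20):
        # (i, j, k) octant selected at `level`; axis 0 (x) is the most
        # significant bit of each 3-bit group, hence the (2 - i) shift.
        return tuple((morton_key >> ((order - level) * 3 + (2 - i))) & 1
                     for i in range(3))

    def descend(morton_key, order=20):
        # Walk the key from the coarsest to the finest level, one octant per level.
        return [child_index(morton_key, level, order) for level in range(1, order + 1)]

    # Example with a short, order-2 key: bits are interleaved as x1 y1 z1 x2 y2 z2.
    key = 0b101_110
    print(descend(key, order=2))   # [(1, 0, 1), (1, 1, 0)]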
def recursively_count(self): #Visit every cell, accumulate the # of cells per level @@ -262,95 +302,1899 @@ cdef class ParticleOctreeContainer(OctreeContainer): self.visit(o.children[cind(i,j,k)], counts, level + 1) return + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef Oct *get_from_index(self, np.uint64_t mi, np.uint8_t order = ORDER_MAX, + int max_level = 99): + cdef Oct *cur + cdef Oct *next + cur = next = NULL + cdef int i + cdef np.int64_t level = -1 + cdef int ind32[3] + cdef np.uint64_t ind[3] + cdef np.uint64_t index + # Get level offset + cdef int level_offset[3] + for i in range(3): + level_offset[i] = np.log2(self.nn[i]) + if (1 << level_offset[i]) != self.nn[i]: + raise Exception("Octree does not have roots along dimension {} in a power of 2 ".format(i)) + for i in range(2,3): + if level_offset[i] != level_offset[0]: + raise Exception("Octree must have the same number of roots in each dimension for this.") + # Get root for index + index = (mi >> ((order - level_offset[0])*3)) + decode_morton_64bit(index, ind) + for i in range(3): + ind32[i] = ind[i] + self.get_root(ind32, &next) + # We want to stop recursing when there's nowhere else to go + level = level_offset[0] + max_level = min(max_level, order) + while next != NULL and level <= max_level: + level += 1 + for i in range(3): + ind[i] = (mi >> ((order - level)*3 + (2 - i))) & 1 + cur = next + if cur.children != NULL: + next = cur.children[cind(ind[0],ind[1],ind[2])] + else: + next = NULL + return cur + + def apply_domain(self, int domain_id, BoolArrayCollection mask, + int masklevel): + cdef SelectorObject selector = AlwaysSelector(None) + ind = self.domain_ind(selector, mask = mask, masklevel = masklevel) + for i in range(self.nocts): + if ind[i] < 0: continue + self.oct_list[i].domain = domain_id + ind_out = super(ParticleOctreeContainer,self).domain_ind(selector, domain_id = domain_id) + + def domain_ind(self, selector, int domain_id = -1, + BoolArrayCollection mask = None, int masklevel = 99): + if mask is None: + return super(ParticleOctreeContainer,self).domain_ind(selector, domain_id = domain_id) + # Create mask for octs that are touched by the mask + cdef ewah_bool_array *ewah_slct = mask.ewah_keys + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_slct[0].begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_slct[0].end()) + cdef np.ndarray[np.uint8_t, ndim=1] oct_mask + oct_mask = np.zeros(self.nocts, 'uint8') + cdef Oct *o + cdef int coct, cmi + coct = cmi = 0 + while iter_set[0] != iter_end[0]: + mi = dereference(iter_set[0]) + o = self.get_from_index(mi, order = masklevel) + if o != NULL: + _mask_children(oct_mask, o) + coct += 1 + cmi += 1; + preincrement(iter_set[0]) + # Get domain ind + cdef np.ndarray[np.int64_t, ndim=1] ind + ind = np.zeros(self.nocts, 'int64') - 1 + cdef oct_visitors.MaskedIndexOcts visitor + visitor = oct_visitors.MaskedIndexOcts(self, domain_id) + visitor.oct_index = ind + visitor.oct_mask = oct_mask + self.visit_all_octs(selector, visitor) + return ind + +cdef void _mask_children(np.ndarray[np.uint8_t] mask, Oct *cur): + cdef int i, j, k + if cur == NULL: + return + mask[cur.domain_ind] = 1 + if cur.children == NULL: + return + for i in range(2): + for j in range(2): + for k in range(2): + _mask_children(mask, cur.children[cind(i,j,k)]) + cdef np.uint64_t ONEBIT=1 +cdef np.uint64_t FLAG = ~(0) -cdef class ParticleRegions: +cdef class ParticleBitmap: cdef np.float64_t left_edge[3] cdef np.float64_t 
right_edge[3] + cdef np.uint8_t periodicity[3] cdef np.float64_t dds[3] + cdef np.float64_t dds_mi1[3] + cdef np.float64_t dds_mi2[3] cdef np.float64_t idds[3] cdef np.int32_t dims[3] + cdef np.int64_t file_hash + cdef np.uint64_t directional_max2[3] cdef public np.uint64_t nfiles + cdef public np.int32_t index_order1 + cdef public np.int32_t index_order2 cdef public object masks + cdef public object particle_counts + cdef public object counts + cdef public object max_count + cdef public object _last_selector + cdef public object _last_return_values + cdef public object _cached_octrees + cdef public object _last_octree_subset + cdef public object _last_oct_handler + cdef public object _prev_octree_subset + cdef public object _prev_oct_handler + cdef np.uint32_t *file_markers + cdef np.uint64_t n_file_markers + cdef np.uint64_t file_marker_i + cdef public FileBitmasks bitmasks + cdef public BoolArrayCollection collisions - def __init__(self, left_edge, right_edge, dims, nfiles): + def __init__(self, left_edge, right_edge, periodicity, file_hash, nfiles, + index_order1, index_order2): + # TODO: Set limit on maximum orders? cdef int i + self._cached_octrees = {} + self._last_selector = None + self._last_return_values = None + self._last_octree_subset = None + self._last_oct_handler = None + self._prev_octree_subset = None + self._prev_oct_handler = None + self.file_hash = file_hash self.nfiles = nfiles for i in range(3): self.left_edge[i] = left_edge[i] self.right_edge[i] = right_edge[i] - self.dims[i] = dims[i] - self.dds[i] = (right_edge[i] - left_edge[i])/dims[i] - self.idds[i] = 1.0/self.dds[i] + self.periodicity[i] = periodicity[i] + self.dims[i] = (1< self.right_edge[i]): - use = 0 + axiter[i][1] = 999 + # Skip particles outside the domain + if pos[p,i] >= RE[i] or pos[p,i] < LE[i]: + skip = 1 break - ind[i] = ((pos[p, i] - self.left_edge[i])*self.idds[i]) - ind[i] = iclip(ind[i], 0, self.dims[i]) - if use == 1: - mask[ind[0],ind[1],ind[2]] |= val - return + ppos[i] = pos[p,i] + if skip==1: continue + mi = bounded_morton_split_dds(ppos[0], ppos[1], ppos[2], LE, + dds, mi_split) + mask[mi] = 1 + particle_counts[mi] += 1 + # Expand mask by softening + if hsml is None: + continue + if hsml[p] < 0: + raise RuntimeError( + "Smoothing length for particle %s is negative with " + "value \"%s\"" % p, hsml[p]) + radius = hsml[p] + # We first check if we're bounded within the domain; this follows the logic in the + # pixelize_cartesian routine. We assume that no smoothing + # length can wrap around both directions. + for i in range(3): + if PER[i] and ppos[i] - radius < LE[i]: + axiter[i][1] = +1 + axiterv[i][1] = DW[i] + elif PER[i] and ppos[i] + radius > RE[i]: + axiter[i][1] = -1 + axiterv[i][1] = -DW[i] + for xi in range(2): + if axiter[0][xi] == 999: continue + s_ppos[0] = ppos[0] + axiterv[0][xi] + for yi in range(2): + if axiter[1][yi] == 999: continue + s_ppos[1] = ppos[1] + axiterv[1][yi] + for zi in range(2): + if axiter[2][zi] == 999: continue + s_ppos[2] = ppos[2] + axiterv[2][zi] + # OK, now we compute the left and right edges for this shift. 
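[Annotation, not part of the patch] The left/right-edge computation referenced in the comment above amounts to: clamp the particle's kernel support to just inside the domain (the dds/10 offsets), convert both corners to coarse cell indices, and loop over that index range inclusively. A simplified, hedged one-dimensional sketch of the same bookkeeping; names mirror the Cython variables but the function itself is illustrative.

    def coarse_cells_covered(center, radius, left_edge, right_edge, dds):
        # Clamp the support region slightly inside the domain, as the Cython code
        # does with the dds/10 offsets, so both endpoints map to valid cells.
        lo = max(center - radius, left_edge + dds / 10.0)
        hi = min(center + radius, right_edge - dds / 10.0)
        i_lo = int((lo - left_edge) / dds)
        i_hi = int((hi - left_edge) / dds)
        # Inclusive upper bound: i_hi is a cell *index*, so that cell is visited too.
        return list(range(i_lo, i_hi + 1))

    # A particle at 0.52 with smoothing length 0.3 in a unit domain of 8 coarse
    # cells touches cells 1 through 6.
    print(coarse_cells_covered(0.52, 0.3, 0.0, 1.0, 1.0 / 8))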
+ for i in range(3): + clip_pos_l[i] = fmax(s_ppos[i] - radius, LE[i] + dds[i]/10) + clip_pos_r[i] = fmin(s_ppos[i] + radius, RE[i] - dds[i]/10) + bounded_morton_split_dds(clip_pos_l[0], clip_pos_l[1], clip_pos_l[2], LE, dds, bounds[0]) + bounded_morton_split_dds(clip_pos_r[0], clip_pos_r[1], clip_pos_r[2], LE, dds, bounds[1]) + # We go to the upper bound plus one so that we have *inclusive* loops -- the upper bound + # is the cell *index*, so we want to make sure we include that cell. This is also why + # we don't need to worry about mi_max being the max index rather than the cell count. + for xex in range(bounds[0][0], bounds[1][0] + 1): + for yex in range(bounds[0][1], bounds[1][1] + 1): + for zex in range(bounds[0][2], bounds[1][2] + 1): + miex = encode_morton_64bit(xex, yex, zex) + mask[miex] = 1 + particle_counts[miex] += 1 + if miex >= msize: + raise IndexError( + "Index for a softening region " + + "({}) exceeds ".format(miex) + + "max ({})".format(msize)) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def _set_coarse_index_data_file(self, np.uint64_t file_id): + return self.__set_coarse_index_data_file(file_id) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void __set_coarse_index_data_file(self, np.uint64_t file_id): + cdef np.int64_t i + cdef FileBitmasks bitmasks = self.bitmasks + cdef np.ndarray[np.uint8_t, ndim=1] mask = self.masks[:,file_id] + # Add in order + for i in range(mask.shape[0]): + if mask[i] == 1: + bitmasks._set_coarse(file_id, i) - def identify_data_files(self, SelectorObject selector): - # This is relatively cheap to iterate over. - cdef int i, j, k, n - cdef np.uint64_t fmask, offset, fcheck + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def _refined_index_data_file(self, + BoolArrayCollection in_collection, + np.ndarray[floating, ndim=2] pos, + np.ndarray[floating, ndim=1] hsml, + np.ndarray[np.uint8_t, ndim=1] mask, + np.ndarray[np.uint64_t, ndim=1] sub_mi1, + np.ndarray[np.uint64_t, ndim=1] sub_mi2, + np.uint64_t file_id, np.int64_t nsub_mi, + np.uint64_t count_threshold = 128, + np.uint8_t mask_threshold = 2): + if in_collection is None: + in_collection = BoolArrayCollection() + cdef BoolArrayCollection _in_coll = in_collection + out_collection = self.__refined_index_data_file(_in_coll, pos, hsml, mask, + count_threshold, mask_threshold) + return 0, out_collection + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef BoolArrayCollection __refined_index_data_file( + self, + BoolArrayCollection in_collection, + np.ndarray[floating, ndim=2] pos, + np.ndarray[floating, ndim=1] hsml, + np.ndarray[np.uint8_t, ndim=1] mask, + np.uint64_t count_threshold, np.uint8_t mask_threshold + ): + # Initialize + cdef np.int64_t p, sorted_ind + cdef np.uint64_t i + cdef np.uint64_t mi1, mi2 + cdef np.float64_t ppos[3] + cdef np.float64_t s_ppos[3] # shifted ppos + cdef int skip + cdef BoolArrayCollection this_collection, out_collection + cdef np.uint64_t bounds[2][3] + cdef np.uint8_t fully_enclosed cdef np.float64_t LE[3] cdef np.float64_t RE[3] - cdef np.ndarray[np.uint64_t, ndim=3] mask - files = [] - for n in range(len(self.masks)): - fmask = 0 - mask = self.masks[n] - LE[0] = self.left_edge[0] - RE[0] = LE[0] + self.dds[0] - for i in range(self.dims[0]): - LE[1] = self.left_edge[1] - RE[1] = LE[1] + self.dds[1] - for j in range(self.dims[1]): - 
LE[2] = self.left_edge[2] - RE[2] = LE[2] + self.dds[2] - for k in range(self.dims[2]): - if selector.select_grid(LE, RE, 0) == 1: - fmask |= mask[i,j,k] - LE[2] += self.dds[2] - RE[2] += self.dds[2] - LE[1] += self.dds[1] - RE[1] += self.dds[1] - LE[0] += self.dds[0] - RE[0] += self.dds[0] - # Now we iterate through... - for fcheck in range(64): - if ((fmask >> fcheck) & ONEBIT) == ONEBIT: - files.append(fcheck + n * 64) - return files + cdef np.float64_t DW[3] + cdef np.uint8_t PER[3] + cdef np.float64_t dds1[3] + cdef np.float64_t dds2[3] + cdef np.float64_t radius + cdef np.uint64_t mi_split1[3] + cdef np.uint64_t mi_split2[3] + cdef np.uint64_t miex1 + cdef np.uint64_t[:] particle_counts = self.particle_counts + cdef np.uint64_t xex, yex, zex + cdef np.float64_t clip_pos_l[3] + cdef np.float64_t clip_pos_r[3] + cdef int axiter[3][2] + cdef np.float64_t axiterv[3][2] + cdef CoarseRefinedSets coarse_refined_map + cdef map[np.uint64_t, np.uint64_t] refined_count + cdef np.uint64_t nfully_enclosed = 0, n_calls = 0 + mi1_max = (1 << self.index_order1) - 1 + mi2_max = (1 << self.index_order2) - 1 + cdef np.uint64_t max_mi1_elements = 1 << (3*self.index_order1) + cdef np.uint64_t max_mi2_elements = 1 << (3*self.index_order2) + for i in range(max_mi1_elements): + refined_count[i] = 0 + # Copy things from structure (type cast) + for i in range(3): + LE[i] = self.left_edge[i] + RE[i] = self.right_edge[i] + PER[i] = self.periodicity[i] + dds1[i] = self.dds_mi1[i] + dds2[i] = self.dds_mi2[i] + DW[i] = RE[i] - LE[i] + axiter[i][0] = 0 # We always do an offset of 0 + axiterv[i][0] = 0.0 + cdef np.ndarray[np.uint64_t, ndim=1] morton_indices = np.empty(pos.shape[0], dtype="u8") + for p in range(pos.shape[0]): + morton_indices[p] = bounded_morton(pos[p, 0], pos[p, 1], pos[p, 2], + LE, RE, self.index_order1) + # Loop over positions skipping those outside the domain + cdef np.ndarray[np.uint64_t, ndim=1, cast=True] sorted_order + if hsml is None: + sorted_order = np.argsort(morton_indices) + else: + sorted_order = np.argsort(hsml)[::-1] + for sorted_ind in range(sorted_order.shape[0]): + p = sorted_order[sorted_ind] + skip = 0 + for i in range(3): + axiter[i][1] = 999 + if pos[p,i] >= RE[i] or pos[p,i] < LE[i]: + skip = 1 + break + ppos[i] = pos[p,i] + if skip==1: continue + # Only look if collision at coarse index + mi1 = bounded_morton_split_dds(ppos[0], ppos[1], ppos[2], LE, + dds1, mi_split1) + if hsml is None: + if mask[mi1] < mask_threshold \ + or particle_counts[mi1] < count_threshold: + continue + # Determine sub index within cell of primary index + mi2 = bounded_morton_split_relative_dds( + ppos[0], ppos[1], ppos[2], LE, dds1, dds2, mi_split2) + if refined_count[mi1] == 0: + coarse_refined_map[mi1].padWithZeroes(max_mi2_elements) + if not coarse_refined_map[mi1].get(mi2): + coarse_refined_map[mi1].set(mi2) + refined_count[mi1] += 1 + else: # only hit if we have smoothing lengths. 
+ # We have to do essentially the identical process to in the coarse indexing, + # except here we need to fill in all the subranges as well as the coarse ranges + # Note that we are also doing the null case, where we do no shifting + radius = hsml[p] + #if mask[mi1] <= 4: # only one thing in this area + # continue + for i in range(3): + if PER[i] and ppos[i] - radius < LE[i]: + axiter[i][1] = +1 + axiterv[i][1] = DW[i] + elif PER[i] and ppos[i] + radius > RE[i]: + axiter[i][1] = -1 + axiterv[i][1] = -DW[i] + for xi in range(2): + if axiter[0][xi] == 999: continue + s_ppos[0] = ppos[0] + axiterv[0][xi] + for yi in range(2): + if axiter[1][yi] == 999: continue + s_ppos[1] = ppos[1] + axiterv[1][yi] + for zi in range(2): + if axiter[2][zi] == 999: continue + s_ppos[2] = ppos[2] + axiterv[2][zi] + # OK, now we compute the left and right edges for this shift. + for i in range(3): + # casting to int64 is not nice but is so we can have negative values we clip + clip_pos_l[i] = fmax(s_ppos[i] - radius, LE[i] + dds1[i]/10) + clip_pos_r[i] = fmin(s_ppos[i] + radius, RE[i] - dds1[i]/10) + + bounded_morton_split_dds(clip_pos_l[0], clip_pos_l[1], clip_pos_l[2], LE, dds1, bounds[0]) + bounded_morton_split_dds(clip_pos_r[0], clip_pos_r[1], clip_pos_r[2], LE, dds1, bounds[1]) + + # We go to the upper bound plus one so that we have *inclusive* loops -- the upper bound + # is the cell *index*, so we want to make sure we include that cell. This is also why + # we don't need to worry about mi_max being the max index rather than the cell count. + # One additional thing to note is that for all of + # the *internal* cells, i.e., those that are both + # greater than the left edge and less than the + # right edge, we are fully enclosed. + for xex in range(bounds[0][0], bounds[1][0] + 1): + for yex in range(bounds[0][1], bounds[1][1] + 1): + for zex in range(bounds[0][2], bounds[1][2] + 1): + miex1 = encode_morton_64bit(xex, yex, zex) + if mask[miex1] < mask_threshold or \ + particle_counts[miex1] < count_threshold: + continue + # this explicitly requires that it be *between* + # them, not overlapping + if xex > bounds[0][0] and xex < bounds[1][0] and \ + yex > bounds[0][1] and yex < bounds[1][1] and \ + zex > bounds[0][2] and zex < bounds[1][2]: + fully_enclosed = 1 + else: + fully_enclosed = 0 + # Now we need to fill our sub-range + if refined_count[miex1] == 0: + coarse_refined_map[miex1].padWithZeroes(max_mi2_elements) + elif refined_count[miex1] >= max_mi2_elements: + continue + if fully_enclosed == 1: + nfully_enclosed += 1 + coarse_refined_map[miex1].inplace_logicalxor( + coarse_refined_map[miex1]) + coarse_refined_map[miex1].inplace_logicalnot() + refined_count[miex1] = max_mi2_elements + continue + n_calls += 1 + refined_count[miex1] += self.__fill_refined_ranges(s_ppos, radius, LE, RE, + dds1, xex, yex, zex, + dds2, + coarse_refined_map[miex1]) + cdef np.uint64_t vec_i + cdef bool_array *buf = NULL + cdef ewah_word_type w + this_collection = BoolArrayCollection() + cdef ewah_bool_array *refined_arr = NULL + for it1 in coarse_refined_map: + mi1 = it1.first + refined_arr = &this_collection.ewah_coll[0][mi1] + this_collection.ewah_keys[0].set(mi1) + this_collection.ewah_refn[0].set(mi1) + buf = &it1.second + for vec_i in range(buf.sizeInBytes() / sizeof(ewah_word_type)): + w = buf.getWord(vec_i) + refined_arr.addWord(w) + out_collection = BoolArrayCollection() + in_collection._logicalor(this_collection, out_collection) + return out_collection + + @cython.boundscheck(False) + @cython.wraparound(False) + 
@cython.cdivision(True) + @cython.initializedcheck(False) + cdef np.int64_t __fill_refined_ranges(self, np.float64_t s_ppos[3], np.float64_t radius, + np.float64_t LE[3], np.float64_t RE[3], + np.float64_t dds1[3], np.uint64_t xex, np.uint64_t yex, np.uint64_t zex, + np.float64_t dds2[3], bool_array &refined_set) except -1: + cdef int i + cdef np.uint64_t new_nsub = 0 + cdef np.uint64_t bounds_l[3], bounds_r[3] + cdef np.uint64_t miex2, miex2_min, miex2_max + cdef np.float64_t clip_pos_l[3] + cdef np.float64_t clip_pos_r[3] + cdef np.float64_t cell_edge_l, cell_edge_r + cdef np.uint64_t ex1[3] + cdef np.uint64_t xiex_min, yiex_min, ziex_min + cdef np.uint64_t xiex_max, yiex_max, ziex_max + ex1[0] = xex + ex1[1] = yex + ex1[2] = zex + # Check a few special cases + for i in range(3): + # Figure out our bounds inside our coarse cell, in the space of the + # full domain + cell_edge_l = ex1[i] * dds1[i] + LE[i] + cell_edge_r = cell_edge_l + dds1[i] + if s_ppos[i] + radius < cell_edge_l or s_ppos[i] - radius > cell_edge_r: + return 0 + clip_pos_l[i] = fmax(s_ppos[i] - radius, cell_edge_l + dds2[i]/2.0) + clip_pos_r[i] = fmin(s_ppos[i] + radius, cell_edge_r - dds2[i]/2.0) + miex2_min = bounded_morton_split_relative_dds(clip_pos_l[0], clip_pos_l[1], clip_pos_l[2], + LE, dds1, dds2, bounds_l) + miex2_max = bounded_morton_split_relative_dds(clip_pos_r[0], clip_pos_r[1], clip_pos_r[2], + LE, dds1, dds2, bounds_r) + xex_max = self.directional_max2[0] + yex_max = self.directional_max2[1] + zex_max = self.directional_max2[2] + xiex_min = miex2_min & xex_max + yiex_min = miex2_min & yex_max + ziex_min = miex2_min & zex_max + xiex_max = miex2_max & xex_max + yiex_max = miex2_max & yex_max + ziex_max = miex2_max & zex_max + # This could *probably* be sped up by iterating over words. 
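`encode_morton_64bit` and the `directional_max2` masks used in the loop below rely on the fact that a Morton (Z-order) code interleaves the x, y and z cell indices into every third bit, so masking a code with a per-axis mask isolates that axis and preserves its ordering without decoding. A small pure-Python sketch of the idea (the bit layout below is one possible convention chosen for illustration, not necessarily yt's exact one):

# Illustrative sketch (not yt code): Morton interleaving and per-axis masks
# analogous to directional_max2, used to compare single axes of two codes.
def spread_bits(x, order=21):
    """Spread the low `order` bits of x onto every third bit position."""
    out = 0
    for b in range(order):
        out |= ((x >> b) & 1) << (3 * b)
    return out

def encode_morton(ix, iy, iz, order=21):
    # Convention here: x occupies the highest bit of each triplet.
    return (spread_bits(ix, order) << 2) | (spread_bits(iy, order) << 1) | spread_bits(iz, order)

z_mask = spread_bits((1 << 21) - 1)   # every third bit set
y_mask = z_mask << 1
x_mask = z_mask << 2

a = encode_morton(3, 5, 6)
b = encode_morton(4, 5, 6)
# (code & axis_mask) compares that axis alone and preserves its ordering.
assert (a & y_mask) == (b & y_mask)   # same y index
assert (a & x_mask) < (b & x_mask)    # 3 < 4 along x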
+ for miex2 in range(miex2_min, miex2_max + 1): + #miex2 = encode_morton_64bit(xex2, yex2, zex2) + #decode_morton_64bit(miex2, ex2) + # Let's check all our cases here + if (miex2 & xex_max) < (xiex_min): continue + if (miex2 & xex_max) > (xiex_max): continue + if (miex2 & yex_max) < (yiex_min): continue + if (miex2 & yex_max) > (yiex_max): continue + if (miex2 & zex_max) < (ziex_min): continue + if (miex2 & zex_max) > (ziex_max): continue + refined_set.set(miex2) + new_nsub += 1 + return refined_set.numberOfOnes() + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def _set_refined_index_data_file(self, + np.ndarray[np.uint64_t, ndim=1] sub_mi1, + np.ndarray[np.uint64_t, ndim=1] sub_mi2, + np.uint64_t file_id, np.int64_t nsub_mi): + return self.__set_refined_index_data_file(sub_mi1, sub_mi2, + file_id, nsub_mi) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void __set_refined_index_data_file(self, + np.ndarray[np.uint64_t, ndim=1] sub_mi1, + np.ndarray[np.uint64_t, ndim=1] sub_mi2, + np.uint64_t file_id, np.int64_t nsub_mi): + cdef np.int64_t i, p + cdef FileBitmasks bitmasks = self.bitmasks + bitmasks._set_refined_index_array(file_id, nsub_mi, sub_mi1, sub_mi2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def find_collisions(self, verbose=False): + cdef tuple cc, rc + cc, rc = self.bitmasks._find_collisions(self.collisions,verbose) + return cc, rc + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def find_collisions_coarse(self, verbose=False, file_list = None): + cdef int nc, nm + nc, nm = self.bitmasks._find_collisions_coarse(self.collisions, verbose, file_list) + return nc, nm + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def find_uncontaminated(self, np.uint32_t ifile, BoolArrayCollection mask, + BoolArrayCollection mask2 = None): + cdef np.ndarray[np.uint8_t, ndim=1] arr = np.zeros((1 << (self.index_order1 * 3)),'uint8') + cdef np.uint8_t[:] arr_view = arr + self.bitmasks._select_uncontaminated(ifile, mask, arr_view, mask2) + return arr + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def find_contaminated(self, np.uint32_t ifile, BoolArrayCollection mask, + BoolArrayCollection mask2 = None): + cdef np.ndarray[np.uint8_t, ndim=1] arr = np.zeros((1 << (self.index_order1 * 3)),'uint8') + cdef np.uint8_t[:] arr_view = arr + cdef np.ndarray[np.uint8_t, ndim=1] sfiles = np.zeros(self.nfiles,'uint8') + cdef np.uint8_t[:] sfiles_view = sfiles + self.bitmasks._select_contaminated(ifile, mask, arr_view, sfiles_view, mask2) + return arr, np.where(sfiles)[0].astype('uint32') + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def find_collisions_refined(self, verbose=False): + cdef np.int32_t nc, nm + nc, nm = self.bitmasks._find_collisions_refined(self.collisions,verbose) + return nc, nm + + def calcsize_bitmasks(self): + # TODO: All cython + cdef bytes serial_BAC + cdef np.uint64_t ifile + cdef int out = 0 + out += struct.calcsize('Q') + # Bitmaps for each file + for ifile in range(self.nfiles): + serial_BAC = self.bitmasks._dumps(ifile) + out += struct.calcsize('Q') + out += len(serial_BAC) + 
# Bitmap for collisions + serial_BAC = self.collisions._dumps() + out += struct.calcsize('Q') + out += len(serial_BAC) + return out + + def get_bitmasks(self): + return self.bitmasks + + def iseq_bitmask(self, solf): + return self.bitmasks._iseq(solf.get_bitmasks()) + + def save_bitmasks(self, fname): + cdef bytes serial_BAC + cdef np.uint64_t ifile + f = open(fname,'wb') + # Header + f.write(struct.pack('Q', _bitmask_version)) + f.write(struct.pack('q', self.file_hash)) + f.write(struct.pack('Q', self.nfiles)) + # Bitmap for each file + for ifile in range(self.nfiles): + serial_BAC = self.bitmasks._dumps(ifile) + f.write(struct.pack('Q', len(serial_BAC))) + f.write(serial_BAC) + # Collisions + serial_BAC = self.collisions._dumps() + f.write(struct.pack('Q', len(serial_BAC))) + f.write(serial_BAC) + f.close() + + def check_bitmasks(self): + return self.bitmasks._check() + + def reset_bitmasks(self): + self.bitmasks._reset() + + def load_bitmasks(self, fname): + cdef bint read_flag = 1 + cdef bint irflag + cdef np.uint64_t ver + cdef np.uint64_t nfiles = 0 + cdef np.int64_t file_hash + cdef np.uint64_t size_serial + cdef bint overwrite = 0 + # Verify that file is correct version + if not os.path.isfile(fname): + raise OSError + f = open(fname,'rb') + ver, = struct.unpack('Q',f.read(struct.calcsize('Q'))) + if ver == self.nfiles and ver != _bitmask_version: + overwrite = 1 + nfiles = ver + ver = 0 # Original bitmaps had number of files first + if ver != _bitmask_version: + raise OSError("The file format of the index has changed since " + "this file was created. It will be replaced with an " + "updated version.") + # Read file hash + file_hash, = struct.unpack('q', f.read(struct.calcsize('q'))) + if file_hash != self.file_hash: + raise OSError + # Read number of bitmaps + if nfiles == 0: + nfiles, = struct.unpack('Q', f.read(struct.calcsize('Q'))) + if nfiles != self.nfiles: + raise OSError( + "Number of bitmasks ({}) conflicts with number of files " + "({})".format(nfiles, self.nfiles)) + # Read bitmap for each file + pb = get_pbar("Loading particle index", nfiles) + for ifile in range(nfiles): + pb.update(ifile) + size_serial, = struct.unpack('Q', f.read(struct.calcsize('Q'))) + irflag = self.bitmasks._loads(ifile, f.read(size_serial)) + if irflag == 0: + read_flag = 0 + pb.finish() + # Collisions + size_serial, = struct.unpack('Q',f.read(struct.calcsize('Q'))) + irflag = self.collisions._loads(f.read(size_serial)) + f.close() + # Save in correct format + if overwrite == 1: + self.save_bitmasks(fname) + return read_flag + + def print_info(self): + cdef np.uint64_t ifile + for ifile in range(self.nfiles): + self.bitmasks.print_info(ifile, "File: %03d" % ifile) + + def count_coarse(self, ifile): + r"""Get the number of coarse cells set for a file.""" + return self.bitmasks.count_coarse(ifile) + + def count_refined(self, ifile): + r"""Get the number of cells refined for a file.""" + return self.bitmasks.count_refined(ifile) + + def count_total(self, ifile): + r"""Get the total number of cells set for a file.""" + return self.bitmasks.count_total(ifile) + + def check(self): + cdef np.uint64_t mi1 + cdef ewah_bool_array arr_totref, arr_tottwo + cdef ewah_bool_array arr, arr_any, arr_two, arr_swap + cdef vector[size_t] vec_totref + cdef vector[size_t].iterator it_mi1 + cdef int nm = 0, nc = 0 + cdef np.uint64_t ifile, nbitmasks + nbitmasks = len(self.bitmasks) + # Locate all indices with second level refinement + for ifile in range(self.nfiles): + arr = ( self.bitmasks.ewah_refn)[ifile][0] + 
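`save_bitmasks` and `load_bitmasks` above define a simple length-prefixed layout: a `Q` version, a signed `q` file hash, a `Q` file count, then one `Q`-length-prefixed EWAH blob per data file and a final blob for the collisions (`load_bitmasks` additionally tolerates an older layout that stored the file count first, which this sketch ignores). An illustrative way to walk that layout with `struct`; it returns the raw blobs rather than decoding them, since EWAH decompression lives in the Cython extension:

import struct

def read_bitmask_index(fname):
    # Illustrative reader for the layout written by save_bitmasks above.
    with open(fname, "rb") as f:
        def unpack_one(fmt):
            return struct.unpack(fmt, f.read(struct.calcsize(fmt)))[0]
        version = unpack_one("Q")
        file_hash = unpack_one("q")
        nfiles = unpack_one("Q")
        file_blobs = [f.read(unpack_one("Q")) for _ in range(nfiles)]
        collision_blob = f.read(unpack_one("Q"))
    return version, file_hash, file_blobs, collision_blob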
arr_totref.logicalor(arr,arr_totref) + # Count collections & second level indices + vec_totref = arr_totref.toArray() + it_mi1 = vec_totref.begin() + while it_mi1 != vec_totref.end(): + mi1 = dereference(it_mi1) + arr_any.reset() + arr_two.reset() + for ifile in range(nbitmasks): + if self.bitmasks._isref(ifile, mi1) == 1: + arr = ( self.bitmasks.ewah_coll)[ifile][0][mi1] + arr_any.logicaland(arr, arr_two) # Indices in previous files + arr_any.logicalor(arr, arr_swap) # All second level indices + arr_any = arr_swap + arr_two.logicalor(arr_tottwo,arr_tottwo) + nc += arr_tottwo.numberOfOnes() + nm += arr_any.numberOfOnes() + preincrement(it_mi1) + # nc: total number of second level morton indices that are repeated + # nm: total number of second level morton indices + print("Total of %s / %s collisions (% 3.5f%%)" % (nc, nm, 100.0*float(nc)/nm)) + + def primary_indices(self): + mi = ( self.collisions.ewah_keys)[0].toArray() + return np.array(mi,'uint64') + + def file_ownership_mask(self, fid): + cdef BoolArrayCollection out + out = self.bitmasks._get_bitmask( fid) + return out + + def finalize(self): + return + # self.index_octree = ParticleOctreeContainer([1,1,1], + # [self.left_edge[0], self.left_edge[1], self.left_edge[2]], + # [self.right_edge[0], self.right_edge[1], self.right_edge[2]], + # over_refine = 0 + # ) + # self.index_octree.n_ref = 1 + # mi = ( self.collisions.ewah_keys)[0].toArray() + # Change from vector to numpy + # mi = mi.astype("uint64") + # self.index_octree.add(mi, self.index_order1) + # self.index_octree.finalize() + + def get_DLE(self): + cdef int i + cdef np.ndarray[np.float64_t, ndim=1] DLE + DLE = np.zeros(3, dtype='float64') + for i in range(3): + DLE[i] = self.left_edge[i] + return DLE + def get_DRE(self): + cdef int i + cdef np.ndarray[np.float64_t, ndim=1] DRE + DRE = np.zeros(3, dtype='float64') + for i in range(3): + DRE[i] = self.right_edge[i] + return DRE + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def get_ghost_zones(self, SelectorObject selector, int ngz, + BoolArrayCollection dmask = None, bint coarse_ghosts = False): + cdef BoolArrayCollection gmask, gmask2, out + cdef np.ndarray[np.uint8_t, ndim=1] periodic = selector.get_periodicity() + cdef bint periodicity[3] + cdef int i + for i in range(3): + periodicity[i] = periodic[i] + if dmask is None: + dmask = BoolArrayCollection() + gmask2 = BoolArrayCollection() + morton_selector = ParticleBitmapSelector(selector,self,ngz=0) + morton_selector.fill_masks(dmask, gmask2) + gmask = BoolArrayCollection() + dmask._get_ghost_zones(ngz, self.index_order1, self.index_order2, + periodicity, gmask, coarse_ghosts) + dfiles, gfiles = self.masks_to_files(dmask, gmask) + out = BoolArrayCollection() + gmask._logicalor(dmask, out) + return gfiles, out + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def selector2mask(self, SelectorObject selector): + cdef BoolArrayCollection cmask = BoolArrayCollection() + cdef ParticleBitmapSelector morton_selector + morton_selector = ParticleBitmapSelector(selector,self,ngz=0) + morton_selector.fill_masks(cmask) + return cmask + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def mask2files(self, BoolArrayCollection cmask): + cdef np.ndarray[np.uint32_t, ndim=1] file_idx + file_idx = self.mask_to_files(cmask) + return file_idx + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def mask2filemasks(self, BoolArrayCollection cmask, 
np.ndarray[np.uint32_t, ndim=1] file_idx): + cdef BoolArrayCollection fmask + cdef np.int32_t fid + cdef np.ndarray[object, ndim=1] file_masks + cdef int i + # Get bitmasks for parts of files touching the selector + file_masks = np.array([BoolArrayCollection() for i in range(len(file_idx))], + dtype="object") + for i, (fid, fmask) in enumerate(zip(file_idx,file_masks)): + self.bitmasks._logicaland( fid, cmask, fmask) + return file_masks + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def filemasks2addfiles(self, np.ndarray[object, ndim=1] file_masks): + cdef list addfile_idx + addfile_idx = len(file_masks)*[None] + for i, fmask in enumerate(file_masks): + addfile_idx[i] = self.mask_to_files(fmask).astype('uint32') + return addfile_idx + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def identify_file_masks(self, SelectorObject selector): + cdef BoolArrayCollection cmask = BoolArrayCollection() + cdef BoolArrayCollection fmask + cdef np.int32_t fid + cdef np.ndarray[object, ndim=1] file_masks + cdef np.ndarray[np.uint32_t, ndim=1] file_idx + cdef list addfile_idx + # Get bitmask for selector + cdef ParticleBitmapSelector morton_selector + morton_selector = ParticleBitmapSelector(selector, self, ngz=0) + morton_selector.fill_masks(cmask) + # Get bitmasks for parts of files touching the selector + file_idx = self.mask_to_files(cmask) + file_masks = np.array([BoolArrayCollection() for i in range(len(file_idx))], + dtype="object") + addfile_idx = len(file_idx)*[None] + for i, (fid, fmask) in enumerate(zip(file_idx,file_masks)): + self.bitmasks._logicaland( fid, cmask, fmask) + addfile_idx[i] = self.mask_to_files(fmask).astype('uint32') + return file_idx.astype('uint32'), file_masks, addfile_idx + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def identify_data_files(self, SelectorObject selector, int ngz = 0): + cdef BoolArrayCollection cmask_s = BoolArrayCollection() + cdef BoolArrayCollection cmask_g = BoolArrayCollection() + # Find mask of selected morton indices + cdef ParticleBitmapSelector morton_selector + morton_selector = ParticleBitmapSelector(selector, self, ngz=ngz) + morton_selector.fill_masks(cmask_s, cmask_g) + return self.masks_to_files(cmask_s, cmask_g), (cmask_s, cmask_g) + + def mask_to_files(self, BoolArrayCollection mm_s): + cdef FileBitmasks mm_d = self.bitmasks + cdef np.uint32_t ifile + cdef np.ndarray[np.uint8_t, ndim=1] file_mask_p + file_mask_p = np.zeros(self.nfiles, dtype="uint8") + # Compare with mask of particles + for ifile in range(self.nfiles): + # Only continue if the file is not already selected + if file_mask_p[ifile] == 0: + if mm_d._intersects(ifile, mm_s): + file_mask_p[ifile] = 1 + cdef np.ndarray[np.int32_t, ndim=1] file_idx_p + file_idx_p = np.where(file_mask_p)[0].astype('int32') + return file_idx_p.astype('uint32') + + def masks_to_files(self, BoolArrayCollection mm_s, BoolArrayCollection mm_g): + cdef FileBitmasks mm_d = self.bitmasks + cdef np.uint32_t ifile + cdef np.ndarray[np.uint8_t, ndim=1] file_mask_p + cdef np.ndarray[np.uint8_t, ndim=1] file_mask_g + file_mask_p = np.zeros(self.nfiles, dtype="uint8") + file_mask_g = np.zeros(self.nfiles, dtype="uint8") + # Compare with mask of particles + for ifile in range(self.nfiles): + # Only continue if the file is not already selected + if file_mask_p[ifile] == 0: + if mm_d._intersects(ifile, mm_s): + file_mask_p[ifile] = 1 + file_mask_g[ifile] = 0 # No intersection + elif 
mm_d._intersects(ifile, mm_g): + file_mask_g[ifile] = 1 + cdef np.ndarray[np.int32_t, ndim=1] file_idx_p + cdef np.ndarray[np.int32_t, ndim=1] file_idx_g + file_idx_p = np.where(file_mask_p)[0].astype('int32') + file_idx_g = np.where(file_mask_g)[0].astype('int32') + return file_idx_p.astype('uint32'), file_idx_g.astype('uint32') + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def construct_octree(self, index, io_handler, data_files, + over_refine_factor, + BoolArrayCollection selector_mask, + BoolArrayCollection base_mask = None): + cdef np.uint64_t total_pcount + cdef np.uint64_t i, j, k, n + cdef int ind[3] + cdef np.uint64_t ind64[3] + cdef ParticleBitmapOctreeContainer octree + cdef np.uint64_t mi, mi_root + cdef np.ndarray pos + cdef np.ndarray[np.float32_t, ndim=2] pos32 + cdef np.ndarray[np.float64_t, ndim=2] pos64 + cdef np.float64_t ppos[3] + cdef np.float64_t DLE[3] + cdef np.float64_t DRE[3] + cdef int bitsize = 0 + for i in range(3): + DLE[i] = self.left_edge[i] + DRE[i] = self.right_edge[i] + cdef np.ndarray[np.uint64_t, ndim=1] morton_ind + # Determine cells that need to be added to the octree + cdef np.ndarray[np.uint32_t, ndim=1] file_idx_p + cdef np.ndarray[np.uint32_t, ndim=1] file_idx_g + cdef np.uint64_t nroot = selector_mask._count_total() + # Now we can actually create a sparse octree. + octree = ParticleBitmapOctreeContainer( + (self.dims[0], self.dims[1], self.dims[2]), + (self.left_edge[0], self.left_edge[1], self.left_edge[2]), + (self.right_edge[0], self.right_edge[1], self.right_edge[2]), + nroot, over_refine_factor) + octree.n_ref = index.dataset.n_ref + octree.level_offset = self.index_order1 + octree.allocate_domains() + # Add roots based on the mask + cdef np.uint64_t croot = 0 + cdef ewah_bool_array *ewah_slct = selector_mask.ewah_keys + cdef ewah_bool_array *ewah_base + if base_mask is not None: + ewah_base = base_mask.ewah_keys + else: + ewah_base = NULL + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_slct[0].begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_slct[0].end()) + cdef np.ndarray[np.uint8_t, ndim=1] slct_arr + slct_arr = np.zeros((1 << (self.index_order1 * 3)),'uint8') + while iter_set[0] != iter_end[0]: + mi = dereference(iter_set[0]) + if ewah_base != NULL and ewah_base[0].get(mi) == 0: + octree._index_base_roots[croot] = 0 + slct_arr[mi] = 2 + else: + slct_arr[mi] = 1 + decode_morton_64bit(mi, ind64) + for j in range(3): + ind[j] = ind64[j] + octree.next_root(1, ind) + croot += 1 + preincrement(iter_set[0]) + assert(croot == nroot) + if ewah_base != NULL: + assert(np.sum(octree._index_base_roots) == ewah_base[0].numberOfOnes()) + # Get morton indices for all particles in this file and those + # contaminating cells it has majority control of. 
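The particle loop that follows computes each particle's Morton index at `ORDER_MAX` and right-shifts it by `3*(ORDER_MAX - index_order1)` to find the coarse root cell it belongs to. The shift works because dropping the low `3*k` bits of a Morton code yields the code of the enclosing cell `k` levels coarser. A self-contained check of that identity (illustrative bit convention, not yt's API):

# Illustrative sketch (not yt code): coarsening a Morton code by right-shifting.
def interleave(ix, iy, iz, order):
    out = 0
    for b in range(order):
        out |= ((ix >> b) & 1) << (3 * b + 2)
        out |= ((iy >> b) & 1) << (3 * b + 1)
        out |= ((iz >> b) & 1) << (3 * b)
    return out

fine = interleave(12, 7, 9, order=4)                     # indices on a 2**4 grid
coarse = interleave(12 >> 2, 7 >> 2, 9 >> 2, order=2)    # same point, 2 levels up
# Dropping 3*2 bits of the fine code gives the coarse cell's code, which is
# what `mi >> (3 * (ORDER_MAX - index_order1))` does in the loop below.
assert fine >> (3 * 2) == coarse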
+ files_touched = data_files #+ buffer_files # datafile object from ID goes here + total_pcount = 0 + for data_file in files_touched: + total_pcount += sum(data_file.total_particles.values()) + morton_ind = np.empty(total_pcount, dtype='uint64') + total_pcount = 0 + cdef np.uint64_t base_pcount = 0 + for data_file in files_touched: + # We now get our particle positions + for pos in io_handler._yield_coordinates(data_file): + pos32 = pos64 = None + bitsize = 0 + if pos.dtype == np.float32: + pos32 = pos + bitsize = 32 + for j in range(pos.shape[0]): + for k in range(3): + ppos[k] = pos32[j,k] + mi = bounded_morton(ppos[0], ppos[1], ppos[2], + DLE, DRE, ORDER_MAX) + mi_root = mi >> (3*(ORDER_MAX-self.index_order1)) + if slct_arr[mi_root] > 0: + morton_ind[total_pcount] = mi + total_pcount += 1 + if slct_arr[mi_root] == 1: + base_pcount += 1 + elif pos.dtype == np.float64: + pos64 = pos + bitsize = 64 + for j in range(pos.shape[0]): + for k in range(3): + ppos[k] = pos64[j,k] + mi = bounded_morton(ppos[0], ppos[1], ppos[2], + DLE, DRE, ORDER_MAX) + mi_root = mi >> (3*(ORDER_MAX-self.index_order1)) + if slct_arr[mi_root] > 0: + morton_ind[total_pcount] = mi + total_pcount += 1 + if slct_arr[mi_root] == 1: + base_pcount += 1 + else: + raise RuntimeError + morton_ind = morton_ind[:total_pcount] + morton_ind.sort() + octree.add(morton_ind, self.index_order1) + octree.finalize() + return octree + +cdef class ParticleBitmapSelector: + cdef SelectorObject selector + cdef ParticleBitmap bitmap + cdef np.uint32_t ngz + cdef np.float64_t DLE[3] + cdef np.float64_t DRE[3] + cdef bint periodicity[3] + cdef np.uint32_t order1 + cdef np.uint32_t order2 + cdef np.uint64_t max_index1 + cdef np.uint64_t max_index2 + cdef np.uint64_t s1 + cdef np.uint64_t s2 + cdef void* pointers[11] + cdef np.uint64_t[:,:] ind1_n + cdef np.uint64_t[:,:] ind2_n + cdef np.uint32_t[:,:] neighbors + cdef np.uint64_t[:] neighbor_list1 + cdef np.uint64_t[:] neighbor_list2 + cdef np.uint32_t nfiles + cdef np.uint8_t[:] file_mask_p + cdef np.uint8_t[:] file_mask_g + # Uncompressed boolean + cdef np.uint8_t[:] refined_select_bool + cdef np.uint8_t[:] refined_ghosts_bool + cdef np.uint8_t[:] coarse_select_bool + cdef np.uint8_t[:] coarse_ghosts_bool + cdef SparseUnorderedRefinedBitmask refined_ghosts_list + cdef BoolArrayColl select_ewah + cdef BoolArrayColl ghosts_ewah + + def __cinit__(self, selector, bitmap, ngz=0): + cdef int i + cdef np.ndarray[np.uint8_t, ndim=1] periodicity = np.zeros(3, dtype='uint8') + cdef np.ndarray[np.float64_t, ndim=1] DLE = np.zeros(3, dtype='float64') + cdef np.ndarray[np.float64_t, ndim=1] DRE = np.zeros(3, dtype='float64') + + self.selector = selector + self.bitmap = bitmap + self.ngz = ngz + # Things from the bitmap & selector + periodicity = selector.get_periodicity() + DLE = bitmap.get_DLE() + DRE = bitmap.get_DRE() + for i in range(3): + self.DLE[i] = DLE[i] + self.DRE[i] = DRE[i] + self.periodicity[i] = periodicity[i] + self.order1 = bitmap.index_order1 + self.order2 = bitmap.index_order2 + self.nfiles = bitmap.nfiles + self.max_index1 = (1 << self.order1) + self.max_index2 = (1 << self.order2) + self.s1 = (1 << (self.order1*3)) + self.s2 = (1 << (self.order2*3)) + + self.neighbors = np.zeros((2*ngz+1, 3), dtype='uint32') + self.ind1_n = np.zeros((2*ngz+1, 3), dtype='uint64') + self.ind2_n = np.zeros((2*ngz+1, 3), dtype='uint64') + self.neighbor_list1 = np.zeros((2*ngz+1)**3, dtype='uint64') + self.neighbor_list2 = np.zeros((2*ngz+1)**3, dtype='uint64') + self.file_mask_p = np.zeros(bitmap.nfiles, 
dtype='uint8') + self.file_mask_g = np.zeros(bitmap.nfiles, dtype='uint8') + + self.refined_select_bool = np.zeros(self.s2, 'uint8') + self.refined_ghosts_bool = np.zeros(self.s2, 'uint8') + self.coarse_select_bool = np.zeros(self.s1, 'uint8') + self.coarse_ghosts_bool = np.zeros(self.s1, 'uint8') + + self.refined_ghosts_list = SparseUnorderedRefinedBitmask() + self.select_ewah = BoolArrayColl(self.s1, self.s2) + self.ghosts_ewah = BoolArrayColl(self.s1, self.s2) + + def fill_masks(self, BoolArrayCollection mm_s, BoolArrayCollection mm_g = None): + # Normal variables + cdef int i + cdef np.int32_t level = 0 + cdef np.uint64_t mi1 + mi1 = ~(0) + cdef np.float64_t pos[3] + cdef np.float64_t dds[3] + cdef np.uint64_t cur_ind[3] + for i in range(3): + cur_ind[i] = 0 + pos[i] = self.DLE[i] + dds[i] = self.DRE[i] - self.DLE[i] + if mm_g is None: + mm_g = BoolArrayCollection() + # Uncompressed version + cdef BoolArrayColl mm_s0 + cdef BoolArrayColl mm_g0 + mm_s0 = BoolArrayColl(self.s1, self.s2) + mm_g0 = BoolArrayColl(self.s1, self.s2) + # Recurse + cdef np.float64_t rpos[3] + for i in range(3): + rpos[i] = self.DRE[i] - self.bitmap.dds_mi2[i]/2.0 + sbbox = self.selector.select_bbox_edge(pos, rpos) + if sbbox == 1: + for mi1 in range(self.s1): + mm_s0._set_coarse(mi1) + mm_s0._compress(mm_s) + return + else: + self.recursive_morton_mask(level, pos, dds, mi1, cur_ind) + # Set coarse morton indices in order + self.set_coarse_bool(mm_s0, mm_g0) + self.set_refined_list(mm_s0, mm_g0) + self.set_refined_bool(mm_s0, mm_g0) + # Compress + mm_s0._compress(mm_s) + mm_g0._compress(mm_g) + + def find_files(self, + np.ndarray[np.uint8_t, ndim=1] file_mask_p, + np.ndarray[np.uint8_t, ndim=1] file_mask_g): + cdef np.uint64_t i + cdef np.int32_t level = 0 + cdef np.uint64_t mi1 + mi1 = ~(0) + cdef np.float64_t pos[3] + cdef np.float64_t dds[3] + for i in range(3): + pos[i] = self.DLE[i] + dds[i] = self.DRE[i] - self.DLE[i] + # Fill with input + for i in range(self.nfiles): + self.file_mask_p[i] = file_mask_p[i] + self.file_mask_g[i] = file_mask_g[i] + # Recurse + self.recursive_morton_files(level, pos, dds, mi1) + # Fill with results + for i in range(self.nfiles): + file_mask_p[i] = self.file_mask_p[i] + if file_mask_p[i]: + file_mask_g[i] = 0 + else: + file_mask_g[i] = self.file_mask_g[i] + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef bint is_refined(self, np.uint64_t mi1): + return self.bitmap.collisions._isref(mi1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef bint is_refined_files(self, np.uint64_t mi1): + cdef np.uint64_t i + if self.bitmap.collisions._isref(mi1): + # Don't refine if files all selected already + for i in range(self.nfiles): + if self.file_mask_p[i] == 0: + if self.bitmap.bitmasks._isref(i, mi1) == 1: + return 1 + return 0 + else: + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void add_coarse(self, np.uint64_t mi1, int bbox = 2): + self.coarse_select_bool[mi1] = 1 + # Neighbors + if (self.ngz > 0) and (bbox == 2): + if not self.is_refined(mi1): + self.add_neighbors_coarse(mi1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void set_files_coarse(self, np.uint64_t mi1): + cdef np.uint64_t i + cdef bint flag_ref = self.is_refined(mi1) + # Flag files at coarse level + if flag_ref == 0: + for i in 
range(self.nfiles): + if self.file_mask_p[i] == 0: + if self.bitmap.bitmasks._get_coarse(i, mi1) == 1: + self.file_mask_p[i] = 1 + # Neighbors + if (flag_ref == 0) and (self.ngz > 0): + self.set_files_neighbors_coarse(mi1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int add_refined(self, np.uint64_t mi1, np.uint64_t mi2, int bbox = 2) except -1: + self.refined_select_bool[mi2] = 1 + # Neighbors + if (self.ngz > 0) and (bbox == 2): + self.add_neighbors_refined(mi1, mi2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void set_files_refined(self, np.uint64_t mi1, np.uint64_t mi2): + cdef np.uint64_t i + # Flag files + for i in range(self.nfiles): + if self.file_mask_p[i] == 0: + if self.bitmap.bitmasks._get(i, mi1, mi2): + self.file_mask_p[i] = 1 + # Neighbors + if (self.ngz > 0): + self.set_files_neighbors_refined(mi1, mi2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void add_neighbors_coarse(self, np.uint64_t mi1): + cdef np.uint64_t m + cdef np.uint32_t ntot + cdef np.uint64_t mi1_n + ntot = morton_neighbors_coarse(mi1, self.max_index1, + self.periodicity, + self.ngz, self.neighbors, + self.ind1_n, self.neighbor_list1) + for m in range(ntot): + mi1_n = self.neighbor_list1[m] + self.coarse_ghosts_bool[mi1_n] = 1 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void set_files_neighbors_coarse(self, np.uint64_t mi1): + cdef np.uint64_t i, m + cdef np.uint32_t ntot + cdef np.uint64_t mi1_n + ntot = morton_neighbors_coarse(mi1, self.max_index1, + self.periodicity, + self.ngz, self.neighbors, + self.ind1_n, self.neighbor_list1) + for m in range(ntot): + mi1_n = self.neighbor_list1[m] + for i in range(self.nfiles): + if self.file_mask_g[i] == 0: + if self.bitmap.bitmasks._get_coarse(i, mi1_n): + self.file_mask_g[i] = 1 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void add_neighbors_refined(self, np.uint64_t mi1, np.uint64_t mi2): + cdef int m + cdef np.uint32_t ntot + cdef np.uint64_t mi1_n, mi2_n + ntot = morton_neighbors_refined(mi1, mi2, + self.max_index1, self.max_index2, + self.periodicity, self.ngz, + self.neighbors, self.ind1_n, self.ind2_n, + self.neighbor_list1, self.neighbor_list2) + for m in range(ntot): + mi1_n = self.neighbor_list1[m] + mi2_n = self.neighbor_list2[m] + self.coarse_ghosts_bool[mi1_n] = 1 + IF RefinedExternalGhosts == 1: + if mi1_n == mi1: + self.refined_ghosts_bool[mi2_n] = 1 + else: + self.refined_ghosts_list._set(mi1_n, mi2_n) + ELSE: + if mi1_n == mi1: + self.refined_ghosts_bool[mi2_n] = 1 + elif self.is_refined(mi1_n) == 1: + self.refined_ghosts_list._set(mi1_n, mi2_n) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void set_files_neighbors_refined(self, np.uint64_t mi1, np.uint64_t mi2): + cdef int i, m + cdef np.uint32_t ntot + cdef np.uint64_t mi1_n, mi2_n + ntot = morton_neighbors_refined(mi1, mi2, + self.max_index1, self.max_index2, + self.periodicity, self.ngz, + self.neighbors, self.ind1_n, self.ind2_n, + self.neighbor_list1, self.neighbor_list2) + for m in range(ntot): + mi1_n = self.neighbor_list1[m] + mi2_n = self.neighbor_list2[m] + if self.is_refined(mi1_n) == 1: + for i in 
range(self.nfiles): + if self.file_mask_g[i] == 0: + if self.bitmap.bitmasks._get(i, mi1_n, mi2_n) == 1: + self.file_mask_g[i] = 1 + else: + for i in range(self.nfiles): + if self.file_mask_g[i] == 0: + if self.bitmap.bitmasks._get_coarse(i, mi1_n) == 1: + self.file_mask_g[i] = 1 + break # If not refined, only one file should be selected + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void set_coarse_list(self, BoolArrayColl mm_s, BoolArrayColl mm_g): + self.coarse_select_list._fill_bool(mm_s) + self.coarse_ghosts_list._fill_bool(mm_g) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void set_refined_list(self, BoolArrayColl mm_s, BoolArrayColl mm_g): + self.refined_ghosts_list._fill_bool(mm_g) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void set_coarse_bool(self, BoolArrayColl mm_s, BoolArrayColl mm_g): + cdef np.uint64_t mi1 + mm_s._set_coarse_array_ptr(&self.coarse_select_bool[0]) + for mi1 in range(self.s1): + self.coarse_select_bool[mi1] = 0 + mm_g._set_coarse_array_ptr(&self.coarse_ghosts_bool[0]) + for mi1 in range(self.s1): + self.coarse_ghosts_bool[mi1] = 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void set_refined_bool(self, BoolArrayColl mm_s, BoolArrayColl mm_g): + mm_s._append(self.select_ewah) + mm_g._append(self.ghosts_ewah) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void push_refined_bool(self, np.uint64_t mi1): + cdef np.uint64_t mi2 + self.select_ewah._set_refined_array_ptr(mi1, &self.refined_select_bool[0]) + for mi2 in range(self.s2): + self.refined_select_bool[mi2] = 0 + self.ghosts_ewah._set_refined_array_ptr(mi1, &self.refined_ghosts_bool[0]) + for mi2 in range(self.s2): + self.refined_ghosts_bool[mi2] = 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void add_ghost_zones(self, BoolArrayColl mm_s, BoolArrayColl mm_g): + cdef np.uint64_t mi1, mi2, mi1_n, mi2_n + # Get ghost zones, unordered + for mi1 in range(self.s1): + if mm_s._get_coarse(mi1): + if self.is_refined(mi1): + for mi2 in range(self.s2): + if mm_s._get(mi1, mi2): + self.add_neighbors_refined(mi1, mi2) + # self.push_refined_bool(mi1) + self.ghosts_ewah._set_refined_array_ptr(mi1, + &self.refined_ghosts_bool[0]) + for mi2 in range(self.s2): + self.refined_ghosts_bool[mi2] = 0 + else: + self.add_neighbors_coarse(mi1) + # Add ghost zones to bool array in order + mm_g._set_coarse_array_ptr(&self.coarse_ghosts_bool[0]) + for mi1 in range(self.s1): + self.coarse_ghosts_bool[mi1] = 0 + self.refined_ghosts_list._fill_bool(mm_g) + mm_g._append(self.ghosts_ewah) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int fill_subcells_mi1(self, + np.uint64_t nlevel, + np.uint64_t ind1[3]) except -1: + cdef np.uint64_t imi, fmi + cdef np.uint64_t mi + cdef np.uint64_t shift_by = 3 * (self.bitmap.index_order1 - nlevel) + imi = encode_morton_64bit(ind1[0], ind1[1], ind1[2]) << shift_by + fmi = imi + (1 << shift_by) + for mi in range(imi, fmi): + self.add_coarse(mi, 1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int fill_subcells_mi2(self, + np.uint64_t nlevel, + np.uint64_t mi1, + np.uint64_t ind2[3]) except -1: + cdef np.uint64_t imi, fmi + cdef np.uint64_t shift_by = 3 * ((self.bitmap.index_order2 + + 
self.bitmap.index_order1) - nlevel) + imi = encode_morton_64bit(ind2[0], ind2[1], ind2[2]) << shift_by + fmi = imi + (1 << shift_by) + for mi2 in range(imi, fmi): + self.add_refined(mi1, mi2, 1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int recursive_morton_mask( + self, np.int32_t level, np.float64_t pos[3], + np.float64_t dds[3], np.uint64_t mi1, np.uint64_t cur_ind[3]) except -1: + cdef np.uint64_t mi2 + cdef np.float64_t npos[3] + cdef np.float64_t rpos[3] + cdef np.float64_t ndds[3] + cdef np.uint64_t nlevel + cdef np.uint64_t ind1[3] + cdef np.uint64_t ind2[3] + cdef np.uint64_t ncur_ind[3] + cdef np.uint64_t* zeros = [0, 0, 0] + cdef int i, j, k, m, sbbox + PyErr_CheckSignals() + for i in range(3): + ndds[i] = dds[i]/2 + nlevel = level + 1 + # Loop over octs + for i in range(2): + npos[0] = pos[0] + i*ndds[0] + rpos[0] = npos[0] + ndds[0] + ncur_ind[0] = (cur_ind[0] << 1) + i + for j in range(2): + npos[1] = pos[1] + j*ndds[1] + rpos[1] = npos[1] + ndds[1] + ncur_ind[1] = (cur_ind[1] << 1) + j + for k in range(2): + npos[2] = pos[2] + k*ndds[2] + rpos[2] = npos[2] + ndds[2] + ncur_ind[2] = (cur_ind[2] << 1) + k + # Only recurse into selected cells + sbbox = self.selector.select_bbox_edge(npos, rpos) + if sbbox == 0: + continue + if nlevel < self.order1: + if sbbox == 1: + self.fill_subcells_mi1(nlevel, ncur_ind) + else: + self.recursive_morton_mask( + nlevel, npos, ndds, mi1, ncur_ind) + elif nlevel == self.order1: + mi1 = encode_morton_64bit( + ncur_ind[0], ncur_ind[1], ncur_ind[2]) + if sbbox == 2: # an edge cell + if self.is_refined(mi1) == 1: + # note we pass zeros here in the last argument + # this is because we now need to generate + # *refined* indices above order1 so we need to + # start a new running count of refined indices. 
+ # + # note that recursive_morton_mask does not + # mutate the last argument (a new index is + # calculated in each stack frame) so this is + # safe + self.recursive_morton_mask( + nlevel, npos, ndds, mi1, zeros) + self.add_coarse(mi1, sbbox) + self.push_refined_bool(mi1) + elif nlevel < (self.order1 + self.order2): + if sbbox == 1: + self.fill_subcells_mi2(nlevel, mi1, ncur_ind) + else: + self.recursive_morton_mask( + nlevel, npos, ndds, mi1, ncur_ind) + elif nlevel == (self.order1 + self.order2): + mi2 = encode_morton_64bit( + ncur_ind[0], ncur_ind[1], ncur_ind[2]) + self.add_refined(mi1, mi2, sbbox) + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void recursive_morton_files(self, np.int32_t level, np.float64_t pos[3], + np.float64_t dds[3], np.uint64_t mi1): + cdef np.uint64_t mi2 + cdef np.float64_t npos[3] + cdef np.float64_t rpos[3] + cdef np.float64_t ndds[3] + cdef np.uint64_t nlevel + cdef np.float64_t DLE[3] + cdef np.uint64_t ind1[3] + cdef np.uint64_t ind2[3] + cdef int i, j, k, m + for i in range(3): + ndds[i] = dds[i]/2 + nlevel = level + 1 + # Loop over octs + for i in range(2): + npos[0] = pos[0] + i*ndds[0] + rpos[0] = npos[0] + ndds[0] + for j in range(2): + npos[1] = pos[1] + j*ndds[1] + rpos[1] = npos[1] + ndds[1] + for k in range(2): + npos[2] = pos[2] + k*ndds[2] + rpos[2] = npos[2] + ndds[2] + # Only recurse into selected cells + if not self.selector.select_bbox(npos, rpos): continue + if nlevel < self.order1: + self.recursive_morton_files(nlevel, npos, ndds, mi1) + elif nlevel == self.order1: + mi1 = bounded_morton_dds(npos[0], npos[1], npos[2], self.DLE, ndds) + if self.is_refined_files(mi1): + self.recursive_morton_files(nlevel, npos, ndds, mi1) + self.set_files_coarse(mi1) + elif nlevel < (self.order1 + self.order2): + self.recursive_morton_files(nlevel, npos, ndds, mi1) + elif nlevel == (self.order1 + self.order2): + decode_morton_64bit(mi1,ind1) + for m in range(3): + DLE[m] = self.DLE[m] + ndds[m]*ind1[m]*self.max_index2 + mi2 = bounded_morton_dds(npos[0], npos[1], npos[2], DLE, ndds) + self.set_files_refined(mi1,mi2) + +cdef class ParticleBitmapOctreeContainer(SparseOctreeContainer): + cdef Oct** oct_list + cdef public int max_level + cdef public int n_ref + cdef int loaded # Loaded with load_octree? 
+ cdef np.uint8_t* _ptr_index_base_roots + cdef np.uint8_t* _ptr_index_base_octs + cdef np.uint64_t* _ptr_octs_per_root + cdef public np.uint8_t[:] _index_base_roots + cdef public np.uint8_t[:] _index_base_octs + cdef np.uint64_t[:] _octs_per_root + cdef public int overlap_cells + def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge, + int num_root, over_refine = 1): + super(ParticleBitmapOctreeContainer, self).__init__( + domain_dimensions, domain_left_edge, domain_right_edge, + over_refine) + self.loaded = 0 + self.fill_style = "o" + self.partial_coverage = 2 + self.overlap_cells = 0 + # Now the overrides + self.max_level = -1 + self.max_root = num_root + self.root_nodes = malloc(sizeof(OctKey) * num_root) + self._ptr_index_base_roots = malloc(sizeof(np.uint8_t) * num_root) + self._ptr_octs_per_root = malloc(sizeof(np.uint64_t) * num_root) + for i in range(num_root): + self.root_nodes[i].key = -1 + self.root_nodes[i].node = NULL + self._ptr_index_base_roots[i] = 1 + self._ptr_octs_per_root[i] = 0 + self._index_base_roots = self._ptr_index_base_roots + self._octs_per_root = self._ptr_octs_per_root + + def allocate_domains(self, counts = None): + if counts is None: + counts = [self.max_root] + OctreeContainer.allocate_domains(self, counts) + + def finalize(self): + # Assign domain ind + cdef SelectorObject selector = AlwaysSelector(None) + selector.overlap_cells = self.overlap_cells + cdef oct_visitors.AssignDomainInd visitor + visitor = oct_visitors.AssignDomainInd(self) + self.visit_all_octs(selector, visitor) + assert ((visitor.global_index+1)*visitor.nz == visitor.index) + # Copy indexes + self._ptr_index_base_octs = malloc(sizeof(np.uint8_t)*self.nocts) + self._index_base_octs = self._ptr_index_base_octs + cdef np.int64_t nprev_octs = 0 + cdef int i + for i in range(self.num_root): + self._index_base_octs[nprev_octs:(nprev_octs+self._octs_per_root[i])] = self._index_base_roots[i] + nprev_octs += self._octs_per_root[i] + + cdef visit_assign(self, Oct *o, np.int64_t *lpos, int level, int *max_level, + np.int64_t index_root): + cdef int i, j, k + if o.children == NULL: + self.oct_list[lpos[0]] = o + self._index_base_octs[lpos[0]] = self._index_base_roots[index_root] + lpos[0] += 1 + max_level[0] = imax(max_level[0], level) + for i in range(2): + for j in range(2): + for k in range(2): + if o.children != NULL \ + and o.children[cind(i,j,k)] != NULL: + self.visit_assign(o.children[cind(i,j,k)], lpos, + level + 1, max_level, index_root) + return + + cdef Oct* allocate_oct(self): + #Allocate the memory, set to NULL or -1 + #We reserve space for n_ref particles, but keep + #track of how many are used with np initially 0 + self.nocts += 1 + cdef Oct *my_oct = malloc(sizeof(Oct)) + my_oct.domain = -1 + my_oct.file_ind = 0 + my_oct.domain_ind = self.nocts - 1 + my_oct.children = NULL + return my_oct + + def get_index_base_octs(self, np.int64_t[:] domain_ind): + cdef np.int64_t ndst = np.max(domain_ind) + 1 + ind = np.zeros(ndst, 'int64') - 1 + self._get_index_base_octs(ind, domain_ind) + return ind[ind >= 0] + + cdef void _get_index_base_octs(self, np.int64_t[:] ind, np.int64_t[:] domain_ind): + cdef SelectorObject selector = AlwaysSelector(None) + selector.overlap_cells = self.overlap_cells + cdef oct_visitors.IndexMaskMapOcts visitor + visitor = oct_visitors.IndexMaskMapOcts(self) + visitor.oct_mask = self._index_base_octs + visitor.oct_index = ind + visitor.map_domain_ind = domain_ind + self.visit_all_octs(selector, visitor) + + def __dealloc__(self): + #Call the freemem 
ops on every ocy + #of the root mesh recursively + cdef int i + if self.root_nodes== NULL: return + if self.loaded == 0: + for i in range(self.max_root): + if self.root_nodes[i].node == NULL: continue + self.visit_free(&self.root_nodes.node[i], 0) + self.root_nodes = NULL + free(self.oct_list) + free(self._ptr_index_base_roots) + free(self._ptr_index_base_octs) + free(self._ptr_octs_per_root) + self.oct_list = NULL + + cdef void visit_free(self, Oct *o, int free_this): + #Free the memory for this oct recursively + cdef int i, j, k + for i in range(2): + for j in range(2): + for k in range(2): + if o.children != NULL \ + and o.children[cind(i,j,k)] != NULL: + self.visit_free(o.children[cind(i,j,k)], 1) + if o.children != NULL: + free(o.children) + if free_this == 1: + free(o) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef void recursive_add(self, Oct *o, np.ndarray[np.uint64_t, ndim=1] indices, + int level, int *max_level, int domain_id, int *count): + cdef np.int64_t no = indices.shape[0], beg, end, nind + cdef np.int64_t index + cdef int i, j, k + cdef int ind[3] + cdef Oct *noct + cdef Oct *noct_ch + beg = end = 0 + if level > max_level[0]: max_level[0] = level + # Initialize children + if o.children == NULL: + o.children = malloc(sizeof(Oct *)*8) + for i in range(2): + for j in range(2): + for k in range(2): + o.children[cind(i,j,k)] = NULL + # noct = self.allocate_oct() + # noct.domain = o.domain + # noct.file_ind = 0 + # o.children[cind(i,j,k)] = noct + # Loop through sets of particles with matching prefix at this level + while end < no: + beg = end + index = (indices[beg] >> ((ORDER_MAX - level)*3)) + while (end < no) and (index == (indices[end] >> ((ORDER_MAX - level)*3))): + end += 1 + nind = (end - beg) + # Add oct + for i in range(3): + ind[i] = ((index >> (2 - i)) & 1) + # noct = o.children[cind(ind[0],ind[1],ind[2])] + if o.children[cind(ind[0],ind[1],ind[2])] != NULL: + raise Exception('Child was already initialized...') + noct = self.allocate_oct() + noct.domain = o.domain + o.children[cind(ind[0],ind[1],ind[2])] = noct + # Don't add it to the list if it will be refined + if nind > self.n_ref and level < ORDER_MAX: + self.nocts -= 1 + noct.domain_ind = -1 # overwritten by finalize + else: + count[0] += 1 + noct.file_ind = o.file_ind + # noct.file_ind = nind + # o.file_ind = self.n_ref + 1 + # Refine oct or add its children + if nind > self.n_ref and level < ORDER_MAX: + self.recursive_add(noct, indices[beg:end], level+1, + max_level, domain_id, count) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def add(self, np.ndarray[np.uint64_t, ndim=1] indices, + np.uint64_t order1, int domain_id = -1): + #Add this particle to the root oct + #Then if that oct has children, add it to them recursively + #If the child needs to be refined because of max particles, do so + cdef Oct *root = NULL + cdef np.int64_t no = indices.shape[0], beg, end, index, nind + cdef int i, level + cdef int ind[3] + cdef np.uint64_t ind64[3] + cdef int max_level = self.max_level + # Note what we're doing here: we have decided the root will always be + # zero, since we're in a forest of octrees, where the root_mesh node is + # the level 0. This means our morton indices should be made with + # respect to that, which means we need to keep a few different arrays + # of them. 
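Both `recursive_add` above and `add` below walk a sorted array of Morton codes, detect runs that share the same prefix at the current level, and refine any run longer than `n_ref` one level deeper. A compact sketch of that grouping rule with plain integers (illustrative only; the `ORDER_MAX` bit arithmetic is replaced by a generic `order` argument):

# Illustrative sketch (not yt code): prefix grouping of sorted Morton codes
# with refinement of any group holding more than n_ref particles.
def build(codes, level, order, n_ref, leaves):
    beg = 0
    while beg < len(codes):
        prefix = codes[beg] >> ((order - level) * 3)
        end = beg
        while end < len(codes) and (codes[end] >> ((order - level) * 3)) == prefix:
            end += 1
        if (end - beg) > n_ref and level < order:
            build(codes[beg:end], level + 1, order, n_ref, leaves)   # refine
        else:
            leaves.append((level, prefix, end - beg))                # leaf oct
        beg = end

leaves = []
build(sorted([0, 1, 2, 3, 9, 40]), level=1, order=2, n_ref=2, leaves=leaves)
# With order=2 (6-bit codes), codes 0-3 share prefix 0 at level 1 and are
# refined one level further, while 9 and 40 remain level-1 leaves.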
+ cdef np.int64_t index_root = 0 + cdef int root_count + beg = end = 0 + self._octs_per_root[:] = 1 # Roots count reguardless + while end < no: + # Determine number of octs with this prefix + beg = end + index = (indices[beg] >> ((ORDER_MAX - self.level_offset)*3)) + while (end < no) and (index == (indices[end] >> ((ORDER_MAX - self.level_offset)*3))): + end += 1 + nind = (end - beg) + # Find root for prefix + decode_morton_64bit(index, ind64) + for i in range(3): + ind[i] = ind64[i] + while (index_root < self.num_root) and \ + (self.ipos_to_key(ind) != self.root_nodes[index_root].key): + index_root += 1 + if index_root >= self.num_root: + raise Exception('No root found for {},{},{}'.format(ind[0],ind[1],ind[2])) + root = self.root_nodes[index_root].node + # self.get_root(ind, &root) + # if root == NULL: + # raise Exception('No root found for {},{},{}'.format(ind[0],ind[1],ind[2])) + root.file_ind = index_root + # Refine root as necessary + if (end - beg) > self.n_ref: + root_count = 0 + self.nocts -= 1 + self.recursive_add(root, indices[beg:end], self.level_offset+1, + &max_level, domain_id, &root_count) + self._octs_per_root[index_root] = root_count + self.max_level = max_level + assert(self.nocts == np.sum(self._octs_per_root)) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef Oct *refine_oct(self, Oct *o, np.uint64_t index, int level): + #Allocate and initialize child octs + #Attach particles to child octs + #Remove particles from this oct entirely + cdef int i, j, k + cdef int ind[3] + cdef Oct *noct + + # Initialize empty children + if o.children == NULL: + o.children = malloc(sizeof(Oct *)*8) + + # This version can be used to just add the child containing the index + # for i in range(2): + # for j in range(2): + # for k in range(2): + # o.children[cind(i,j,k)] = NULL + # # Only allocate and count the indexed oct + # for i in range(3): + # ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1 + + # noct = self.allocate_oct() + # noct.domain = o.domain + # noct.file_ind = 0 + # o.children[cind(ind[0],ind[1],ind[2])] = noct + # o.file_ind = self.n_ref + 1 + + + for i in range(2): + for j in range(2): + for k in range(2): + noct = self.allocate_oct() + noct.domain = o.domain + noct.file_ind = 0 + o.children[cind(i,j,k)] = noct + o.file_ind = self.n_ref + 1 + for i in range(3): + ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1 + noct = o.children[cind(ind[0],ind[1],ind[2])] + return noct + + cdef void filter_particles(self, Oct *o, np.uint64_t *data, np.int64_t p, + int level): + # Now we look at the last nref particles to decide where they go. + cdef int n = imin(p, self.n_ref) + cdef np.uint64_t *arr = data + imax(p - self.n_ref, 0) + # Now we figure out our prefix, which is the oct address at this level. + # As long as we're actually in Morton order, we do not need to worry + # about *any* of the other children of the oct. + prefix1 = data[p] >> (ORDER_MAX - level)*3 + for i in range(n): + prefix2 = arr[i] >> (ORDER_MAX - level)*3 + if (prefix1 == prefix2): + o.file_ind += 1 diff --git a/yt/geometry/particle_smooth.pxd b/yt/geometry/particle_smooth.pxd index 906e4f3825c..2240aef6cb3 100644 --- a/yt/geometry/particle_smooth.pxd +++ b/yt/geometry/particle_smooth.pxd @@ -6,13 +6,6 @@ Particle Deposition onto Octs """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np import numpy as np diff --git a/yt/geometry/particle_smooth.pyx b/yt/geometry/particle_smooth.pyx index a94d5d0f844..469aa335a47 100644 --- a/yt/geometry/particle_smooth.pyx +++ b/yt/geometry/particle_smooth.pyx @@ -6,13 +6,6 @@ Particle smoothing in cells """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np import numpy as np @@ -199,7 +192,7 @@ cdef class ParticleSmoothOperation: # If we have yet to assign the starting index to this oct, we do so # now. if doff[offset] < 0: doff[offset] = i - #print domain_id, domain_offset, moff_p, moff_m + #print(domain_id, domain_offset, moff_p, moff_m) #raise RuntimeError # Now doff is full of offsets to the first entry in the pind that # refers to that oct's particles. @@ -225,8 +218,8 @@ cdef class ParticleSmoothOperation: &nind, pind, pcount, offset, index_field_pointers, particle_octree, domain_id, &nsize, oct_left_edges, oct_dds, dist_queue) - #print "VISITED", visited.sum(), visited.size, - #print 100.0*float(visited.sum())/visited.size + #print("VISITED", visited.sum(), visited.size,) + #print(100.0*float(visited.sum())/visited.size) if nind != NULL: free(nind) @@ -326,7 +319,7 @@ cdef class ParticleSmoothOperation: # If we have yet to assign the starting index to this oct, we do so # now. if doff[offset] < 0: doff[offset] = i - #print domain_id, domain_offset, moff_p, moff_m + #print(domain_id, domain_offset, moff_p, moff_m) #raise RuntimeError # Now doff is full of offsets to the first entry in the pind that # refers to that oct's particles. @@ -348,8 +341,8 @@ cdef class ParticleSmoothOperation: doff, &nind, pind, pcount, pind0, NULL, particle_octree, domain_id, &nsize, dist_queue) - #print "VISITED", visited.sum(), visited.size, - #print 100.0*float(visited.sum())/visited.size + #print("VISITED", visited.sum(), visited.size,) + #print(100.0*float(visited.sum())/visited.size) if nind != NULL: free(nind) @@ -542,7 +535,7 @@ cdef class ParticleSmoothOperation: if nind[0][m] < 0: continue nntot += 1 ntot += pcounts[nind[0][m]] - print "SOMETHING WRONG", dq.curn, nneighbors, ntot, nntot + print("SOMETHING WRONG", dq.curn, nneighbors, ntot, nntot) self.process(offset, i, j, k, dim, opos, fields, index_fields, dq) cpos[2] += dds[2] diff --git a/yt/geometry/selection_routines.pxd b/yt/geometry/selection_routines.pxd index 1ed5985b8e9..8ad6c687d63 100644 --- a/yt/geometry/selection_routines.pxd +++ b/yt/geometry/selection_routines.pxd @@ -6,25 +6,14 @@ Geometry selection routine imports. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- cimport numpy as np from oct_visitors cimport Oct, OctVisitor +from oct_container cimport OctreeContainer from grid_visitors cimport GridTreeNode, GridVisitorData, \ grid_visitor_function, check_child_masked - -cdef inline _ensure_code(arr): - if hasattr(arr, "units"): - if "code_length" == str(arr.units): - return arr - arr.convert_to_units("code_length") - return arr +from yt.utilities.lib.geometry_utils cimport decode_morton_64bit +from yt.utilities.lib.fp_utils cimport _ensure_code cdef class SelectorObject: cdef public np.int32_t min_level @@ -45,13 +34,18 @@ cdef class SelectorObject: OctVisitor visitor, int i, int j, int k) cdef int select_grid(self, np.float64_t left_edge[3], np.float64_t right_edge[3], - np.int32_t level, Oct *o = ?) nogil + np.int32_t level, Oct *o = ?) nogil + cdef int select_grid_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3], + np.int32_t level, Oct *o = ?) nogil cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil cdef int select_point(self, np.float64_t pos[3]) nogil cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil cdef int select_bbox(self, np.float64_t left_edge[3], np.float64_t right_edge[3]) nogil + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil cdef int fill_mask_selector(self, np.float64_t left_edge[3], np.float64_t right_edge[3], np.float64_t dds[3], int dim[3], @@ -61,14 +55,16 @@ cdef class SelectorObject: cdef void visit_grid_cells(self, GridVisitorData *data, grid_visitor_function *func, np.uint8_t *cached_mask = ?) - # compute periodic distance (if periodicity set) assuming 0->domain_width[i] coordinates - cdef np.float64_t difference(self, np.float64_t x1, np.float64_t x2, int d) nogil + # compute periodic distance (if periodicity set) + # assuming 0->domain_width[d] coordinates + cdef np.float64_t periodic_difference( + self, np.float64_t x1, np.float64_t x2, int d) nogil cdef class AlwaysSelector(SelectorObject): pass cdef class OctreeSubsetSelector(SelectorObject): - cdef SelectorObject base_selector + cdef public SelectorObject base_selector cdef public np.int64_t domain_id cdef class BooleanSelector(SelectorObject): @@ -84,3 +80,4 @@ cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2, elif rel < -dw * 0.5: rel += dw return rel + diff --git a/yt/geometry/selection_routines.pyx b/yt/geometry/selection_routines.pyx index 195d48967d7..6c51664d526 100644 --- a/yt/geometry/selection_routines.pyx +++ b/yt/geometry/selection_routines.pyx @@ -6,17 +6,11 @@ Geometry selection routines. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
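The `periodic_difference` declaration and the `_periodic_dist` inline in the header above implement the minimum-image convention: a separation is wrapped by plus or minus the domain width until it lies within half a width of zero. A tiny sketch, assuming both coordinates already lie inside [0, dw):

# Illustrative minimum-image separation on a periodic axis of width dw.
def periodic_difference(x1, x2, dw, periodic=True):
    rel = x1 - x2
    if periodic:
        if rel > 0.5 * dw:
            rel -= dw
        elif rel < -0.5 * dw:
            rel += dw
    return rel

# Two points near opposite edges of a unit-width axis are actually close.
assert abs(periodic_difference(0.95, 0.05, 1.0) + 0.1) < 1e-12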
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np cimport cython +from libc.math cimport sqrt from cython cimport floating from libc.stdlib cimport malloc, free from yt.utilities.lib.fnv_hash cimport c_fnv_hash as fnv_hash @@ -29,6 +23,8 @@ from yt.utilities.lib.volume_container cimport \ from yt.utilities.lib.grid_traversal cimport \ sampler_function, walk_volume from yt.utilities.lib.bitarray cimport ba_get_value, ba_set_value +from yt.utilities.lib.geometry_utils cimport encode_morton_64bit, decode_morton_64bit, \ + bounded_morton_dds, morton_neighbors_coarse, morton_neighbors_refined cdef extern from "math.h": double exp(double x) nogil @@ -46,6 +42,13 @@ cdef extern from "math.h": cdef np.float64_t grid_eps = np.finfo(np.float64).eps grid_eps = 0.0 +cdef inline np.float64_t dot(np.float64_t* v1, + np.float64_t* v2) nogil: + return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2] + +cdef inline np.float64_t norm(np.float64_t* v) nogil: + return sqrt(dot(v, v)) + # These routines are separated into a couple different categories: # # * Routines for identifying intersections of an object with a bounding box @@ -139,6 +142,14 @@ cdef class SelectorObject: self.domain_width[i] = DRE[i] - DLE[i] self.periodicity[i] = ds.periodicity[i] + def get_periodicity(self): + cdef int i + cdef np.ndarray[np.uint8_t, ndim=1] periodicity + periodicity = np.zeros(3, dtype='uint8') + for i in range(3): + periodicity[i] = self.periodicity[i] + return periodicity + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @@ -200,7 +211,7 @@ cdef class SelectorObject: sdds[i] = dds[i]/2.0 LE[i] = pos[i] - dds[i]/2.0 RE[i] = pos[i] + dds[i]/2.0 - #print LE[0], RE[0], LE[1], RE[1], LE[2], RE[2] + #print(LE[0], RE[0], LE[1], RE[1], LE[2], RE[2]) res = self.select_grid(LE, RE, level, root) if res == 1 and visitor.domain > 0 and root.domain != visitor.domain: res = -1 @@ -329,6 +340,15 @@ cdef class SelectorObject: if level < self.min_level or level > self.max_level: return 0 return self.select_bbox(left_edge, right_edge) + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_grid_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3], + np.int32_t level, Oct *o = NULL) nogil: + if level < self.min_level or level > self.max_level: return 0 + return self.select_bbox_edge(left_edge, right_edge) + cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil: return 0 @@ -340,12 +360,27 @@ cdef class SelectorObject: cdef int select_bbox(self, np.float64_t left_edge[3], np.float64_t right_edge[3]) nogil: + """ + Returns: + 0: If the selector does not touch the bounding box. + 1: If the selector overlaps the bounding box anywhere. + """ + return 0 + + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + """ + Returns: + 0: If the selector does not touch the bounding box. + 1: If the selector contains the entire bounding box. + 2: If the selector contains part of the bounding box. + """ return 0 @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef np.float64_t difference(self, np.float64_t x1, np.float64_t x2, int d) nogil: + cdef np.float64_t periodic_difference(self, np.float64_t x1, np.float64_t x2, int d) nogil: # domain_width is already in code units, and we assume what is fed in # is too. 
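To make the 0/1/2 contract of ``select_bbox_edge`` concrete: the sphere selector further below classifies a box by comparing the squared distances from the sphere centre to the box's closest point and to its farthest corner against the squared radius. Here is a minimal NumPy sketch of that classification, ignoring periodicity; it is an illustration added for this document, not the patch's Cython code, and the helper name is hypothetical.

import numpy as np

def classify_bbox_vs_sphere(left_edge, right_edge, center, radius):
    """Return 0 (no overlap), 1 (box fully inside the sphere) or 2 (partial overlap)."""
    left_edge, right_edge, center = map(np.asarray, (left_edge, right_edge, center))
    box_center = 0.5 * (left_edge + right_edge)
    half_width = 0.5 * (right_edge - left_edge)
    rel = box_center - center
    # vector to the closest point of the box, and distance to the farthest corner
    closest = rel - np.clip(rel, -half_width, half_width)
    farthest = np.abs(rel) + half_width
    if np.dot(closest, closest) > radius**2:
        return 0   # box entirely outside the sphere
    if np.dot(farthest, farthest) < radius**2:
        return 1   # box entirely inside the sphere
    return 2       # partial overlap

print(classify_bbox_vs_sphere([0., 0., 0.], [0.1, 0.1, 0.1], [0.5, 0.5, 0.5], 1.0))  # 1
print(classify_bbox_vs_sphere([0., 0., 0.], [1., 1., 1.], [0.5, 0.5, 0.5], 0.5))     # 2
print(classify_bbox_vs_sphere([2., 2., 2.], [3., 3., 3.], [0., 0., 0.], 0.5))        # 0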
cdef np.float64_t rel = x1 - x2 @@ -551,39 +586,52 @@ cdef class SelectorObject: def count_points(self, np.ndarray[floating, ndim=1] x, np.ndarray[floating, ndim=1] y, np.ndarray[floating, ndim=1] z, - np.float64_t radius): + radii): cdef int count = 0 cdef int i cdef np.float64_t pos[3] + cdef np.float64_t radius + cdef np.float64_t[:] _radii + if radii is not None: + _radii = np.atleast_1d(np.array(radii, dtype='float64')) + else: + _radii = np.array([0.0], dtype='float64') _ensure_code(x) _ensure_code(y) _ensure_code(z) with nogil: - if radius == 0.0 : - for i in range(x.shape[0]): - pos[0] = x[i] - pos[1] = y[i] - pos[2] = z[i] + for i in range(x.shape[0]): + pos[0] = x[i] + pos[1] = y[i] + pos[2] = z[i] + if _radii.shape[0] == 1: + radius = _radii[0] + else: + radius = _radii[i] + if radius == 0: count += self.select_point(pos) - else : - for i in range(x.shape[0]): - pos[0] = x[i] - pos[1] = y[i] - pos[2] = z[i] + else: count += self.select_sphere(pos, radius) return count @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - def select_points(self, np.ndarray[floating, ndim=1] x, - np.ndarray[floating, ndim=1] y, - np.ndarray[floating, ndim=1] z, - np.float64_t radius): + def select_points(self, + np.ndarray[floating, ndim=1] x, + np.ndarray[floating, ndim=1] y, + np.ndarray[floating, ndim=1] z, + radii): cdef int count = 0 cdef int i cdef np.float64_t pos[3] + cdef np.float64_t radius cdef np.ndarray[np.uint8_t, ndim=1] mask + cdef np.float64_t[:] _radii + if radii is not None: + _radii = np.atleast_1d(np.array(radii, dtype='float64')) + else: + _radii = np.array([0.0], dtype='float64') mask = np.empty(x.shape[0], dtype='uint8') _ensure_code(x) _ensure_code(y) @@ -596,20 +644,19 @@ cdef class SelectorObject: # between a ray and a point is null, while ray and a # sphere is allowed) with nogil: - if radius == 0.0 : - for i in range(x.shape[0]) : - pos[0] = x[i] - pos[1] = y[i] - pos[2] = z[i] + for i in range(x.shape[0]) : + pos[0] = x[i] + pos[1] = y[i] + pos[2] = z[i] + if _radii.shape[0] == 1: + radius = 0 + else: + radius = _radii[i] + if radius == 0: mask[i] = self.select_point(pos) - count += mask[i] - else : - for i in range(x.shape[0]): - pos[0] = x[i] - pos[1] = y[i] - pos[2] = z[i] + else: mask[i] = self.select_sphere(pos, radius) - count += mask[i] + count += mask[i] if count == 0: return None return mask.view("bool") @@ -679,7 +726,7 @@ cdef class PointSelector(SelectorObject): cdef int i cdef np.float64_t dist, dist2 = 0 for i in range(3): - dist = self.difference(pos[i], self.p[i], i) + dist = self.periodic_difference(pos[i], self.p[i], i) dist2 += dist*dist if dist2 <= radius*radius: return 1 return 0 @@ -697,6 +744,21 @@ cdef class PointSelector(SelectorObject): else: return 0 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + # point definitely can only be in one cell + # Return 2 in all cases to indicate that the point only overlaps + # portion of box + if (left_edge[0] <= self.p[0] <= right_edge[0] and + left_edge[1] <= self.p[1] <= right_edge[1] and + left_edge[2] <= self.p[2] <= right_edge[2]): + return 2 + else: + return 0 + def _hash_vals(self): return (("p[0]", self.p[0]), ("p[1]", self.p[1]), @@ -739,6 +801,14 @@ cdef class SphereSelector(SelectorObject): pos[2] - 0.5*dds[2] <= self.center[2] <= pos[2]+0.5*dds[2]): return 1 return self.select_point(pos) + # # langmm: added to allow sphere to interesect 
edge/corner of cell + # cdef np.float64_t LE[3] + # cdef np.float64_t RE[3] + # cdef int i + # for i in range(3): + # LE[i] = pos[i] - 0.5*dds[i] + # RE[i] = pos[i] + 0.5*dds[i] + # return self.select_bbox(LE, RE) @cython.boundscheck(False) @cython.wraparound(False) @@ -764,7 +834,7 @@ cdef class SphereSelector(SelectorObject): cdef int i cdef np.float64_t dist, dist2 = 0 for i in range(3): - dist = self.difference(pos[i], self.center[i], i) + dist = self.periodic_difference(pos[i], self.center[i], i) dist2 += dist*dist dist = self.radius+radius if dist2 <= dist*dist: return 1 @@ -777,9 +847,9 @@ cdef class SphereSelector(SelectorObject): np.float64_t right_edge[3]) nogil: cdef np.float64_t box_center, relcenter, closest, dist, edge cdef int i - if (left_edge[0] <= self.center[0] <= right_edge[0] and - left_edge[1] <= self.center[1] <= right_edge[1] and - left_edge[2] <= self.center[2] <= right_edge[2]): + if (left_edge[0] <= self.center[0] < right_edge[0] and + left_edge[1] <= self.center[1] < right_edge[1] and + left_edge[2] <= self.center[2] < right_edge[2]): return 1 for i in range(3): if not self.check_box[i]: continue @@ -791,13 +861,66 @@ cdef class SphereSelector(SelectorObject): for i in range(3): # Early terminate box_center = (right_edge[i] + left_edge[i])/2.0 - relcenter = self.difference(box_center, self.center[i], i) + relcenter = self.periodic_difference(box_center, self.center[i], i) edge = right_edge[i] - left_edge[i] closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0) dist += closest*closest if dist > self.radius2: return 0 return 1 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef np.float64_t box_center, relcenter, closest, farthest, cdist, fdist, edge + cdef int i + if (left_edge[0] <= self.center[0] <= right_edge[0] and + left_edge[1] <= self.center[1] <= right_edge[1] and + left_edge[2] <= self.center[2] <= right_edge[2]): + fdist = 0 + for i in range(3): + edge = right_edge[i] - left_edge[i] + box_center = (right_edge[i] + left_edge[i])/2.0 + relcenter = self.periodic_difference( + box_center, self.center[i], i) + if relcenter >= 0: + farthest = relcenter + edge/2.0 + else: + farthest = relcenter - edge/2.0 + # farthest = relcenter + fclip(relcenter, -edge/2.0, edge/2.0) + fdist += farthest*farthest + if fdist >= self.radius2: + return 2 # Box extends outside sphere + return 1 # Box entirely inside sphere + for i in range(3): + if not self.check_box[i]: continue + if right_edge[i] < self.bbox[i][0] or \ + left_edge[i] > self.bbox[i][1]: + return 0 # Box outside sphere bounding box + # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/ + cdist = 0 + fdist = 0 + for i in range(3): + # Early terminate + box_center = (right_edge[i] + left_edge[i])/2.0 + relcenter = self.periodic_difference(box_center, self.center[i], i) + edge = right_edge[i] - left_edge[i] + closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0) + if relcenter >= 0: + farthest = relcenter + edge/2.0 + else: + farthest = relcenter - edge/2.0 + #farthest = relcenter + fclip(relcenter, -edge/2.0, edge/2.0) + cdist += closest*closest + fdist += farthest*farthest + if cdist > self.radius2: + return 0 # Box does not overlap sphere + if fdist < self.radius2: + return 1 # Sphere extends to entirely contain box + else: + return 2 # Sphere only partially overlaps box + def _hash_vals(self): return (("radius", self.radius), 
("radius2", self.radius2), @@ -811,8 +934,9 @@ cdef class RegionSelector(SelectorObject): cdef np.float64_t left_edge[3] cdef np.float64_t right_edge[3] cdef np.float64_t right_edge_shift[3] + cdef public bint is_all_data cdef bint loose_selection - cdef bint check_period + cdef bint check_period[3] @cython.boundscheck(False) @cython.wraparound(False) @@ -825,14 +949,20 @@ cdef class RegionSelector(SelectorObject): cdef np.float64_t[:] DW = _ensure_code(dobj.ds.domain_width) cdef np.float64_t[:] DLE = _ensure_code(dobj.ds.domain_left_edge) cdef np.float64_t[:] DRE = _ensure_code(dobj.ds.domain_right_edge) + le_all = (np.array(LE) == _ensure_code(dobj.ds.domain_left_edge)).all() + re_all = (np.array(RE) == _ensure_code(dobj.ds.domain_right_edge)).all() + if le_all and re_all: + self.is_all_data = True + else: + self.is_all_data = False cdef np.float64_t region_width[3] cdef bint p[3] # This is for if we want to include zones that overlap and whose # centers are not strictly included. self.loose_selection = getattr(dobj, "loose_selection", False) - self.check_period = False for i in range(3): + self.check_period[i] = False region_width[i] = RE[i] - LE[i] p[i] = dobj.ds.periodicity[i] if region_width[i] <= 0: @@ -847,7 +977,7 @@ cdef class RegionSelector(SelectorObject): # without any adjustments. This is for short-circuiting the # short-circuit of the loop down below in mask filling. if LE[i] < DLE[i] or LE[i] > DRE[i] or RE[i] > DRE[i]: - self.check_period = True + self.check_period[i] = True # shift so left_edge guaranteed in domain if LE[i] < DLE[i]: LE[i] += DW[i] @@ -889,6 +1019,26 @@ cdef class RegionSelector(SelectorObject): return 0 return 1 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int i + for i in range(3): + if (right_edge[i] < self.left_edge[i] and \ + left_edge[i] >= self.right_edge_shift[i]) or \ + left_edge[i] >= self.right_edge[i]: + return 0 + for i in range(3): + if left_edge[i] < self.right_edge_shift[i]: + if right_edge[i] >= self.right_edge_shift[i]: + return 2 + elif left_edge[i] < self.left_edge[i] or \ + right_edge[i] >= self.right_edge[i]: + return 2 + return 1 + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @@ -914,6 +1064,26 @@ cdef class RegionSelector(SelectorObject): return 0 return 1 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil: + # adapted from http://stackoverflow.com/a/4579192/1382869 + cdef int i + cdef np.float64_t p + cdef np.float64_t r2 = radius**2 + cdef np.float64_t dmin = 0 + for i in range(3): + if self.periodicity[i] and self.check_period[i]: + p = pos[i] + self.right_edge_shift[i] + else: + p = pos[i] + if p < self.left_edge[i]: + dmin += (p - self.left_edge[i])**2 + elif pos[i] > self.right_edge[i]: + dmin += (p - self.right_edge[i])**2 + return int(dmin <= r2) + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @@ -932,14 +1102,13 @@ cdef class RegionSelector(SelectorObject): this_level = 1 cdef np.int64_t si[3] cdef np.int64_t ei[3] - if not self.check_period: - for i in range(3): + for i in range(3): + if not self.check_period[i]: si[i] = ((self.left_edge[i] - left_edge[i])/dds[i]) ei[i] = ((self.right_edge[i] - left_edge[i])/dds[i]) si[i] = iclip(si[i] - 1, 0, dim[i]) ei[i] = iclip(ei[i] + 1, 0, dim[i]) - else: - for i in 
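The new ``RegionSelector.select_sphere`` above uses the standard point-to-box distance test from the linked Stack Overflow answer: clamp the sphere centre onto the box and compare the squared residual with the squared radius. A non-periodic NumPy equivalent, offered only as an illustration (the helper name is made up):

import numpy as np

def sphere_overlaps_region(pos, radius, left_edge, right_edge):
    """True if a sphere at ``pos`` with ``radius`` touches the axis-aligned box."""
    pos, left_edge, right_edge = map(np.asarray, (pos, left_edge, right_edge))
    # clamping the centre onto the box gives the closest point of the box
    closest = np.clip(pos, left_edge, right_edge)
    dmin2 = np.sum((pos - closest) ** 2)
    return bool(dmin2 <= radius ** 2)

print(sphere_overlaps_region([1.2, 0.5, 0.5], 0.25, [0., 0., 0.], [1., 1., 1.]))  # True
print(sphere_overlaps_region([2.0, 0.5, 0.5], 0.25, [0., 0., 0.], [1., 1., 1.]))  # False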
range(3): + else: si[i] = 0 ei[i] = dim[i] with nogil: @@ -984,6 +1153,10 @@ cdef class CutRegionSelector(SelectorObject): np.float64_t right_edge[3]) nogil: return 1 + cdef int select_bbox_dge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + return 1 + cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil: with gil: if (pos[0], pos[1], pos[2]) in self._positions: @@ -1034,7 +1207,7 @@ cdef class DiskSelector(SelectorObject): cdef int i h = d = 0 for i in range(3): - temp = self.difference(pos[i], self.center[i], i) + temp = self.periodic_difference(pos[i], self.center[i], i) h += temp * self.norm_vec[i] d += temp*temp r2 = (d - h*h) @@ -1049,8 +1222,8 @@ cdef class DiskSelector(SelectorObject): cdef int i h = d = 0 for i in range(3): - temp = self.difference(pos[i], self.center[i], i) - h += pos[i] * self.norm_vec[i] + temp = self.periodic_difference(pos[i], self.center[i], i) + h += temp * self.norm_vec[i] d += temp*temp r2 = (d - h*h) d = self.radius+radius @@ -1107,6 +1280,56 @@ cdef class DiskSelector(SelectorObject): # if all_over == 0 and all_under == 0 and any_radius == 1: return 1 # return 0 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + # Until we can get our OBB/OBB intersection correct, disable this. + return 2 + # cdef np.float64_t *arr[2] + # cdef np.float64_t pos[3], H, D, R2, temp + # cdef int i, j, k, n + # cdef int all_under = 1 + # cdef int all_over = 1 + # cdef int any_radius = 0 + # # A moment of explanation (revised): + # # The disk and bounding box collide if any of the following are true: + # # 1) the center of the disk is inside the bounding box + # # 2) any corner of the box lies inside the disk + # # 3) the box spans the plane (!all_under and !all_over) and at least + # # one corner is within the cylindrical radius + + # # check if disk center lies inside bbox + # if left_edge[0] <= self.center[0] <= right_edge[0] and \ + # left_edge[1] <= self.center[1] <= right_edge[1] and \ + # left_edge[2] <= self.center[2] <= right_edge[2] : + # return 1 + + # # check all corners + # arr[0] = left_edge + # arr[1] = right_edge + # for i in range(2): + # pos[0] = arr[i][0] + # for j in range(2): + # pos[1] = arr[j][1] + # for k in range(2): + # pos[2] = arr[k][2] + # H = D = 0 + # for n in range(3): + # temp = self.periodic_difference( + # pos[n], self.center[n], n) + # H += (temp * self.norm_vec[n]) + # D += temp*temp + # R2 = (D - H*H) + # if R2 < self.radius2 : + # any_radius = 1 + # if fabs(H) < self.height: return 1 + # if H < 0: all_over = 0 + # if H > 0: all_under = 0 + # if all_over == 0 and all_under == 0 and any_radius == 1: return 1 + # return 0 + def _hash_vals(self): return (("norm_vec[0]", self.norm_vec[0]), ("norm_vec[1]", self.norm_vec[1]), @@ -1192,6 +1415,41 @@ cdef class CuttingPlaneSelector(SelectorObject): return 0 return 1 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int i, j, k, n + cdef np.float64_t *arr[2] + cdef np.float64_t pos[3] + cdef np.float64_t gd + arr[0] = left_edge + arr[1] = right_edge + all_under = 1 + all_over = 1 + # Check each corner + for i in range(2): + pos[0] = arr[i][0] + for j in range(2): + pos[1] = arr[j][1] + for k in range(2): + pos[2] = arr[k][2] + gd = self.d + for n in range(3): + gd += pos[n] * 
self.norm_vec[n] + # this allows corners and faces on the low-end to + # collide, while not selecting cells on the high-side + if i == 0 and j == 0 and k == 0 : + if gd <= 0: all_over = 0 + if gd >= 0: all_under = 0 + else : + if gd < 0: all_over = 0 + if gd > 0: all_under = 0 + if all_over == 1 or all_under == 1: + return 0 + return 2 # a box of non-zeros volume can't be inside a plane + def _hash_vals(self): return (("norm_vec[0]", self.norm_vec[0]), ("norm_vec[1]", self.norm_vec[1]), @@ -1273,7 +1531,8 @@ cdef class SliceSelector(SelectorObject): @cython.wraparound(False) @cython.cdivision(True) cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil: - cdef np.float64_t dist = self.difference(pos[self.axis], self.coord, self.axis) + cdef np.float64_t dist = self.periodic_difference( + pos[self.axis], self.coord, self.axis) if dist*dist < radius*radius: return 1 return 0 @@ -1287,6 +1546,15 @@ cdef class SliceSelector(SelectorObject): return 1 return 0 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + if left_edge[self.axis] - grid_eps <= self.coord < right_edge[self.axis]: + return 2 # a box with non-zero volume can't be inside a plane + return 0 + def _hash_vals(self): return (("axis", self.axis), ("coord", self.coord)) @@ -1370,8 +1638,10 @@ cdef class OrthoRaySelector(SelectorObject): @cython.wraparound(False) @cython.cdivision(True) cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil: - cdef np.float64_t dx = self.difference(pos[self.px_ax], self.px, self.px_ax) - cdef np.float64_t dy = self.difference(pos[self.py_ax], self.py, self.py_ax) + cdef np.float64_t dx = self.periodic_difference( + pos[self.px_ax], self.px, self.px_ax) + cdef np.float64_t dy = self.periodic_difference( + pos[self.py_ax], self.py, self.py_ax) if dx*dx + dy*dy < radius*radius: return 1 return 0 @@ -1386,6 +1656,16 @@ cdef class OrthoRaySelector(SelectorObject): return 1 return 0 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + if left_edge[self.px_ax] <= self.px < right_edge[self.px_ax] and \ + left_edge[self.py_ax] <= self.py < right_edge[self.py_ax] : + return 2 # a box of non-zero volume can't be inside a ray + return 0 + def _hash_vals(self): return (("px_ax", self.px_ax), ("py_ax", self.py_ax), @@ -1510,7 +1790,7 @@ cdef class RaySelector(SelectorObject): dtr[ni] = dt[i, j, k] ni += 1 if not (ni == ia.hits): - print ni, ia.hits + print(ni, ia.hits) free(ia) return dtr, tr @@ -1573,9 +1853,28 @@ cdef class RaySelector(SelectorObject): cdef int select_point(self, np.float64_t pos[3]) nogil: # two 0-volume constructs don't intersect return 0 - + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil: - # not implemented + + cdef int i + cdef np.float64_t length = norm(self.vec) + cdef np.float64_t r[3] + for i in range(3): + r[i] = pos[i] - self.p1[i] + # the projected position of the sphere along the ray + cdef np.float64_t l = dot(r, self.vec) / length + # the square of the impact parameter + cdef np.float64_t b_sqr = dot(r, r) - l*l + + # only accept spheres with radii larger than the impact parameter and + # with a projected position along the ray no more than a radius away + # from the ray 
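The ray/sphere test above projects the vector from the ray origin to the sphere centre onto the ray; the sphere is selected when the squared impact parameter is below the squared radius and the projected position lies within one radius of the segment. A NumPy sketch of the same test, again as an illustration rather than the patch's Cython (periodicity ignored, names hypothetical):

import numpy as np

def ray_intersects_sphere(p1, vec, center, radius):
    """Test a finite ray (origin ``p1``, extent vector ``vec``) against a sphere."""
    p1, vec, center = (np.asarray(a, dtype="float64") for a in (p1, vec, center))
    length = np.sqrt(np.dot(vec, vec))
    r = center - p1
    l = np.dot(r, vec) / length          # projected position along the ray
    b_sqr = np.dot(r, r) - l * l         # squared impact parameter
    return bool(-radius < l < length + radius and b_sqr < radius * radius)

print(ray_intersects_sphere([0, 0, 0], [1, 0, 0], [0.5, 0.05, 0.0], 0.1))  # True
print(ray_intersects_sphere([0, 0, 0], [1, 0, 0], [0.5, 0.50, 0.0], 0.1))  # False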
+ if -radius < l and l < (length+radius) and b_sqr < radius*radius: + return 1 + return 0 @cython.boundscheck(False) @@ -1609,6 +1908,32 @@ cdef class RaySelector(SelectorObject): free(ia) return rv + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int i + cdef np.uint8_t cm = 1 + cdef VolumeContainer vc + cdef IntegrationAccumulator ia + cdef np.float64_t dt, t + for i in range(3): + vc.left_edge[i] = left_edge[i] + vc.right_edge[i] = right_edge[i] + vc.dds[i] = right_edge[i] - left_edge[i] + vc.idds[i] = 1.0/vc.dds[i] + vc.dims[i] = 1 + t = dt = 0.0 + ia.t = &t + ia.dt = &dt + ia.child_mask = &cm + ia.hits = 0 + walk_volume(&vc, self.p1, self.vec, dt_sampler, &ia) + if ia.hits > 0: + return 2 # a box of non-zero volume cannot be inside a ray + return 0 + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) @@ -1713,7 +2038,7 @@ cdef class EllipsoidSelector(SelectorObject): dot_evec[0] = dot_evec[1] = dot_evec[2] = 0 # Calculate the rotated dot product for i in range(3): # axis - dist = self.difference(pos[i], self.center[i], i) + dist = self.periodic_difference(pos[i], self.center[i], i) for j in range(3): dot_evec[j] += dist * self.vec[j][i] dist = 0.0 @@ -1730,7 +2055,7 @@ cdef class EllipsoidSelector(SelectorObject): cdef int i cdef np.float64_t dist, dist2_max, dist2 = 0 for i in range(3): - dist = self.difference(pos[i], self.center[i], i) + dist = self.periodic_difference(pos[i], self.center[i], i) dist2 += dist * dist dist2_max = (self.mag[0] + radius) * (self.mag[0] + radius) if dist2 <= dist2_max: @@ -1753,7 +2078,7 @@ cdef class EllipsoidSelector(SelectorObject): dist = 0 for i in range(3): box_center = (right_edge[i] + left_edge[i])/2.0 - relcenter = self.difference(box_center, self.center[i], i) + relcenter = self.periodic_difference(box_center, self.center[i], i) edge = right_edge[i] - left_edge[i] closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0) dist += closest * closest @@ -1762,6 +2087,44 @@ cdef class EllipsoidSelector(SelectorObject): return 1 return 0 + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + # This is the sphere selection + cdef int i + cdef np.float64_t box_center, relcenter, closest, farthest, cdist, fdist, edge + if left_edge[0] <= self.center[0] <= right_edge[0] and \ + left_edge[1] <= self.center[1] <= right_edge[1] and \ + left_edge[2] <= self.center[2] <= right_edge[2]: + fdist = 0 + for i in range(3): + edge = right_edge[i] - left_edge[i] + box_center = (right_edge[i] + left_edge[i])/2.0 + relcenter = self.periodic_difference( + box_center, self.center[i], i) + farthest = relcenter + fclip(relcenter, -edge/2.0, edge/2.0) + fdist += farthest*farthest + if fdist >= self.mag[0]**2: return 2 + return 1 + # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/ + cdist = 0 + fdist = 0 + for i in range(3): + box_center = (right_edge[i] + left_edge[i])/2.0 + relcenter = self.periodic_difference(box_center, self.center[i], i) + edge = right_edge[i] - left_edge[i] + closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0) + farthest = relcenter + fclip(relcenter, -edge/2.0, edge/2.0) + cdist += closest * closest + fdist += farthest * farthest + if cdist > self.mag[0]**2: return 0 + if fdist < self.mag[0]**2: + return 1 + else: + 
return 2 + def _hash_vals(self): return (("vec[0][0]", self.vec[0][0]), ("vec[0][1]", self.vec[0][1]), @@ -1827,7 +2190,7 @@ cdef class OctreeSubsetSelector(SelectorObject): self.min_level = self.base_selector.min_level self.max_level = self.base_selector.max_level self.domain_id = dobj.domain_id - self.overlap_cells = 1 + self.overlap_cells = getattr(dobj.oct_handler, 'overlap_cells', 1) @cython.boundscheck(False) @cython.wraparound(False) @@ -1854,13 +2217,14 @@ cdef class OctreeSubsetSelector(SelectorObject): @cython.wraparound(False) @cython.cdivision(True) cdef int select_point(self, np.float64_t pos[3]) nogil: - return 1 + return self.base_selector.select_point(pos) @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) cdef int select_bbox(self, np.float64_t left_edge[3], np.float64_t right_edge[3]) nogil: + # return 1 return self.base_selector.select_bbox(left_edge, right_edge) @cython.boundscheck(False) @@ -1888,7 +2252,7 @@ cdef class IndexedOctreeSubsetSelector(SelectorObject): # This is a numpy array, which will be a bool of ndim 1 cdef np.uint64_t min_ind cdef np.uint64_t max_ind - cdef SelectorObject base_selector + cdef public SelectorObject base_selector cdef int filter_bbox cdef np.float64_t DLE[3] cdef np.float64_t DRE[3] @@ -1995,6 +2359,10 @@ cdef class AlwaysSelector(SelectorObject): np.float64_t right_edge[3]) nogil: return 1 + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + return 1 + def _hash_vals(self): return ("always", 1,) @@ -2056,6 +2424,14 @@ cdef class ComposeSelector(SelectorObject): else: return 0 + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int rv1 = self.selector1.select_bbox_edge(left_edge, right_edge) + if rv1 == 0: return 0 + cdef int rv2 = self.selector2.select_bbox_edge(left_edge, right_edge) + if rv2 == 0: return 0 + return max(rv1, rv2) + def _hash_vals(self): return (hash(self.selector1), hash(self.selector2)) @@ -2134,6 +2510,14 @@ cdef class BooleanANDSelector(BooleanSelector): if rv2 == 0: return 0 return 1 + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int rv1 = self.sel1.select_bbox_edge(left_edge, right_edge) + if rv1 == 0: return 0 + cdef int rv2 = self.sel2.select_bbox_edge(left_edge, right_edge) + if rv2 == 0: return 0 + return max(rv1, rv2) + cdef int select_grid(self, np.float64_t left_edge[3], np.float64_t right_edge[3], np.int32_t level, Oct *o = NULL) nogil: @@ -2178,6 +2562,14 @@ cdef class BooleanORSelector(BooleanSelector): if rv2 == 1: return 1 return 0 + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int rv1 = self.sel1.select_bbox_edge(left_edge, right_edge) + if rv1 == 1: return 1 + cdef int rv2 = self.sel2.select_bbox_edge(left_edge, right_edge) + if rv2 == 1: return 1 + return max(rv1, rv2) + cdef int select_grid(self, np.float64_t left_edge[3], np.float64_t right_edge[3], np.int32_t level, Oct *o = NULL) nogil: @@ -2185,6 +2577,7 @@ cdef class BooleanORSelector(BooleanSelector): if rv1 == 1: return 1 cdef int rv2 = self.sel2.select_grid(left_edge, right_edge, level, o) if rv2 == 1: return 1 + if (rv1 == 2) or (rv2 == 2): return 2 return 0 cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil: @@ -2220,6 +2613,13 @@ cdef class BooleanNOTSelector(BooleanSelector): # check anywhere else. 
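The boolean compositors above combine the 0/1/2 codes from ``select_bbox_edge``: an intersection short-circuits to 0 when either child misses the box and otherwise keeps the "most partial" answer, while a union short-circuits to 1 when either child fully contains the box. A tiny pure-Python restatement of those rules (illustrative only):

def and_bbox_edge(rv1, rv2):
    """Combine 0/1/2 overlap codes for an intersection of two selectors."""
    if rv1 == 0 or rv2 == 0:
        return 0          # either selector misses the box entirely
    return max(rv1, rv2)  # a partial child keeps the intersection partial

def or_bbox_edge(rv1, rv2):
    """Combine 0/1/2 overlap codes for a union of two selectors."""
    if rv1 == 1 or rv2 == 1:
        return 1          # full containment by either child is enough
    return max(rv1, rv2)  # 0 stays 0, partial stays partial

print(and_bbox_edge(1, 2), or_bbox_edge(0, 2))  # 2 2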
return 1 + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int rv1 = self.sel1.select_bbox_edge(left_edge, right_edge) + if rv1 == 0: return 1 + elif rv1 == 1: return 0 + return 2 + cdef int select_grid(self, np.float64_t left_edge[3], np.float64_t right_edge[3], np.int32_t level, Oct *o = NULL) nogil: @@ -2252,6 +2652,24 @@ cdef class BooleanXORSelector(BooleanSelector): # check anywhere else. return 1 + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + # Return 2 in cases where one or both selectors partially overlap since + # part of the bounding box could satisfy the condition unless the + # selectors are identical. + cdef int rv1 = self.sel1.select_bbox_edge(left_edge, right_edge) + cdef int rv2 = self.sel2.select_bbox_edge(left_edge, right_edge) + if rv1 == rv2: + if rv1 == 2: + # If not identical, part of the bbox will be touched by one + # selector and not the other. + # if self.sel1 == self.sel2: return 0 # requires gil + return 2 + return 0 + if rv1 == 0: return rv2 + if rv2 == 0: return rv1 + return 2 # part of bbox only touched by selector fully covering bbox + cdef int select_grid(self, np.float64_t left_edge[3], np.float64_t right_edge[3], np.int32_t level, Oct *o = NULL) nogil: @@ -2283,11 +2701,25 @@ cdef class BooleanXORSelector(BooleanSelector): cdef class BooleanNEGSelector(BooleanSelector): cdef int select_bbox(self, np.float64_t left_edge[3], - np.float64_t right_edge[3]) nogil: + np.float64_t right_edge[3]) nogil: # We always return True here, because we don't have a "fully included" # check anywhere else. return self.sel1.select_bbox(left_edge, right_edge) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int rv1 = self.sel1.select_bbox_edge(left_edge, right_edge) + if rv1 == 0: return 0 + cdef int rv2 = self.sel2.select_bbox_edge(left_edge, right_edge) + if rv2 == 1: + return 0 + elif rv2 == 0: + return rv1 + # If sel2 is partial, then sel1 - sel2 will be partial as long + # as sel1 != sel2 + # if self.sel1 == self.sel2: return 0 # requires gil + return 2 + cdef int select_grid(self, np.float64_t left_edge[3], np.float64_t right_edge[3], np.int32_t level, Oct *o = NULL) nogil: @@ -2342,6 +2774,23 @@ cdef class ChainedBooleanANDSelector(ChainedBooleanSelector): return 0 return 1 + @cython.cdivision(True) + @cython.boundscheck(False) + @cython.wraparound(False) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int selected = 1 + cdef int ret + with gil: + for i in range(self.n_obj): + ret = (self.selectors[i]).select_bbox_edge( + left_edge, right_edge) + if ret == 0: + return 0 + elif ret == 2: + selected = 2 + return selected + @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) @@ -2408,6 +2857,23 @@ cdef class ChainedBooleanORSelector(ChainedBooleanSelector): return 1 return 0 + @cython.cdivision(True) + @cython.boundscheck(False) + @cython.wraparound(False) + cdef int select_bbox_edge(self, np.float64_t left_edge[3], + np.float64_t right_edge[3]) nogil: + cdef int selected = 0 + cdef int ret + with gil: + for i in range(self.n_obj): + ret = (self.selectors[i]).select_bbox_edge( + left_edge, right_edge) + if ret == 1: + return 1 + elif ret == 2: + selected = 2 + return selected + @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) @@ -2460,4 +2926,3 @@ cdef class 
ChainedBooleanORSelector(ChainedBooleanSelector): return v union_selector = ChainedBooleanORSelector - diff --git a/yt/geometry/tests/test_grid_container.py b/yt/geometry/tests/test_grid_container.py index 5f729943160..a8dd8f653cf 100644 --- a/yt/geometry/tests/test_grid_container.py +++ b/yt/geometry/tests/test_grid_container.py @@ -1,17 +1,3 @@ -""" -Tests for GridTree - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np import random diff --git a/yt/geometry/tests/test_neighbor_search.py b/yt/geometry/tests/test_neighbor_search.py index 5cf346c48c5..191ae3db1d8 100644 --- a/yt/geometry/tests/test_neighbor_search.py +++ b/yt/geometry/tests/test_neighbor_search.py @@ -1,23 +1,7 @@ -""" -Tests for neighbor finding - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.fields.particle_fields import \ - add_nearest_neighbor_field, \ - add_nearest_neighbor_value_field + add_nearest_neighbor_field from yt.testing import \ fake_particle_ds, \ assert_equal, \ @@ -25,6 +9,9 @@ def test_neighbor_search(): + # skip for now, in principle we can reimplement this in the demeshening + import nose + raise nose.SkipTest np.random.seed(0x4d3d3d3) ds = fake_particle_ds(npart = 16**3) ds.periodicity = (True, True, True) @@ -57,28 +44,3 @@ def test_neighbor_search(): #dd.field_data.pop(("all", "particle_radius")) assert_equal((min_in == 63).sum(), min_in.size) assert_array_almost_equal(nearest_neighbors, all_neighbors) - -def test_neighbor_value_search(): - np.random.seed(0x4d3d3d3) - ds = fake_particle_ds(npart = 16**3, over_refine_factor = 2) - ds.periodicity = (True, True, True) - ds.index - fn, = add_nearest_neighbor_value_field("all", "particle_position", "particle_mass", - ds.field_info) - dd = ds.all_data() - # Set up our positions onto which the field will be deposited - index_pos = np.array([dd["index",ax] for ax in 'xyz']) * dd["index","x"].uq - particle_pos = dd["particle_position"] - values_in = dd["particle_mass"] - values_out = dd[fn] - for i in range(index_pos.shape[0]): - r2 = particle_pos[:,0]*0 - r2 = r2 * r2 - for j in range(3): - DR = (index_pos[i,j] - particle_pos[:,j]) - DRo = DR.copy() - DR[DRo > ds.domain_width[j]/2.0] -= ds.domain_width[j] - DR[DRo < -ds.domain_width[j]/2.0] += ds.domain_width[j] - r2 += DR*DR - radius = np.sqrt(r2) - assert(values_in[np.argmin(radius)] == values_out[i]) diff --git a/yt/geometry/tests/test_particle_octree.py b/yt/geometry/tests/test_particle_octree.py index 423c0ea655a..0e193c845b5 100644 --- a/yt/geometry/tests/test_particle_octree.py +++ b/yt/geometry/tests/test_particle_octree.py @@ -1,44 +1,31 @@ -""" -Tests for particle octree - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
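The removed ``test_neighbor_value_search`` above validated the deposited field by brute force: for every sample point it computed periodic distances to all particles and checked that the value of the nearest particle was returned. A condensed NumPy sketch of that style of check (illustrative; the array names are stand-ins):

import numpy as np

def brute_force_nearest(grid_points, particle_pos, particle_vals, domain_width):
    """Nearest-particle value at each grid point, using minimum-image distances."""
    out = np.empty(len(grid_points), dtype=particle_vals.dtype)
    for i, gp in enumerate(grid_points):
        dr = particle_pos - gp
        # wrap displacements across the periodic boundaries
        dr -= domain_width * np.round(dr / domain_width)
        out[i] = particle_vals[np.argmin(np.sum(dr * dr, axis=1))]
    return out

rng = np.random.RandomState(0)
pos = rng.random_sample((64, 3))
vals = rng.random_sample(64)
print(brute_force_nearest(np.array([[0.5, 0.5, 0.5]]), pos, vals, 1.0))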
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np +import os -from yt.frontends.stream.data_structures import load_particles -from yt.geometry.oct_container import \ - OctreeContainer from yt.geometry.particle_oct_container import \ ParticleOctreeContainer, \ - ParticleRegions + ParticleBitmap from yt.geometry.oct_container import _ORDER_MAX -from yt.geometry.selection_routines import RegionSelector, AlwaysSelector +from yt.geometry.selection_routines import RegionSelector from yt.testing import \ - assert_almost_equal, \ assert_equal, \ - requires_file + assert_true, \ + assert_array_equal from yt.units.unit_registry import UnitRegistry from yt.units.yt_array import YTArray -from yt.utilities.lib.geometry_utils import get_morton_indices +from yt.utilities.lib.geometry_utils import get_morton_indices, \ + get_morton_points, \ + get_hilbert_points, \ + get_hilbert_indices import yt.units.dimensions as dimensions -import yt.data_objects.api NPART = 32**3 DLE = np.array([0.0, 0.0, 0.0]) DRE = np.array([10.0, 10.0, 10.0]) -dx = (DRE-DLE)/(2**_ORDER_MAX) +DW = (DRE-DLE) +PER = np.array([0, 0, 0], 'bool') +dx = DW/(2**_ORDER_MAX) def test_add_particles_random(): np.random.seed(int(0x4d3d3d3)) @@ -67,99 +54,6 @@ def test_add_particles_random(): # level_count += octree.count_levels(total_count.size-1, dom, mask) assert_equal(total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]) -def test_save_load_octree(): - np.random.seed(int(0x4d3d3d3)) - pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE - octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE) - octree.n_ref = 32 - for i in range(3): - np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i]) - # Convert to integers - pos = np.floor((pos - DLE)/dx).astype("uint64") - morton = get_morton_indices(pos) - morton.sort() - octree.add(morton) - octree.finalize() - saved = octree.save_octree() - loaded = OctreeContainer.load_octree(saved) - always = AlwaysSelector(None) - ir1 = octree.ires(always) - ir2 = loaded.ires(always) - assert_equal(ir1, ir2) - - fc1 = octree.fcoords(always) - fc2 = loaded.fcoords(always) - assert_equal(fc1, fc2) - - fw1 = octree.fwidth(always) - fw2 = loaded.fwidth(always) - assert_equal(fw1, fw2) - -def test_particle_octree_counts(): - np.random.seed(int(0x4d3d3d3)) - # Eight times as many! 
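In the octree tests above, particle positions are first quantized onto the integer grid of the maximum octree order and then turned into Morton keys with ``get_morton_indices``, which interleaves the bits of the three integer coordinates. Below is a pure-Python sketch of that encoding for a single point; it only illustrates the idea, it is not yt's optimized Cython implementation, and the dimension ordering of the interleave is a convention detail that may differ.

import numpy as np

def morton_key(pos, DLE, DRE, order=4):
    """Quantize one position onto a 2**order grid and interleave the bits."""
    DLE, DRE = np.asarray(DLE, dtype="float64"), np.asarray(DRE, dtype="float64")
    dx = (DRE - DLE) / (1 << order)
    ijk = np.floor((np.asarray(pos) - DLE) / dx).astype("uint64")
    key = 0
    for bit in range(order):
        for dim in range(3):
            # bit interleaving; which dimension is most significant is a convention
            key |= ((int(ijk[dim]) >> bit) & 1) << (3 * bit + dim)
    return key

print(morton_key([5.0, 5.0, 5.0], [0.0, 0.0, 0.0], [10.0, 10.0, 10.0]))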
- data = {} - bbox = [] - for i, ax in enumerate('xyz'): - DW = DRE[i] - DLE[i] - LE = DLE[i] - data["particle_position_%s" % ax] = \ - np.random.normal(0.5, scale=0.05, size=(NPART*8)) * DW + LE - bbox.append( [DLE[i], DRE[i]] ) - bbox = np.array(bbox) - for n_ref in [16, 32, 64, 512, 1024]: - ds = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref) - dd = ds.all_data() - bi = dd["io","mesh_id"] - v = np.bincount(bi.astype("intp")) - assert_equal(v.max() <= n_ref, True) - bi2 = dd["all","mesh_id"] - assert_equal(bi, bi2) - -def test_particle_overrefine(): - np.random.seed(int(0x4d3d3d3)) - data = {} - bbox = [] - for i, ax in enumerate('xyz'): - DW = DRE[i] - DLE[i] - LE = DLE[i] - data["particle_position_%s" % ax] = \ - np.random.normal(0.5, scale=0.05, size=(NPART)) * DW + LE - bbox.append( [DLE[i], DRE[i]] ) - bbox = np.array(bbox) - _attrs = ('icoords', 'fcoords', 'fwidth', 'ires') - for n_ref in [16, 32, 64, 512, 1024]: - ds1 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref) - dd1 = ds1.all_data() - v1 = dict((a, getattr(dd1, a)) for a in _attrs) - cv1 = dd1["cell_volume"].sum(dtype="float64") - for over_refine in [1, 2, 3]: - f = 1 << (3*(over_refine-1)) - ds2 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref, - over_refine_factor = over_refine) - dd2 = ds2.all_data() - v2 = dict((a, getattr(dd2, a)) for a in _attrs) - for a in sorted(v1): - assert_equal(v1[a].size * f, v2[a].size) - cv2 = dd2["cell_volume"].sum(dtype="float64") - assert_equal(cv1, cv2) - -index_ptype_snap = "snapshot_033/snap_033.0.hdf5" -@requires_file(index_ptype_snap) -def test_particle_index_ptype(): - ds = yt.load(index_ptype_snap) - ds_all = yt.load(index_ptype_snap, index_ptype="all") - ds_pt0 = yt.load(index_ptype_snap, index_ptype="PartType0") - dd = ds.all_data() - dd_all = ds_all.all_data() - dd_pt0 = ds_pt0.all_data() - cv = dd['index', "cell_volume"] - cv_all = dd_all['index', "cell_volume"] - cv_pt0 = dd_pt0['index', "cell_volume"] - assert_equal(cv.shape, cv_all.shape) - assert_almost_equal( - cv.sum(dtype="float64"), cv_pt0.sum(dtype="float64")) - class FakeDS: domain_left_edge = None domain_right_edge = None @@ -169,79 +63,573 @@ class FakeDS: periodicity = (False, False, False) class FakeRegion: - def __init__(self, nfiles): + def __init__(self, nfiles, periodic=False): self.ds = FakeDS() self.ds.domain_left_edge = YTArray([0.0, 0.0, 0.0], "code_length", registry=self.ds.unit_registry) self.ds.domain_right_edge = YTArray([nfiles, nfiles, nfiles], "code_length", - registry=self.ds.unit_registry) + registry=self.ds.unit_registry, + dtype='float64') self.ds.domain_width = self.ds.domain_right_edge - \ self.ds.domain_left_edge + self.ds.periodicity = (periodic, periodic, periodic) self.nfiles = nfiles - def set_edges(self, file_id): - self.left_edge = YTArray([file_id + 0.1, 0.0, 0.0], + def set_edges(self, file_id, dx = 0.1): + self.left_edge = YTArray([file_id + dx, 0.0, 0.0], 'code_length', registry=self.ds.unit_registry) - self.right_edge = YTArray([file_id+1 - 0.1, self.nfiles, self.nfiles], + self.right_edge = YTArray([file_id+1 - dx, self.nfiles, self.nfiles], 'code_length', registry=self.ds.unit_registry) -def test_particle_regions(): + +class FakeBoxRegion: + def __init__(self, nfiles, left_edge, right_edge): + self.ds = FakeDS() + self.ds.domain_left_edge = YTArray(left_edge, "code_length", + registry=self.ds.unit_registry) + self.ds.domain_right_edge = YTArray(right_edge, "code_length", + registry=self.ds.unit_registry) + self.ds.domain_width = self.ds.domain_right_edge - \ + 
self.ds.domain_left_edge + self.nfiles = nfiles + + def set_edges(self, center, width): + self.left_edge = self.ds.domain_left_edge + self.ds.domain_width*(center-width/2) + self.right_edge = self.ds.domain_left_edge + self.ds.domain_width*(center+width/2) + + +def FakeBitmap(npart, nfiles, order1, order2, + left_edge=None, right_edge=None, periodicity=None, + decomp='sliced', buff=0.1, distrib='uniform', + fname=None): + if left_edge is None: + left_edge = np.array([0.0, 0.0, 0.0]) + if right_edge is None: + right_edge = np.array([1.0, 1.0, 1.0]) + if periodicity is None: + periodicity = np.array([0, 0, 0], 'bool') + reg = ParticleBitmap(left_edge, right_edge, periodicity, 12345, nfiles, + order1, order2) + # Load from file if it exists + if isinstance(fname,str) and os.path.isfile(fname): + reg.load_bitmasks(fname) + else: + # Create positions for each file + posgen = yield_fake_decomp(decomp, npart, nfiles, + left_edge, right_edge, buff=buff, + distrib=distrib) + # Coarse index + max_npart = 0 + for i, (pos, hsml) in enumerate(posgen): + max_npart = max(max_npart, pos.shape[0]) + reg._coarse_index_data_file(pos, hsml, i) + reg._set_coarse_index_data_file(i) + if i != (nfiles-1): + raise RuntimeError("There are positions for {} files, but there should be {}.".format(i+1,nfiles)) + # Refined index + mask = reg.masks.sum(axis=1).astype('uint8') + sub_mi1 = np.zeros(max_npart, "uint64") + sub_mi2 = np.zeros(max_npart, "uint64") + posgen = yield_fake_decomp(decomp, npart, nfiles, + left_edge, right_edge, buff=buff, + distrib=distrib) + coll = None + for i, (pos, hsml) in enumerate(posgen): + nsub_mi, coll = reg._refined_index_data_file( + coll, pos, hsml, mask, sub_mi1, sub_mi2, i, + 0, count_threshold = 1, mask_threshold = 2) + reg.bitmasks.append(i, coll) + # Save if file name provided + if isinstance(fname, str): + reg.save_bitmasks(fname) + return reg + + +def test_bitmap_no_collisions(): + # Test init for slabs of points in x + left_edge = np.array([0.0, 0.0, 0.0]) + right_edge = np.array([1.0, 1.0, 1.0]) + periodicity = np.array([0, 0, 0], 'bool') + npart = 100 + nfiles = 2 + file_hash = 12345 + order1 = 2 + order2 = 2 + reg = ParticleBitmap(left_edge, right_edge, periodicity, file_hash, nfiles, + order1, order2) + # Coarse index + posgen = yield_fake_decomp('sliced', npart, nfiles, + left_edge, right_edge) + max_npart = 0 + for i, (pos, hsml) in enumerate(posgen): + reg._coarse_index_data_file(pos, hsml, i) + max_npart = max(max_npart, pos.shape[0]) + reg._set_coarse_index_data_file(i) + assert_equal(reg.count_total(i), np.sum(reg.masks[:,i])) + mask = reg.masks.sum(axis=1).astype('uint8') + ncoll = np.sum(mask > 1) + nc, nm = reg.find_collisions_coarse() + assert_equal(nc, 0, "%d coarse collisions" % nc) + assert_equal(ncoll, nc, "%d in mask, %d in bitmap" % (ncoll, nc)) + # Refined index + sub_mi1 = np.zeros(max_npart, "uint64") + sub_mi2 = np.zeros(max_npart, "uint64") + posgen = yield_fake_decomp('sliced', npart, nfiles, + left_edge, right_edge) + coll = None + for i, (pos, hsml) in enumerate(posgen): + nsub_mi, coll = reg._refined_index_data_file( + coll, pos, hsml, mask, sub_mi1, sub_mi2, i, + 0, count_threshold = 1, mask_threshold = 2) + reg.bitmasks.append(i, coll) + assert_equal(reg.count_refined(i), 0) + nr, nm = reg.find_collisions_refined() + assert_equal(nr, 0, "%d collisions" % nr) + +def test_bitmap_collisions(): + # Test init for slabs of points in x + left_edge = np.array([0.0, 0.0, 0.0]) + right_edge = np.array([1.0, 1.0, 1.0]) + periodicity = np.array([0, 0, 0], 
'bool') + nfiles = 2 + file_hash = 12345 + order1 = 2 + order2 = 2 + reg = ParticleBitmap(left_edge, right_edge, periodicity, file_hash, nfiles, + order1, order2) + # Use same points for all files to force collisions + pos = cell_centers(order1+order2, left_edge, right_edge) + hsml = None + # Coarse index + max_npart = 0 + for i in range(nfiles): + reg._coarse_index_data_file(pos, hsml, i) + max_npart = max(max_npart, pos.shape[0]) + reg._set_coarse_index_data_file(i) + assert_equal(reg.count_total(i), np.sum(reg.masks[:,i])) + mask = reg.masks.sum(axis=1).astype('uint8') + ncoll = np.sum(mask > 1) + nc, nm = reg.find_collisions_coarse() + assert_equal(ncoll, nc, "%d in mask, %d in bitmap" % (ncoll, nc)) + assert_equal(nc, 2**(3*order1), "%d coarse collisions" % nc) + # Refined index + sub_mi1 = np.zeros(max_npart, "uint64") + sub_mi2 = np.zeros(max_npart, "uint64") + for i in range(nfiles): + nsub_mi, coll = reg._refined_index_data_file( + None, pos, hsml, mask, sub_mi1, sub_mi2, i, + 0, count_threshold = 1, mask_threshold = 2) + reg.bitmasks.append(i, coll) + assert_equal(reg.count_refined(i), ncoll) + nr, nm = reg.find_collisions_refined() + assert_equal(nr, 2**(3*(order1+order2)), "%d collisions" % nr) + + +def test_bitmap_save_load(): + # Test init for slabs of points in x + left_edge = np.array([0.0, 0.0, 0.0]) + right_edge = np.array([1.0, 1.0, 1.0]) + periodicity = np.array([0, 0, 0], 'bool') + npart = NPART + file_hash = 12345 + nfiles = 32 + order1 = 2 + order2 = 2 + fname_fmt = "temp_bitmasks{}.dat" + i = 0 + fname = fname_fmt.format(i) + while os.path.isfile(fname): + i += 1 + fname = fname_fmt.format(i) + # Create bitmap and save to file + reg0 = FakeBitmap(npart, nfiles, order1, order2, + left_edge, right_edge, periodicity) + reg0.save_bitmasks(fname) + # Attempt to load bitmap + reg1 = ParticleBitmap(left_edge, right_edge, periodicity, file_hash, nfiles, + order1, order2) + reg1.load_bitmasks(fname) + assert_true(reg0.iseq_bitmask(reg1)) + # Remove file + os.remove(fname) + + +def test_bitmap_select(): + np.random.seed(int(0x4d3d3d3)) + dx = 0.1 + for periodic in [False, True]: + for nfiles in [2, 15, 31, 32, 33]: + # Now we create particles + # Note: we set order1 to log2(nfiles) here for testing purposes to + # ensure no collisions + order1 = int(np.ceil(np.log2(nfiles))) # Ensures zero collisions + order2 = 2 # No overlap for N = nfiles + exact_division = (nfiles == (1 << order1)) + div = float(nfiles)/float(1 << order1) + reg = FakeBitmap(nfiles**3, nfiles, order1, order2, decomp='grid', + left_edge=np.array([0.0, 0.0, 0.0]), + right_edge=np.array([nfiles, nfiles, nfiles]), + periodicity=np.array([periodic, periodic, periodic])) + # Loop over regions selecting single files + fr = FakeRegion(nfiles, periodic=periodic) + for i in range(nfiles): + fr.set_edges(i, dx) + selector = RegionSelector(fr) + (df, gf), (dmask, gmask) = reg.identify_data_files(selector, ngz=1) + if exact_division: + assert_equal(len(df), 1, "selector {}, number of files".format(i)) + assert_equal(df[0], i, "selector {}, file selected".format(i)) + if periodic and (nfiles != 2): + ans_gf = sorted([(i-1) % nfiles, (i+1) % nfiles]) + elif (i == 0): + ans_gf = [i+1] + elif (i == (nfiles - 1)): + ans_gf = [i-1] + else: + ans_gf = [i-1, i+1] + assert_equal(len(gf), len(ans_gf), "selector {}, number of ghost files".format(i)) + for i in range(len(gf)): + assert_equal(gf[i], ans_gf[i], "selector {}, ghost files".format(i)) + + else: + lf_frac = np.floor(float(fr.left_edge[0])/div)*div + rf_frac = 
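The ``ParticleBitmap`` tests above exercise a two-level index: a coarse Morton mesh with ``2**order1`` cells per dimension records which files touch each cell, and cells claimed by more than one file (collisions) are refined by a further ``order2`` levels, which is why the fully colliding case counts ``2**(3*order1)`` coarse and ``2**(3*(order1+order2))`` refined collisions. A sketch of how one position maps onto the two meshes (illustrative helper, not part of the patch):

import numpy as np

def coarse_and_refined_cell(pos, left_edge, right_edge, order1=2, order2=2):
    """Integer (i, j, k) cells of one position on the coarse and refined meshes."""
    pos = np.asarray(pos, dtype="float64")
    left_edge = np.asarray(left_edge, dtype="float64")
    right_edge = np.asarray(right_edge, dtype="float64")
    width = right_edge - left_edge
    coarse = np.floor((pos - left_edge) / width * (1 << order1)).astype("int64")
    refined = np.floor((pos - left_edge) / width * (1 << (order1 + order2))).astype("int64")
    # each refined cell nests inside its coarse parent
    assert np.all(refined >> order2 == coarse)
    return coarse, refined

print(coarse_and_refined_cell([0.9, 0.1, 0.5], [0., 0., 0.], [1., 1., 1.]))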
np.floor(float(fr.right_edge[0])/div)*div + # Selected files + lf = int(np.floor(lf_frac) if ((lf_frac % 0.5) == 0) else np.round(lf_frac)) + rf = int(np.floor(rf_frac) if ((rf_frac % 0.5) == 0) else np.round(rf_frac)) + if (rf+0.5) >= (rf_frac+div): rf -= 1 + if (lf+0.5) <= (lf_frac-div): lf += 1 + df_ans = np.arange(max(lf,0),min(rf+1,nfiles)) + assert_array_equal(df, df_ans, "selector {}, file array".format(i)) + # Ghost zones selected files + lf_ghost = int(np.floor(lf_frac - div) if (((lf_frac-div) % 0.5) == 0) else np.round(lf_frac - div)) + rf_ghost = int(np.floor(rf_frac + div) if (((rf_frac+div) % 0.5) == 0) else np.round(rf_frac + div)) + if not periodic: + lf_ghost = max(lf_ghost, 0) + rf_ghost = min(rf_ghost, nfiles-1) + if (rf_ghost+0.5) >= (rf_frac+2*div): rf_ghost -= 1 + gf_ans = [] + if lf_ghost < lf: gf_ans.append(lf_ghost % nfiles) + if rf_ghost > rf: gf_ans.append(rf_ghost % nfiles) + gf_ans = np.array(sorted(gf_ans)) + assert_array_equal(gf, gf_ans, "selector {}, ghost file array".format(i)) + + +def cell_centers(order, left_edge, right_edge): + ndim = left_edge.size + ncells = 2**order + dx = (right_edge - left_edge)/(2*ncells) + d = [np.linspace(left_edge[i]+dx[i], right_edge[i]-dx[i], ncells) for i in range(ndim)] + dd = np.meshgrid(*d) + return np.vstack([x.flatten() for x in dd]).T + +def fake_decomp_random(npart, nfiles, ifile, DLE, DRE, + buff=0.0): + np.random.seed(int(0x4d3d3d3)+ifile) + nPF = int(npart/nfiles) + nR = npart % nfiles + if ifile == 0: + nPF+=nR + pos = np.empty((nPF, 3), 'float64') + for i in range(3): + pos[:,i] = np.random.uniform(DLE[i], DRE[i], nPF) + return pos + +def fake_decomp_sliced(npart, nfiles, ifile, DLE, DRE, + buff=0.0): + np.random.seed(int(0x4d3d3d3)+ifile) + DW = DRE - DLE + div = DW/nfiles + nPF = int(npart/nfiles) + nR = npart % nfiles + inp = nPF + if ifile == 0: inp += nR + iLE = DLE[0] + ifile*div[0] + iRE = iLE + div[0] + if ifile != 0: + iLE -= buff*div[0] + if ifile != (nfiles-1): + iRE += buff*div[0] + pos = np.empty((inp,3), dtype='float') + pos[:,0] = np.random.uniform(iLE, iRE, inp) + for i in range(1,3): + pos[:,i] = np.random.uniform(DLE[i], DRE[i], inp) + return pos + +def makeall_decomp_hilbert_gaussian(npart, nfiles, DLE, DRE, + buff=0.0, order=6, verbose=False, + fname_base=None, nchunk=10, + width=None, center=None, + frac_random=0.1): + import pickle np.random.seed(int(0x4d3d3d3)) - # We are going to test having 31, 127, 128 and 257 data files. - for nfiles in [2, 31, 127, 128, 129]: - # Now we create particles - # Note: we set N to nfiles here for testing purposes. 
Inside the code - # we set it to min(N, 256) - N = nfiles - reg = ParticleRegions([0.0, 0.0, 0.0, 0.0], - [nfiles, nfiles, nfiles], - [N, N, N], nfiles) - Y, Z = np.mgrid[0.1 : nfiles - 0.1 : nfiles * 1j, - 0.1 : nfiles - 0.1 : nfiles * 1j] - X = 0.5 * np.ones(Y.shape, dtype="float64") - pos = np.array([X.ravel(),Y.ravel(),Z.ravel()], - dtype="float64").transpose() - for i in range(nfiles): - reg.add_data_file(pos, i) - pos[:,0] += 1.0 - pos[:,0] = 0.5 - fr = FakeRegion(nfiles) - for i in range(nfiles): - fr.set_edges(i) - selector = RegionSelector(fr) - df = reg.identify_data_files(selector) - assert_equal(len(df), 1) - assert_equal(df[0], i) - pos[:,0] += 1.0 - - for mask in reg.masks: - maxs = np.unique(mask.max(axis=-1).max(axis=-1)) - mins = np.unique(mask.min(axis=-1).min(axis=-1)) - assert_equal(maxs, mins) - assert_equal(maxs, np.unique(mask)) - -def test_position_location(): + DW = DRE - DLE + if fname_base is None: + fname_base = 'hilbert{}_gaussian_np{}_nf{}_'.format(order,npart,nfiles) + if width is None: + width = 0.1*DW + if center is None: + center = DLE+0.5*DW + def load_pos(file_id): + filename = fname_base+'file{}'.format(file_id) + if os.path.isfile(filename): + fd = open(filename,'rb') + positions = pickle.load(fd) + fd.close() + else: + positions = np.empty((0,3), dtype='float64') + return positions + def save_pos(file_id,positions): + filename = fname_base+'file{}'.format(file_id) + fd = open(filename,'wb') + pickle.dump(positions,fd) + fd.close() + npart_rnd = int(frac_random*npart) + npart_gau = npart - npart_rnd + dim_hilbert = (1<= all_octs[oi]["left_edge"])) - assert(np.all(this_oct <= all_octs[oi]["right_edge"])) - -os33 = "snapshot_033/snap_033.0.hdf5" -@requires_file(os33) -def test_get_smallest_dx(): - ds = yt.load(os33) - small_dx = ( - ds.domain_width / (ds.domain_dimensions*2.**(ds.index.max_level))) - assert_equal(ds.index.get_smallest_dx(), small_dx) + DW = DRE - DLE + dim_hilbert = (1<>> keywords['dpi'] = (50, 100, 200) >>> keywords['cmap'] = ('arbre', 'kelp') >>> list_of_kwargs = expand_keywords(keywords) - >>> print list_of_kwargs + >>> print(list_of_kwargs) array([{'cmap': 'arbre', 'dpi': 50}, {'cmap': 'kelp', 'dpi': 100}, {'cmap': 'arbre', 'dpi': 200}], dtype=object) >>> list_of_kwargs = expand_keywords(keywords, full=True) - >>> print list_of_kwargs + >>> print(list_of_kwargs) array([{'cmap': 'arbre', 'dpi': 50}, {'cmap': 'arbre', 'dpi': 100}, @@ -605,7 +667,7 @@ def expand_keywords(keywords, full=False): # Determine the maximum number of values any of the keywords has num_lists = 0 for val in keywords.values(): - if isinstance(val, string_types): + if isinstance(val, str): num_lists = max(1.0, num_lists) else: num_lists = max(len(val), num_lists) @@ -622,7 +684,7 @@ def expand_keywords(keywords, full=False): list_of_kwarg_dicts[i] = {} for key in keywords.keys(): # if it's a string, use it (there's only one) - if isinstance(keywords[key], string_types): + if isinstance(keywords[key], str): list_of_kwarg_dicts[i][key] = keywords[key] # if there are more options, use the i'th val elif i < len(keywords[key]): @@ -976,7 +1038,7 @@ def _func(*args, **kwargs): ha = hashlib.md5(_rv.tostring()).hexdigest() fn = "func_results_ref_%s.cpkl" % (name) with open(fn, "wb") as f: - cPickle.dump( (mi, ma, st, su, si, ha), f) + pickle.dump( (mi, ma, st, su, si, ha), f) return rv return _func from yt.mods import unparsed_args @@ -1004,7 +1066,7 @@ def _func(*args, **kwargs): print("Answers need to be created with --answer-reference .") return False with open(fn, 
"rb") as f: - ref = cPickle.load(f) + ref = pickle.load(f) print("Sizes: %s (%s, %s)" % (vals[4] == ref[4], vals[4], ref[4])) assert_allclose(vals[0], ref[0], 1e-8, err_msg="min") assert_allclose(vals[1], ref[1], 1e-8, err_msg="max") @@ -1103,7 +1165,7 @@ def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs): try: des = des.in_units(act.units) - except YTUnitOperationError: + except UnitOperationError: raise AssertionError("Units of actual (%s) and desired (%s) do not have " "equivalent dimensions" % (act.units, des.units)) @@ -1117,7 +1179,7 @@ def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs): try: at = at.in_units(act.units) - except YTUnitOperationError: + except UnitOperationError: raise AssertionError("Units of atol (%s) and actual (%s) do not have " "equivalent dimensions" % (at.units, act.units)) @@ -1216,3 +1278,112 @@ def setUp(self): def tearDown(self): os.chdir(self.curdir) shutil.rmtree(self.tmpdir) + +class ParticleSelectionComparison: + """ + This is a test helper class that takes a particle dataset, caches the + particles it has on disk (manually reading them using lower-level IO + routines) and then received a data object that it compares against manually + running the data object's selection routines. All supplied data objects + must be created from the input dataset. + """ + + def __init__(self, ds): + self.ds = ds + # Construct an index so that we get all the data_files + ds.index + particles = {} + # hsml is the smoothing length we use for radial selection + hsml = {} + for data_file in ds.index.data_files: + for ptype, pos_arr in ds.index.io._yield_coordinates(data_file): + particles.setdefault(ptype, []).append(pos_arr) + if ptype in getattr(ds, '_sph_ptypes', ()): + hsml.setdefault(ptype, []).append(ds.index.io._get_smoothing_length( + data_file, pos_arr.dtype, pos_arr.shape)) + for ptype in particles: + particles[ptype] = np.concatenate(particles[ptype]) + if ptype in hsml: + hsml[ptype] = np.concatenate(hsml[ptype]) + self.particles = particles + self.hsml = hsml + + def compare_dobj_selection(self, dobj): + for ptype in sorted(self.particles): + x, y, z = self.particles[ptype].T + # Set our radii to zero for now, I guess? + radii = self.hsml.get(ptype, 0.0) + sel_index = dobj.selector.select_points(x, y, z, radii) + if sel_index is None: + sel_pos = np.empty((0, 3)) + else: + sel_pos = self.particles[ptype][sel_index, :] + + obj_results = [] + for chunk in dobj.chunks([], "io"): + obj_results.append(chunk[ptype, "particle_position"]) + if any(_.size > 0 for _ in obj_results): + obj_results = np.concatenate(obj_results, axis = 0) + else: + obj_results = np.empty((0, 3)) + assert_equal(sel_pos, obj_results) + + def run_defaults(self): + """ + This runs lots of samples that touch different types of wraparounds. + + Specifically, it does: + + * sphere in center with radius 0.1 unitary + * sphere in center with radius 0.2 unitary + * sphere in each of the eight corners of the domain with radius 0.1 unitary + * sphere in center with radius 0.5 unitary + * box that covers 0.1 .. 0.9 + * box from 0.8 .. 
+        * box from 0.3..0.6, 0.2..0.8, 0.0..0.1
+        """
+        sp1 = self.ds.sphere("c", (0.1, "unitary"))
+        self.compare_dobj_selection(sp1)
+
+        sp2 = self.ds.sphere("c", (0.2, "unitary"))
+        self.compare_dobj_selection(sp2)
+
+        centers = [[0.04, 0.5, 0.5],
+                   [0.5, 0.04, 0.5],
+                   [0.5, 0.5, 0.04],
+                   [0.04, 0.04, 0.04],
+                   [0.96, 0.5, 0.5],
+                   [0.5, 0.96, 0.5],
+                   [0.5, 0.5, 0.96],
+                   [0.96, 0.96, 0.96]]
+        r = self.ds.quan(0.1, "unitary")
+        for center in centers:
+            c = self.ds.arr(center, "unitary")
+            if not all(self.ds.periodicity):
+                # filter out the periodic bits for non-periodic datasets
+                if any(c - r < self.ds.domain_left_edge) or \
+                   any(c + r > self.ds.domain_right_edge):
+                    continue
+            sp = self.ds.sphere(c, (0.1, "unitary"))
+            self.compare_dobj_selection(sp)
+
+        sp = self.ds.sphere("c", (0.5, "unitary"))
+        self.compare_dobj_selection(sp)
+
+        dd = self.ds.all_data()
+        self.compare_dobj_selection(dd)
+
+        reg1 = self.ds.r[ (0.1, 'unitary'):(0.9, 'unitary'),
+                          (0.1, 'unitary'):(0.9, 'unitary'),
+                          (0.1, 'unitary'):(0.9, 'unitary')]
+        self.compare_dobj_selection(reg1)
+
+        reg2 = self.ds.r[ (0.8, 'unitary'):(0.85, 'unitary'),
+                          (0.8, 'unitary'):(0.85, 'unitary'),
+                          (0.8, 'unitary'):(0.85, 'unitary')]
+        self.compare_dobj_selection(reg2)
+
+        reg3 = self.ds.r[ (0.3, 'unitary'):(0.6, 'unitary'),
+                          (0.2, 'unitary'):(0.8, 'unitary'),
+                          (0.0, 'unitary'):(0.1, 'unitary')]
+        self.compare_dobj_selection(reg3)
diff --git a/yt/tests/test_funcs.py b/yt/tests/test_funcs.py
index acec7da21c4..64300822c4c 100644
--- a/yt/tests/test_funcs.py
+++ b/yt/tests/test_funcs.py
@@ -1,13 +1,3 @@
-"""
-Tests for yt.funcs
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2018, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
 from nose.tools import assert_raises
 from yt import YTQuantity
diff --git a/yt/tests/test_testing.py b/yt/tests/test_testing.py
index b2b4ad0edf1..aeab7f5ddc8 100644
--- a/yt/tests/test_testing.py
+++ b/yt/tests/test_testing.py
@@ -1,13 +1,3 @@
-"""
-Tests for yt.testing
-"""
-#-----------------------------------------------------------------------------
-# Copyright (c) 2018, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
 import matplotlib
 import numpy as np
 import pytest
diff --git a/yt/units/__init__.py b/yt/units/__init__.py
index 2173bb597ea..de967690a72 100644
--- a/yt/units/__init__.py
+++ b/yt/units/__init__.py
@@ -1,17 +1,114 @@
-from yt.units import unit_symbols
-from yt.utilities import physical_constants
+from yt.units.physical_constants import *
+from yt.units.unit_symbols import *
+from yt.utilities.exceptions import YTArrayTooLargeToDisplay
+from unyt.array import (
+    loadtxt,
+    savetxt,
+    uconcatenate,
+    ucross,
+    udot,
+    uhstack,
+    uintersect1d,
+    unorm,
+    ustack,
+    uunion1d,
+    uvstack,
+    unyt_array,
+    unyt_quantity,
+)
+from unyt.unit_object import Unit, define_unit # NOQA: F401
+from unyt.unit_registry import UnitRegistry # NOQA: F401
+from unyt.unit_systems import UnitSystem # NOQA: F401
-from yt.units.yt_array import YTQuantity
+YTArray = unyt_array
+YTQuantity = unyt_quantity
-# function to only import quantities into this namespace
-# we go through the trouble of doing this instead of "import *"
-# to avoid including extraneous variables (e.g. floating point
-# constants used to *construct* a physical constant) in this namespace
-def import_quantities(module, global_namespace):
-    for key, value in module.__dict__.items():
-        if isinstance(value, YTQuantity):
-            global_namespace[key] = value
+from yt.units.unit_symbols import _SymbolContainer
+from yt.units.physical_constants import _ConstantContainer
+
+class UnitContainer(object):
+    """A container for units and constants to associate with a dataset
+
+    This object is usually accessed on a Dataset instance via ``ds.units``.
+
+    Parameters
+    ----------
+    registry : UnitRegistry instance
+        A unit registry to associate with units and constants accessed
+        on this object.
+
+    Example
+    -------
+
+    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> code_mass = ds.units.code_mass
+    >>> (12*code_mass).to("Msun")
+    unyt_quantity(4.89719136e+11, 'Msun')
+    >>> code_mass.registry is ds.unit_registry
+    True
+    >>> ds.units.newtons_constant
+    unyt_quantity(6.67384e-08, 'cm**3/(g*s**2)')
+
+    """
+    def __init__(self, registry):
+        self.unit_symbols = _SymbolContainer(registry)
+        self.physical_constants = _ConstantContainer(registry)
+
+    def __dir__(self):
+        all_dir = self.unit_symbols.__dir__() + self.physical_constants.__dir__()
+        all_dir += object.__dir__(self)
+        return list(set(all_dir))
+
+    def __getattr__(self, item):
+        pc = self.physical_constants
+        us = self.unit_symbols
+        ret = getattr(us, item, None) or getattr(pc, item, None)
+        if not ret:
+            raise AttributeError(item)
+        return ret
+
+def display_ytarray(arr):
+    r"""
+    Display a YTArray in a Jupyter widget that enables unit switching.
+
+    The array returned by this function is read-only, and only works with
+    arrays of size 3 or lower.
+
+    Parameters
+    ----------
+    arr : YTArray
+        The Array to display; must be of size 3 or lower.
+ + Examples + -------- + >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") + >>> display_ytarray(ds.domain_width) + """ + if arr.size > 3: + raise YTArrayTooLargeToDisplay(arr.size, 3) + import ipywidgets + unit_registry = arr.units.registry + equiv = unit_registry.list_same_dimensions(arr.units) + dropdown = ipywidgets.Dropdown(options = sorted(equiv), value = str(arr.units)) + def arr_updater(arr, texts): + def _value_updater(change): + arr2 = arr.in_units(change['new']) + if arr2.shape == (): + arr2 = [arr2] + for v, t in zip(arr2, texts): + t.value = str(v.value) + return _value_updater + if arr.shape == (): + arr_iter = [arr] + else: + arr_iter = arr + texts = [ipywidgets.Text(value = str(_.value), disabled = True) + for _ in arr_iter] + dropdown.observe(arr_updater(arr, texts), names="value") + return ipywidgets.HBox(texts + [dropdown]) + +def _wrap_display_ytarray(arr): + from IPython.core.display import display + display(display_ytarray(arr)) -import_quantities(unit_symbols, globals()) -import_quantities(physical_constants, globals()) diff --git a/yt/units/dimensions.py b/yt/units/dimensions.py index a041c917c83..28784640f5f 100644 --- a/yt/units/dimensions.py +++ b/yt/units/dimensions.py @@ -1,100 +1 @@ -""" -Base dimensions - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from sympy import Symbol, sympify, Rational - -mass = Symbol("(mass)", positive=True) -length = Symbol("(length)", positive=True) -time = Symbol("(time)", positive=True) -temperature = Symbol("(temperature)", positive=True) -angle = Symbol("(angle)", positive=True) -current_mks = Symbol("(current_mks)", positive=True) -dimensionless = sympify(1) - -base_dimensions = [mass, length, time, temperature, angle, current_mks, - dimensionless] - -# -# Derived dimensions -# - -rate = 1 / time -frequency = rate - -solid_angle = angle * angle - -velocity = length / time -acceleration = length / time**2 -jerk = length / time**3 -snap = length / time**4 -crackle = length / time**5 -pop = length / time**6 - -area = length * length -volume = area * length -momentum = mass * velocity -force = mass * acceleration -pressure = force / area -energy = force * length -power = energy / time -flux = power / area -specific_flux = flux / rate -number_density = 1/(length*length*length) -density = mass * number_density -angular_momentum = mass*length*velocity -specific_angular_momentum = angular_momentum / mass -specific_energy = energy / mass -count_flux = 1 / (area*time) -count_intensity = count_flux / solid_angle - -# Gaussian electromagnetic units -charge_cgs = (energy * length)**Rational(1, 2) # proper 1/2 power -current_cgs = charge_cgs / time -electric_field_cgs = charge_cgs / length**2 -magnetic_field_cgs = electric_field_cgs -electric_potential_cgs = energy / charge_cgs -resistance_cgs = electric_potential_cgs / current_cgs - -# SI electromagnetic units -charge_mks = current_mks * time -electric_field_mks = force / charge_mks -magnetic_field_mks = electric_field_mks / velocity -electric_potential_mks = energy / charge_mks -resistance_mks = electric_potential_mks / current_mks - -# Since cgs is our default, I'm adding these aliases for backwards-compatibility -charge = charge_cgs -electric_field = 
electric_field_cgs -magnetic_field = magnetic_field_cgs -electric_potential = electric_potential_cgs -resistance = resistance_cgs -current = current_cgs - -derived_dimensions = [rate, velocity, acceleration, jerk, snap, crackle, pop, - momentum, force, energy, power, charge_cgs, electric_field_cgs, - magnetic_field_cgs, solid_angle, flux, specific_flux, volume, - area, current_cgs, charge_mks, electric_field_mks, - magnetic_field_mks, electric_potential_cgs, electric_potential_mks, - resistance_cgs, resistance_mks] - -dimensions = base_dimensions + derived_dimensions - -em_dimensions = {magnetic_field_mks:magnetic_field_cgs, - charge_mks:charge_cgs, - current_mks:current_cgs, - electric_potential_mks:electric_potential_cgs, - resistance_mks:resistance_cgs} - -for k,v in list(em_dimensions.items()): - em_dimensions[v] = k +from unyt.dimensions import * diff --git a/yt/units/equivalencies.py b/yt/units/equivalencies.py index f992c80f4b6..94ef2c96de6 100644 --- a/yt/units/equivalencies.py +++ b/yt/units/equivalencies.py @@ -1,217 +1 @@ -""" -Equivalencies between different kinds of units - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.units.dimensions import temperature, mass, energy, length, rate, \ - velocity, dimensionless, density, number_density, flux, current_cgs, \ - current_mks, charge_cgs, charge_mks, magnetic_field_cgs, magnetic_field_mks, \ - electric_potential_cgs, electric_potential_mks, electric_field_cgs, \ - electric_field_mks, resistance_cgs, resistance_mks - -from yt.utilities.physical_ratios import speed_of_light_cm_per_s -from yt.extern.six import add_metaclass -import numpy as np - -equivalence_registry = {} - -class RegisteredEquivalence(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - if hasattr(cls, "_type_name") and not cls._skip_add: - equivalence_registry[cls._type_name] = cls - -@add_metaclass(RegisteredEquivalence) -class Equivalence(object): - _skip_add = False - _one_way = False - def __init__(self): - import yt.utilities.physical_constants as pc - self.pc = pc - -class NumberDensityEquivalence(Equivalence): - _type_name = "number_density" - dims = (density,number_density,) - - def convert(self, x, new_dims, mu=0.6): - if new_dims == number_density: - return x/(mu*self.pc.mh) - elif new_dims == density: - return x*mu*self.pc.mh - - def __str__(self): - return "number density: density <-> number density" - -class ThermalEquivalence(Equivalence): - _type_name = "thermal" - dims = (temperature,energy,) - - def convert(self, x, new_dims): - if new_dims == energy: - return self.pc.kboltz*x - elif new_dims == temperature: - return x/self.pc.kboltz - - def __str__(self): - return "thermal: temperature <-> energy" - -class MassEnergyEquivalence(Equivalence): - _type_name = "mass_energy" - dims = (mass,energy,) - - def convert(self, x, new_dims): - if new_dims == energy: - return x*self.pc.clight*self.pc.clight - elif new_dims == mass: - return x/(self.pc.clight*self.pc.clight) - - def __str__(self): - return "mass_energy: mass <-> energy" - -class SpectralEquivalence(Equivalence): - _type_name = "spectral" - dims = (length,rate,energy,) - - def convert(self, x, new_dims): - if new_dims == energy: - if x.units.dimensions == 
length: - nu = self.pc.clight/x - elif x.units.dimensions == rate: - nu = x - return self.pc.hcgs*nu - elif new_dims == length: - if x.units.dimensions == rate: - return self.pc.clight/x - elif x.units.dimensions == energy: - return self.pc.hcgs*self.pc.clight/x - elif new_dims == rate: - if x.units.dimensions == length: - return self.pc.clight/x - elif x.units.dimensions == energy: - return x/self.pc.hcgs - - def __str__(self): - return "spectral: length <-> rate <-> energy" - -class SoundSpeedEquivalence(Equivalence): - _type_name = "sound_speed" - dims = (velocity,temperature,energy,) - - def convert(self, x, new_dims, mu=0.6, gamma=5./3.): - if new_dims == velocity: - if x.units.dimensions == temperature: - kT = self.pc.kboltz*x - elif x.units.dimensions == energy: - kT = x - return np.sqrt(gamma*kT/(mu*self.pc.mh)) - else: - kT = x*x*mu*self.pc.mh/gamma - if new_dims == temperature: - return kT/self.pc.kboltz - else: - return kT - - def __str__(self): - return "sound_speed (ideal gas): velocity <-> temperature <-> energy" - -class LorentzEquivalence(Equivalence): - _type_name = "lorentz" - dims = (dimensionless,velocity,) - - def convert(self, x, new_dims): - if new_dims == dimensionless: - beta = x.in_cgs()/self.pc.clight - return 1./np.sqrt(1.-beta**2) - elif new_dims == velocity: - return self.pc.clight*np.sqrt(1.-1./(x*x)) - - def __str__(self): - return "lorentz: velocity <-> dimensionless" - -class SchwarzschildEquivalence(Equivalence): - _type_name = "schwarzschild" - dims = (mass,length,) - - def convert(self, x, new_dims): - if new_dims == length: - return 2.*self.pc.G*x/(self.pc.clight*self.pc.clight) - elif new_dims == mass: - return 0.5*x*self.pc.clight*self.pc.clight/self.pc.G - - def __str__(self): - return "schwarzschild: mass <-> length" - -class ComptonEquivalence(Equivalence): - _type_name = "compton" - dims = (mass,length,) - - def convert(self, x, new_dims): - return self.pc.hcgs/(x*self.pc.clight) - - def __str__(self): - return "compton: mass <-> length" - -class EffectiveTemperature(Equivalence): - _type_name = "effective_temperature" - dims = (flux,temperature,) - - def convert(self, x, new_dims): - if new_dims == flux: - return self.pc.stefan_boltzmann_constant_cgs*x**4 - elif new_dims == temperature: - return (x/self.pc.stefan_boltzmann_constant_cgs)**0.25 - - def __str__(self): - return "effective_temperature: flux <-> temperature" - -em_conversions = { - charge_mks:("esu", 0.1*speed_of_light_cm_per_s), - magnetic_field_mks:("gauss", 1.0e4), - current_mks:("statA", 0.1*speed_of_light_cm_per_s), - electric_potential_mks:("statV", 1.0e-8*speed_of_light_cm_per_s), - resistance_mks:("statohm", 1.0e9/(speed_of_light_cm_per_s**2)), - charge_cgs:("C", 10.0/speed_of_light_cm_per_s), - magnetic_field_cgs:("T", 1.0e-4), - current_cgs:("A", 10.0/speed_of_light_cm_per_s), - electric_potential_cgs:("V", 1.0e8/speed_of_light_cm_per_s), - resistance_cgs:("ohm", speed_of_light_cm_per_s**2*1.0e-9), -} - -class ElectromagneticSI(Equivalence): - _type_name = "SI" - _one_way = True - dims = (current_cgs, charge_cgs, magnetic_field_cgs, - electric_field_cgs, electric_potential_cgs, - resistance_cgs) - - def convert(self, x, new_dims): - old_dims = x.units.dimensions - new_units, convert_factor = em_conversions[old_dims] - return x.in_cgs().v*convert_factor, new_units - - def __str__(self): - return "SI: EM CGS unit -> EM SI unit" - -class ElectromagneticCGS(Equivalence): - _type_name = "CGS" - _one_way = True - dims = (current_mks, charge_mks, magnetic_field_mks, - 
electric_field_mks, electric_potential_mks, - resistance_mks) - - def convert(self, x, new_dims): - old_dims = x.units.dimensions - new_units, convert_factor = em_conversions[old_dims] - return x.in_mks().v*convert_factor, new_units - - def __str__(self): - return "CGS: EM SI unit -> EM CGS unit" - +from unyt.equivalencies import * diff --git a/yt/units/physical_constants.py b/yt/units/physical_constants.py new file mode 100644 index 00000000000..51a1cff4860 --- /dev/null +++ b/yt/units/physical_constants.py @@ -0,0 +1,46 @@ +from yt.units.unit_registry import default_unit_registry +from unyt.array import unyt_quantity +from unyt.unit_systems import add_constants + +add_constants(globals(), registry=default_unit_registry) + +class _ConstantContainer(object): + """A container for physical constants to associate with a dataset. + + This object is usually accessed on a Dataset instance via + ``ds.units.physical_constants``. + + Parameters + ---------- + registry : UnitRegistry instance + A unit registry to associate with units constants accessed on + this object. + + Example + ------- + + >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") + >>> ds.units.physical_constants.newtons_constant + unyt_quantity(6.67384e-08, 'cm**3/(g*s**2)') + """ + + def __init__(self, registry): + self._registry = registry + self._cache = {} + + def __dir__(self): + ret = [p for p in globals() if not p.startswith('_')] + object.__dir__(self) + return list(set(ret)) + + def __getattr__(self, item): + if item in self._cache: + return self._cache[item] + if item in globals(): + const = globals()[item].copy() + const.units.registry = self._registry + const.convert_to_base(self._registry.unit_system) + const_v, const_unit = const.v, const.units + ret = unyt_quantity(const_v, const_unit, registry=self._registry) + self._cache[item] = ret + return ret + raise AttributeError(item) diff --git a/yt/units/pint_conversions.py b/yt/units/pint_conversions.py deleted file mode 100644 index 68856b892ee..00000000000 --- a/yt/units/pint_conversions.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -Stuff for pint conversions - -""" -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -pint_aliases = { - "meter": "m", - "second": "s", - "gram": "g", - "joule": "J", - "franklin": "esu", - "dyne": "dyn", - "parsec": "pc", - "mole": "mol", - "rankine": "R", - "watt": "W", - "pascal": "Pa", - "tesla": "T", - "kelvin": "K", - "year": "yr", - "minute": "min", - "hour": "hr", - "volt": "V", - "ampere": "A", - "foot": "ft", - "coulomb": "C", - "newton": "N", - "hertz": "Hz", - "arcsecond": "arcsec", - "arcminute": "arcmin", - "speed_of_light": "c", - "esu_per_second": "statA", - "atomic_mass_unit": "amu", - "astronomical_unit": "au", - "light_year": "ly", - "electron_mass": "me", - "proton_mass": "mp", -} - -pint_prefixes = { - 'yotta':'Y', - 'zetta':'Z', - 'exa':'E', - 'peta':'P', - 'tera':'T', - 'giga':'G', - 'mega':'M', - 'kilo':'k', - 'deci':'d', - 'centi':'c', - 'milli':'m', - 'micro':'u', - 'nano':'n', - 'pico':'p', - 'femto':'f', - 'atto':'a', - 'zepto':'z', - 'yocto':'y', -} - -def convert_pint_units(unit_expr): - uexpr = unit_expr - pfx = '' - for prefix in pint_prefixes: - if unit_expr.startswith(prefix): - pfx = pint_prefixes[prefix] - uexpr = uexpr[len(prefix):] - break - if uexpr in pint_aliases: - uexpr = pint_aliases[uexpr] - if pfx == '': - return uexpr - else: - return pfx+uexpr - # If we can't figure it out just pass it and see - # what happens - return unit_expr diff --git a/yt/units/tests/test_define_unit.py b/yt/units/tests/test_define_unit.py deleted file mode 100644 index 22c7d6ebfc3..00000000000 --- a/yt/units/tests/test_define_unit.py +++ /dev/null @@ -1,25 +0,0 @@ -from yt.units.unit_object import define_unit -from yt.units.yt_array import YTQuantity -from yt.convenience import load -from yt.testing import requires_file - -def test_define_unit(): - define_unit("mph", (1.0, "mile/hr")) - a = YTQuantity(2.0, "mph") - b = YTQuantity(1.0, "mile") - c = YTQuantity(1.0, "hr") - assert a == 2.0*b/c - d = YTQuantity(1000.0, "cm**3") - define_unit("L", d, prefixable=True) - e = YTQuantity(1.0, "mL") - f = YTQuantity(1.0, "cm**3") - assert e == f - -gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300" -@requires_file(gslr) -def test_define_unit_dataset(): - ds = load(gslr) - ds.define_unit("fortnight", (14.0, "day")) - a = ds.quan(1.0, "fortnight") - b = ds.quan(3600.0*24.0*14.0, "code_time") - assert a == b diff --git a/yt/units/tests/test_magnetic_code_units.py b/yt/units/tests/test_magnetic_code_units.py new file mode 100644 index 00000000000..b335a35951b --- /dev/null +++ b/yt/units/tests/test_magnetic_code_units.py @@ -0,0 +1,49 @@ +import numpy as np +from yt.frontends.stream.api import load_uniform_grid +from yt.testing import assert_allclose + +def test_magnetic_code_units(): + + sqrt4pi = np.sqrt(4.0*np.pi) + ddims = (16,)*3 + data = {"density": (np.random.uniform(size=ddims), "g/cm**3")} + + ds1 = load_uniform_grid(data, ddims, magnetic_unit=(sqrt4pi, "gauss"), + unit_system='cgs') + + assert_allclose(ds1.magnetic_unit.value, sqrt4pi) + assert str(ds1.magnetic_unit.units) == "G" + + mucu = ds1.magnetic_unit.to("code_magnetic") + assert_allclose(mucu.value, 1.0) + assert str(mucu.units) == "code_magnetic" + + ds2 = load_uniform_grid(data, ddims, magnetic_unit=(1.0, "T"), + unit_system='cgs') + + assert_allclose(ds2.magnetic_unit.value, 10000.) 
+ assert str(ds2.magnetic_unit.units) == "G" + + mucu = ds2.magnetic_unit.to("code_magnetic") + assert_allclose(mucu.value, 1.0) + assert str(mucu.units) == "code_magnetic" + + ds3 = load_uniform_grid(data, ddims, magnetic_unit=(1.0, "T"), + unit_system='mks') + + assert_allclose(ds3.magnetic_unit.value, 1.0) + assert str(ds3.magnetic_unit.units) == "T" + + mucu = ds3.magnetic_unit.to("code_magnetic") + assert_allclose(mucu.value, 1.0) + assert str(mucu.units) == "code_magnetic" + + ds4 = load_uniform_grid(data, ddims, magnetic_unit=(1.0, "gauss"), + unit_system='mks') + + assert_allclose(ds4.magnetic_unit.value, 1.0e-4) + assert str(ds4.magnetic_unit.units) == "T" + + mucu = ds4.magnetic_unit.to("code_magnetic") + assert_allclose(mucu.value, 1.0) + assert str(mucu.units) == "code_magnetic" diff --git a/yt/units/tests/test_unit_systems.py b/yt/units/tests/test_unit_systems.py deleted file mode 100644 index ddf73b4078a..00000000000 --- a/yt/units/tests/test_unit_systems.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -Test unit systems. - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.units.unit_object import Unit, unit_system_registry -from yt.units.unit_systems import UnitSystem -from yt.units import dimensions -from yt.convenience import load -from yt.testing import assert_almost_equal, assert_allclose, requires_file, \ - fake_random_ds, disable_dataset_cache - -def test_unit_systems(): - goofy_unit_system = UnitSystem("goofy", "ly", "lbm", "hr", temperature_unit="R", - angle_unit="arcsec", current_mks_unit="mA") - assert goofy_unit_system["temperature"] == Unit("R") - assert goofy_unit_system[dimensions.solid_angle] == Unit("arcsec**2") - assert goofy_unit_system["energy"] == Unit("lbm*ly**2/hr**2") - goofy_unit_system["energy"] = "eV" - assert goofy_unit_system["energy"] == Unit("eV") - assert goofy_unit_system["magnetic_field_mks"] == Unit("lbm/(hr**2*mA)") - assert "goofy" in unit_system_registry - -test_units = {} -test_units["mks"] = {"density": "kg/m**3", - "kinetic_energy": "Pa", - "velocity_magnitude": "m/s", - "velocity_divergence": "1/s", - "density_gradient_x": "kg/m**4"} -test_units["imperial"] = {"density": "lbm/ft**3", - "kinetic_energy": "lbf/ft**2", - "velocity_magnitude": "ft/s", - "velocity_divergence": "1/s", - "density_gradient_x": "lbm/ft**4"} -test_units["galactic"] = {"density": "Msun/kpc**3", - "kinetic_energy": "Msun/(Myr**2*kpc)", - "velocity_magnitude": "kpc/Myr", - "velocity_divergence": "1/Myr", - "density_gradient_x": "Msun/kpc**4"} -test_units["code"] = {"density": "code_mass/code_length**3", - "kinetic_energy": "code_pressure", - "velocity_magnitude": "code_velocity", - "velocity_divergence": "code_velocity/code_length", - "density_gradient_x": "code_mass/code_length**4"} - -test_fields = ["density", - "kinetic_energy", - "velocity_divergence", - "density_gradient_x", - "velocity_magnitude"] - -gslr = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300" -@requires_file(gslr) -@disable_dataset_cache -def test_fields_diff_systems_sloshing(): - ds_cgs = load(gslr) - dd_cgs = ds_cgs.sphere("c", (15., "kpc")) - - for us in test_units: - ds = load(gslr, unit_system=us) - dd = ds.sphere("c", (15.,"kpc")) - for field in test_fields: - v1 = 
dd_cgs[field].in_base(us) - v2 = dd[field] - assert_almost_equal(v1.v, v2.v) - assert str(v2.units) == test_units[us][field] - -etc = "enzo_tiny_cosmology/DD0046/DD0046" -@requires_file(etc) -@disable_dataset_cache -def test_fields_diff_systems_etc(): - ds_cgs = load(etc) - dd_cgs = ds_cgs.sphere("max", (500., "kpc")) - - for us in test_units: - ds = load(etc, unit_system=us) - dd = ds.sphere("max", (500., "kpc")) - for field in test_fields: - if us == "code": - v1 = dd_cgs[field].in_units(test_units["code"][field]) - else: - v1 = dd_cgs[field].in_base(us) - v2 = dd[field] - assert_almost_equal(v1.v, v2.v) - assert str(v2.units) == test_units[us][field] - -wdm = 'WDMerger_hdf5_chk_1000/WDMerger_hdf5_chk_1000.hdf5' -@requires_file(wdm) -@disable_dataset_cache -def test_tesla_magnetic_unit(): - for us in ['cgs', 'mks', 'code']: - ds = load(wdm, unit_system=us, - units_override={'magnetic_unit': (1.0, 'T')}) - ad = ds.all_data() - dens = ad['density'] - magx = ad['magx'] - magnetic_field_x = ad['magnetic_field_r'] - - if us == 'cgs': - assert str(dens.units) == 'g/cm**3' - assert str(magx.units) == 'code_magnetic' - assert magx.uq == ds.quan(1e4, 'G') - assert str(magnetic_field_x.units) == 'gauss' - assert_allclose(magx.value, magnetic_field_x.value/1e4) - assert_allclose( - magnetic_field_x.to_equivalent('T', 'SI').value, - magnetic_field_x.value/1e4) - - if us == 'mks': - assert str(dens.units) == 'kg/m**3' - assert str(magx.units) == 'code_magnetic' - assert magx.uq == ds.quan(1, 'T') - assert str(magnetic_field_x.units) == 'T' - assert_allclose(magx.value, magnetic_field_x.value) - assert_allclose(magnetic_field_x.to_equivalent('G', 'CGS').value, - magnetic_field_x.value*1e4) - - if us == 'code': - assert str(dens.units) == 'code_mass/code_length**3' - assert str(magx.units) == 'code_magnetic' - assert magx.uq == ds.quan(1, 'T') - assert str(magnetic_field_x.units) == 'code_magnetic' - assert_allclose(magx.value, magnetic_field_x.value) - assert_allclose(magnetic_field_x.to_equivalent('G', 'CGS').value, - magnetic_field_x.value*1e4) - -def test_code_unit_system_uniqueness(): - ds1 = fake_random_ds(64) - ds2 = fake_random_ds(64, length_unit=2.0) - ds3 = fake_random_ds(64) - - assert ds1.unit_registry.unit_system_id != ds2.unit_registry.unit_system_id - assert ds1.unit_registry.unit_system_id == ds3.unit_registry.unit_system_id - - assert ds1.unit_registry.unit_system_id in unit_system_registry.keys() - assert ds2.unit_registry.unit_system_id in unit_system_registry.keys() diff --git a/yt/units/tests/test_units.py b/yt/units/tests/test_units.py deleted file mode 100644 index 255aa5803d9..00000000000 --- a/yt/units/tests/test_units.py +++ /dev/null @@ -1,537 +0,0 @@ -""" -Test symbolic unit handling. - - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -import numpy as np -from numpy.testing import \ - assert_array_almost_equal_nulp, \ - assert_raises, assert_equal -from nose.tools import assert_true -import operator -from sympy import Symbol -from yt.testing import \ - fake_random_ds, assert_allclose_units, \ - assert_almost_equal -from yt.units.unit_registry import UnitRegistry -from yt.units import electrostatic_unit, elementary_charge -from yt.units.unit_object import default_unit_registry - -# dimensions -from yt.units.dimensions import \ - mass, length, time, temperature, energy, magnetic_field, power, rate -# functions -from yt.units.unit_object import get_conversion_factor -# classes -from yt.units.unit_object import Unit, UnitParseError, InvalidUnitOperation -# objects -from yt.units.unit_lookup_table import \ - default_unit_symbol_lut, unit_prefixes, prefixable_units -import yt.units.unit_symbols as unit_symbols -# unit definitions -from yt.utilities.physical_ratios import \ - cm_per_pc, sec_per_year, cm_per_km, cm_per_mpc, \ - mass_sun_grams - -def test_no_conflicting_symbols(): - """ - Check unit symbol definitions for conflicts. - - """ - full_set = set(default_unit_symbol_lut.keys()) - - # go through all possible prefix combos - for symbol in default_unit_symbol_lut.keys(): - if symbol in prefixable_units: - keys = unit_prefixes.keys() - else: - keys = [symbol] - for prefix in keys: - new_symbol = "%s%s" % (prefix, symbol) - - # test if we have seen this symbol - if new_symbol in full_set: - print("Duplicate symbol: %s" % new_symbol) - raise RuntimeError - - full_set.add(new_symbol) - -def test_dimensionless(): - """ - Create dimensionless unit and check attributes. - - """ - u1 = Unit() - - assert_true(u1.is_dimensionless) - assert_true(u1.expr == 1) - assert_true(u1.base_value == 1) - assert_true(u1.dimensions == 1) - - u2 = Unit("") - - assert_true(u2.is_dimensionless) - assert_true(u2.expr == 1) - assert_true(u2.base_value == 1) - assert_true(u2.dimensions == 1) - - assert_equal(u1.latex_repr, '') - assert_equal(u2.latex_repr, '') - -# -# Start init tests -# - -def test_create_from_string(): - """ - Create units with strings and check attributes. - - """ - - u1 = Unit("g * cm**2 * s**-2") - assert_true(u1.dimensions == energy) - assert_true(u1.base_value == 1.0) - - # make sure order doesn't matter - u2 = Unit("cm**2 * s**-2 * g") - assert_true(u2.dimensions == energy) - assert_true(u2.base_value == 1.0) - - # Test rationals - u3 = Unit("g**0.5 * cm**-0.5 * s**-1") - assert_true(u3.dimensions == magnetic_field) - assert_true(u3.base_value == 1.0) - - # sqrt functions - u4 = Unit("sqrt(g)/sqrt(cm)/s") - assert_true(u4.dimensions == magnetic_field) - assert_true(u4.base_value == 1.0) - - # commutative sqrt function - u5 = Unit("sqrt(g/cm)/s") - assert_true(u5.dimensions == magnetic_field) - assert_true(u5.base_value == 1.0) - - # nonzero CGS conversion factor - u6 = Unit("Msun/pc**3") - assert_true(u6.dimensions == mass/length**3) - assert_array_almost_equal_nulp(np.array([u6.base_value]), np.array([mass_sun_grams/cm_per_pc**3])) - - assert_raises(UnitParseError, Unit, 'm**m') - assert_raises(UnitParseError, Unit, 'm**g') - assert_raises(UnitParseError, Unit, 'm+g') - assert_raises(UnitParseError, Unit, 'm-g') - - -def test_create_from_expr(): - """ - Create units from sympy Exprs and check attributes. 
- - """ - pc_cgs = cm_per_pc - yr_cgs = sec_per_year - - # Symbol expr - s1 = Symbol("pc", positive=True) - s2 = Symbol("yr", positive=True) - # Mul expr - s3 = s1 * s2 - # Pow expr - s4 = s1**2 * s2**(-1) - - u1 = Unit(s1) - u2 = Unit(s2) - u3 = Unit(s3) - u4 = Unit(s4) - - assert_true(u1.expr == s1) - assert_true(u2.expr == s2) - assert_true(u3.expr == s3) - assert_true(u4.expr == s4) - - assert_allclose_units(u1.base_value, pc_cgs, 1e-12) - assert_allclose_units(u2.base_value, yr_cgs, 1e-12) - assert_allclose_units(u3.base_value, pc_cgs * yr_cgs, 1e-12) - assert_allclose_units(u4.base_value, pc_cgs**2 / yr_cgs, 1e-12) - - assert_true(u1.dimensions == length) - assert_true(u2.dimensions == time) - assert_true(u3.dimensions == length * time) - assert_true(u4.dimensions == length**2 / time) - - -def test_create_with_duplicate_dimensions(): - """ - Create units with overlapping dimensions. Ex: km/Mpc. - - """ - - u1 = Unit("erg * s**-1") - u2 = Unit("km/s/Mpc") - km_cgs = cm_per_km - Mpc_cgs = cm_per_mpc - - assert_true(u1.base_value == 1) - assert_true(u1.dimensions == power) - - assert_allclose_units(u2.base_value, km_cgs / Mpc_cgs, 1e-12) - assert_true(u2.dimensions == rate) - -def test_create_new_symbol(): - """ - Create unit with unknown symbol. - - """ - u1 = Unit("abc", base_value=42, dimensions=(mass/time)) - - assert_true(u1.expr == Symbol("abc", positive=True)) - assert_true(u1.base_value == 42) - assert_true(u1.dimensions == mass / time) - - u1 = Unit("abc", base_value=42, dimensions=length**3) - - assert_true(u1.expr == Symbol("abc", positive=True)) - assert_true(u1.base_value == 42) - assert_true(u1.dimensions == length**3) - - u1 = Unit("abc", base_value=42, dimensions=length*(mass*length)) - - assert_true(u1.expr == Symbol("abc", positive=True)) - assert_true(u1.base_value == 42) - assert_true( u1.dimensions == length**2*mass) - - assert_raises(UnitParseError, Unit, 'abc', base_value=42, - dimensions=length**length) - assert_raises(UnitParseError, Unit, 'abc', base_value=42, - dimensions=length**(length*length)) - assert_raises(UnitParseError, Unit, 'abc', base_value=42, - dimensions=length-mass) - assert_raises(UnitParseError, Unit, 'abc', base_value=42, - dimensions=length+mass) - -def test_create_fail_on_unknown_symbol(): - """ - Fail to create unit with unknown symbol, without base_value and dimensions. - - """ - try: - Unit(Symbol("jigawatts")) - except UnitParseError: - assert_true(True) - else: - assert_true(False) - -def test_create_fail_on_bad_symbol_type(): - """ - Fail to create unit with bad symbol type. - - """ - try: - Unit([1]) # something other than Expr and str - except UnitParseError: - assert_true(True) - else: - assert_true(False) - -def test_create_fail_on_bad_dimensions_type(): - """ - Fail to create unit with bad dimensions type. - - """ - try: - Unit("a", base_value=1, dimensions="(mass)") - except UnitParseError: - assert_true(True) - else: - assert_true(False) - - -def test_create_fail_on_dimensions_content(): - """ - Fail to create unit with bad dimensions expr. - - """ - a = Symbol("a") - - try: - Unit("a", base_value=1, dimensions=a) - except UnitParseError: - pass - else: - assert_true(False) - - -def test_create_fail_on_base_value_type(): - """ - Fail to create unit with bad base_value type. - - """ - try: - Unit("a", base_value="a", dimensions=(mass/time)) - except UnitParseError: - assert_true(True) - else: - assert_true(False) - -# -# End init tests -# - -def test_string_representation(): - """ - Check unit string representation. 
- - """ - pc = Unit("pc") - Myr = Unit("Myr") - speed = pc / Myr - dimensionless = Unit() - - assert_true(str(pc) == "pc") - assert_true(str(Myr) == "Myr") - assert_true(str(speed) == "pc/Myr") - assert_true(repr(speed) == "pc/Myr") - assert_true(str(dimensionless) == "dimensionless") - -# -# Start operation tests -# - -def test_multiplication(): - """ - Multiply two units. - - """ - msun_cgs = mass_sun_grams - pc_cgs = cm_per_pc - - # Create symbols - msun_sym = Symbol("Msun", positive=True) - pc_sym = Symbol("pc", positive=True) - s_sym = Symbol("s", positive=True) - - # Create units - u1 = Unit("Msun") - u2 = Unit("pc") - - # Mul operation - u3 = u1 * u2 - - assert_true(u3.expr == msun_sym * pc_sym) - assert_allclose_units(u3.base_value, msun_cgs * pc_cgs, 1e-12) - assert_true(u3.dimensions == mass * length) - - # Pow and Mul operations - u4 = Unit("pc**2") - u5 = Unit("Msun * s") - - u6 = u4 * u5 - - assert_true(u6.expr == pc_sym**2 * msun_sym * s_sym) - assert_allclose_units(u6.base_value, pc_cgs**2 * msun_cgs, 1e-12) - assert_true(u6.dimensions == length**2 * mass * time) - - -def test_division(): - """ - Divide two units. - - """ - pc_cgs = cm_per_pc - km_cgs = cm_per_km - - # Create symbols - pc_sym = Symbol("pc", positive=True) - km_sym = Symbol("km", positive=True) - s_sym = Symbol("s", positive=True) - - # Create units - u1 = Unit("pc") - u2 = Unit("km * s") - - u3 = u1 / u2 - - assert_true(u3.expr == pc_sym / (km_sym * s_sym)) - assert_allclose_units(u3.base_value, pc_cgs / km_cgs, 1e-12) - assert_true(u3.dimensions == 1 / time) - - -def test_power(): - """ - Take units to some power. - - """ - from sympy import nsimplify - - pc_cgs = cm_per_pc - mK_cgs = 1e-3 - u1_dims = mass * length**2 * time**-3 * temperature**4 - u1 = Unit("g * pc**2 * s**-3 * mK**4") - - u2 = u1**2 - - assert_true(u2.dimensions == u1_dims**2) - assert_allclose_units(u2.base_value, (pc_cgs**2 * mK_cgs**4)**2, 1e-12) - - u3 = u1**(-1.0/3) - - assert_true(u3.dimensions == nsimplify(u1_dims**(-1.0/3))) - assert_allclose_units(u3.base_value, (pc_cgs**2 * mK_cgs**4)**(-1.0/3), 1e-12) - - -def test_equality(): - """ - Check unit equality with different symbols, but same dimensions and base_value. - - """ - u1 = Unit("km * s**-1") - u2 = Unit("m * ms**-1") - - assert_true(u1 == u2) - -# -# End operation tests. -# - -def test_base_equivalent(): - """ - Check base equivalent of a unit. 
- - """ - Msun_cgs = mass_sun_grams - Mpc_cgs = cm_per_mpc - - u1 = Unit("Msun * Mpc**-3") - u2 = Unit("g * cm**-3") - u3 = u1.get_base_equivalent() - - assert_true(u2.expr == u3.expr) - assert_true(u2 == u3) - - assert_allclose_units(u1.base_value, Msun_cgs / Mpc_cgs**3, 1e-12) - assert_true(u2.base_value == 1) - assert_true(u3.base_value == 1) - - mass_density = mass / length**3 - - assert_true(u1.dimensions == mass_density) - assert_true(u2.dimensions == mass_density) - assert_true(u3.dimensions == mass_density) - - assert_allclose_units(get_conversion_factor(u1, u3)[0], Msun_cgs / Mpc_cgs**3, 1e-12) - -def test_is_code_unit(): - ds = fake_random_ds(64, nprocs=1) - u1 = Unit('code_mass', registry=ds.unit_registry) - u2 = Unit('code_mass/code_length', registry=ds.unit_registry) - u3 = Unit('code_velocity*code_mass**2', registry=ds.unit_registry) - u4 = Unit('code_time*code_mass**0.5', registry=ds.unit_registry) - u5 = Unit('code_mass*g', registry=ds.unit_registry) - u6 = Unit('g/cm**3') - - assert_true(u1.is_code_unit) - assert_true(u2.is_code_unit) - assert_true(u3.is_code_unit) - assert_true(u4.is_code_unit) - assert_true(not u5.is_code_unit) - assert_true(not u6.is_code_unit) - -def test_temperature_offsets(): - u1 = Unit('degC') - u2 = Unit('degF') - - assert_raises(InvalidUnitOperation, operator.mul, u1, u2) - assert_raises(InvalidUnitOperation, operator.truediv, u1, u2) - -def test_latex_repr(): - ds = fake_random_ds(64, nprocs=1) - - # create a fake comoving unit - ds.unit_registry.add('pccm', ds.unit_registry.lut['pc'][0]/(1+2), length, - "\\rm{pc}/(1+z)") - - test_unit = Unit('Mpccm', registry=ds.unit_registry) - assert_almost_equal(test_unit.base_value, cm_per_mpc/3) - assert_equal(test_unit.latex_repr, r'\rm{Mpc}/(1+z)') - - test_unit = Unit('code_mass', registry=ds.unit_registry) - assert_equal(test_unit.latex_repr, '\\rm{code\\ mass}') - - test_unit = Unit('code_mass/code_length**3', registry=ds.unit_registry) - assert_equal(test_unit.latex_repr, - '\\frac{\\rm{code\\ mass}}{\\rm{code\\ length}^{3}}') - - test_unit = Unit('cm**-3', base_value=1.0, registry=ds.unit_registry) - assert_equal(test_unit.latex_repr, '\\frac{1}{\\rm{cm}^{3}}') - - test_unit = Unit('m_geom/l_geom**3') - assert_equal(test_unit.latex_repr, '\\frac{1}{M_\\odot^{2}}') - - test_unit = Unit('1e9*cm') - assert_equal(test_unit.latex_repr, '1.0 \\times 10^{9}\\ \\rm{cm}') - -def test_latitude_longitude(): - lat = unit_symbols.lat - lon = unit_symbols.lon - deg = unit_symbols.deg - assert_equal(lat.units.base_offset, 90.0) - assert_equal((deg*90.0).in_units("lat").value, 0.0) - assert_equal((deg*180).in_units("lat").value, -90.0) - assert_equal((lat*0.0).in_units("deg"), deg*90.0) - assert_equal((lat*-90).in_units("deg"), deg*180) - - assert_equal(lon.units.base_offset, -180.0) - assert_equal((deg*0.0).in_units("lon").value, -180.0) - assert_equal((deg*90.0).in_units("lon").value, -90.0) - assert_equal((deg*180).in_units("lon").value, 0.0) - assert_equal((deg*360).in_units("lon").value, 180.0) - - assert_equal((lon*-180.0).in_units("deg"), deg*0.0) - assert_equal((lon*-90.0).in_units("deg"), deg*90.0) - assert_equal((lon*0.0).in_units("deg"), deg*180.0) - assert_equal((lon*180.0).in_units("deg"), deg*360) - -def test_registry_json(): - reg = UnitRegistry() - json_reg = reg.to_json() - unserialized_reg = UnitRegistry.from_json(json_reg) - - assert_equal(reg.lut, unserialized_reg.lut) - -def test_creation_from_ytarray(): - u1 = Unit(electrostatic_unit) - assert_equal(str(u1), 'esu') - assert_equal(u1, 
Unit('esu')) - assert_equal(u1, electrostatic_unit.units) - - u2 = Unit(elementary_charge) - assert_equal(str(u2), '4.8032056e-10*esu') - assert_equal(u2, Unit('4.8032056e-10*esu')) - assert_equal(u1, elementary_charge.units) - - assert_equal((u1/u2).base_value, electrostatic_unit/elementary_charge) - - assert_raises(UnitParseError, Unit, [1, 2, 3]*elementary_charge) - -def test_list_same_dimensions(): - reg = default_unit_registry - for name1, u1 in reg.unit_objs.items(): - for name2 in reg.list_same_dimensions(u1): - if name2 == name1: continue - if name2 in reg.unit_objs: - dim2 = reg.unit_objs[name2].dimensions - else: - _, dim2, _, _ = reg.lut[name2] - assert_true(u1.dimensions is dim2) diff --git a/yt/units/tests/test_ytarray.py b/yt/units/tests/test_ytarray.py deleted file mode 100644 index 054d4f63f34..00000000000 --- a/yt/units/tests/test_ytarray.py +++ /dev/null @@ -1,1436 +0,0 @@ -""" -Test ndarray subclass that handles symbolic units. - - - - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ---------------------------------------------------------------------------- - -import copy -from yt.extern.six.moves import cPickle as pickle -import itertools -import numpy as np -import operator -import os -import shutil -import tempfile - -from distutils.version import LooseVersion -from nose.tools import assert_true -from numpy.testing import \ - assert_array_equal, \ - assert_equal, assert_raises, \ - assert_array_almost_equal_nulp, \ - assert_array_almost_equal, \ - assert_almost_equal -from numpy import array -from yt.units.yt_array import \ - YTArray, YTQuantity, \ - unary_operators, binary_operators, \ - uconcatenate, uintersect1d, \ - uhstack, uvstack, ustack, \ - uunion1d, loadtxt, savetxt, \ - display_ytarray -from yt.utilities.exceptions import \ - YTUnitOperationError, YTUfuncUnitError, \ - YTArrayTooLargeToDisplay -from yt.testing import \ - fake_random_ds, \ - requires_module, \ - assert_allclose_units -from yt.funcs import fix_length -from yt.units.unit_symbols import \ - cm, m, g, degree -from yt.utilities.physical_ratios import \ - metallicity_sun - -def operate_and_compare(a, b, op, answer): - # Test generator for YTArrays tests - assert_array_equal(op(a, b), answer) - - -def assert_isinstance(a, type): - assert isinstance(a, type) - - -def test_addition(): - """ - Test addition of two YTArrays - - """ - - # Same units - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([4, 5, 6], 'cm') - a3 = [4*cm, 5*cm, 6*cm] - answer = YTArray([5, 7, 9], 'cm') - - operate_and_compare(a1, a2, operator.add, answer) - operate_and_compare(a2, a1, operator.add, answer) - operate_and_compare(a1, a3, operator.add, answer) - operate_and_compare(a3, a1, operator.add, answer) - operate_and_compare(a2, a1, np.add, answer) - operate_and_compare(a1, a2, np.add, answer) - operate_and_compare(a1, a3, np.add, answer) - operate_and_compare(a3, a1, np.add, answer) - - # different units - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([4, 5, 6], 'm') - a3 = [4*m, 5*m, 6*m] - answer1 = YTArray([401, 502, 603], 'cm') - answer2 = YTArray([4.01, 5.02, 6.03], 'm') - - operate_and_compare(a1, a2, operator.add, answer1) - operate_and_compare(a2, a1, operator.add, answer2) - operate_and_compare(a1, a3, operator.add, answer1) - if LooseVersion(np.__version__) < LooseVersion('1.13.0'): - 
operate_and_compare(a3, a1, operator.add, answer1) - assert_raises(YTUfuncUnitError, np.add, a1, a2) - assert_raises(YTUfuncUnitError, np.add, a1, a3) - else: - operate_and_compare(a3, a1, operator.add, answer2) - operate_and_compare(a1, a2, np.add, answer1) - operate_and_compare(a2, a1, np.add, answer2) - operate_and_compare(a1, a3, np.add, answer1) - operate_and_compare(a3, a1, np.add, answer2) - - - # Test dimensionless quantities - a1 = YTArray([1, 2, 3]) - a2 = array([4, 5, 6]) - a3 = [4, 5, 6] - answer = YTArray([5, 7, 9]) - - operate_and_compare(a1, a2, operator.add, answer) - operate_and_compare(a2, a1, operator.add, answer) - operate_and_compare(a1, a3, operator.add, answer) - operate_and_compare(a3, a1, operator.add, answer) - operate_and_compare(a1, a2, np.add, answer) - operate_and_compare(a2, a1, np.add, answer) - operate_and_compare(a1, a3, np.add, answer) - operate_and_compare(a3, a1, np.add, answer) - - # Catch the different dimensions error - a1 = YTArray([1, 2, 3], 'm') - a2 = YTArray([4, 5, 6], 'kg') - a3 = [7, 8, 9] - a4 = YTArray([10, 11, 12], '') - - assert_raises(YTUnitOperationError, operator.add, a1, a2) - assert_raises(YTUnitOperationError, operator.iadd, a1, a2) - assert_raises(YTUnitOperationError, operator.add, a1, a3) - assert_raises(YTUnitOperationError, operator.iadd, a1, a3) - assert_raises(YTUnitOperationError, operator.add, a3, a1) - assert_raises(YTUnitOperationError, operator.iadd, a3, a1) - assert_raises(YTUnitOperationError, operator.add, a1, a4) - assert_raises(YTUnitOperationError, operator.iadd, a1, a4) - assert_raises(YTUnitOperationError, operator.add, a4, a1) - assert_raises(YTUnitOperationError, operator.iadd, a4, a1) - - # adding with zero is allowed irrespective of the units - zeros = np.zeros(3) - zeros_yta_dimless = YTArray(zeros, 'dimensionless') - zeros_yta_length = YTArray(zeros, 'm') - zeros_yta_mass = YTArray(zeros, 'kg') - operands = [0, YTQuantity(0), YTQuantity(0, 'kg'), zeros, zeros_yta_dimless, - zeros_yta_length, zeros_yta_mass] - - for op in [operator.add, np.add]: - for operand in operands: - operate_and_compare(a1, operand, op, a1) - operate_and_compare(operand, a1, op, a1) - operate_and_compare(4*m, operand, op, 4*m) - operate_and_compare(operand, 4*m, op, 4*m) - -def test_subtraction(): - """ - Test subtraction of two YTArrays - - """ - - # Same units - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([4, 5, 6], 'cm') - a3 = [4*cm, 5*cm, 6*cm] - answer1 = YTArray([-3, -3, -3], 'cm') - answer2 = YTArray([3, 3, 3], 'cm') - - operate_and_compare(a1, a2, operator.sub, answer1) - operate_and_compare(a2, a1, operator.sub, answer2) - operate_and_compare(a1, a3, operator.sub, answer1) - operate_and_compare(a3, a1, operator.sub, answer2) - operate_and_compare(a1, a2, np.subtract, answer1) - operate_and_compare(a2, a1, np.subtract, answer2) - operate_and_compare(a1, a3, np.subtract, answer1) - operate_and_compare(a3, a1, np.subtract, answer2) - - # different units - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([4, 5, 6], 'm') - a3 = [4*m, 5*m, 6*m] - answer1 = YTArray([-399, -498, -597], 'cm') - answer2 = YTArray([3.99, 4.98, 5.97], 'm') - answer3 = YTArray([399, 498, 597], 'cm') - - operate_and_compare(a1, a2, operator.sub, answer1) - operate_and_compare(a2, a1, operator.sub, answer2) - operate_and_compare(a1, a3, operator.sub, answer1) - operate_and_compare(a3, a1, operator.sub, answer3) - if LooseVersion(np.__version__) < LooseVersion('1.13.0'): - assert_raises(YTUfuncUnitError, np.subtract, a1, a2) - assert_raises(YTUfuncUnitError, 
np.subtract, a1, a3) - else: - operate_and_compare(a1, a2, np.subtract, answer1) - operate_and_compare(a2, a1, np.subtract, answer2) - operate_and_compare(a1, a3, np.subtract, answer1) - operate_and_compare(a3, a1, np.subtract, answer3) - - # Test dimensionless quantities - a1 = YTArray([1, 2, 3]) - a2 = array([4, 5, 6]) - a3 = [4, 5, 6] - answer1 = YTArray([-3, -3, -3]) - answer2 = YTArray([3, 3, 3]) - - operate_and_compare(a1, a2, operator.sub, answer1) - operate_and_compare(a2, a1, operator.sub, answer2) - operate_and_compare(a1, a3, operator.sub, answer1) - operate_and_compare(a3, a1, operator.sub, answer2) - operate_and_compare(a1, a2, np.subtract, answer1) - operate_and_compare(a2, a1, np.subtract, answer2) - operate_and_compare(a1, a3, np.subtract, answer1) - operate_and_compare(a3, a1, np.subtract, answer2) - - # Catch the different dimensions error - a1 = YTArray([1, 2, 3], 'm') - a2 = YTArray([4, 5, 6], 'kg') - a3 = [7, 8, 9] - a4 = YTArray([10, 11, 12], '') - - assert_raises(YTUnitOperationError, operator.sub, a1, a2) - assert_raises(YTUnitOperationError, operator.isub, a1, a2) - assert_raises(YTUnitOperationError, operator.sub, a1, a3) - assert_raises(YTUnitOperationError, operator.isub, a1, a3) - assert_raises(YTUnitOperationError, operator.sub, a3, a1) - assert_raises(YTUnitOperationError, operator.isub, a3, a1) - assert_raises(YTUnitOperationError, operator.sub, a1, a4) - assert_raises(YTUnitOperationError, operator.isub, a1, a4) - assert_raises(YTUnitOperationError, operator.sub, a4, a1) - assert_raises(YTUnitOperationError, operator.isub, a4, a1) - - # subtracting with zero is allowed irrespective of the units - zeros = np.zeros(3) - zeros_yta_dimless = YTArray(zeros, 'dimensionless') - zeros_yta_length = YTArray(zeros, 'm') - zeros_yta_mass = YTArray(zeros, 'kg') - operands = [0, YTQuantity(0), YTQuantity(0, 'kg'), zeros, zeros_yta_dimless, - zeros_yta_length, zeros_yta_mass] - - for op in [operator.sub, np.subtract]: - for operand in operands: - operate_and_compare(a1, operand, op, a1) - operate_and_compare(operand, a1, op, -a1) - operate_and_compare(4*m, operand, op, 4*m) - operate_and_compare(operand, 4*m, op, -4*m) - -def test_multiplication(): - """ - Test multiplication of two YTArrays - - """ - - # Same units - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([4, 5, 6], 'cm') - a3 = [4*cm, 5*cm, 6*cm] - answer = YTArray([4, 10, 18], 'cm**2') - - operate_and_compare(a1, a2, operator.mul, answer) - operate_and_compare(a2, a1, operator.mul, answer) - operate_and_compare(a1, a3, operator.mul, answer) - operate_and_compare(a3, a1, operator.mul, answer) - operate_and_compare(a1, a2, np.multiply, answer) - operate_and_compare(a2, a1, np.multiply, answer) - operate_and_compare(a1, a3, np.multiply, answer) - operate_and_compare(a3, a1, np.multiply, answer) - - # different units, same dimension - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([4, 5, 6], 'm') - a3 = [4*m, 5*m, 6*m] - answer1 = YTArray([400, 1000, 1800], 'cm**2') - answer2 = YTArray([.04, .10, .18], 'm**2') - answer3 = YTArray([4, 10, 18], 'cm*m') - - operate_and_compare(a1, a2, operator.mul, answer1) - operate_and_compare(a2, a1, operator.mul, answer2) - operate_and_compare(a1, a3, operator.mul, answer1) - operate_and_compare(a3, a1, operator.mul, answer2) - operate_and_compare(a1, a2, np.multiply, answer3) - operate_and_compare(a2, a1, np.multiply, answer3) - operate_and_compare(a1, a3, np.multiply, answer3) - operate_and_compare(a3, a1, np.multiply, answer3) - - # different dimensions - a1 = YTArray([1, 2, 3], 
'cm') - a2 = YTArray([4, 5, 6], 'g') - a3 = [4*g, 5*g, 6*g] - answer = YTArray([4, 10, 18], 'cm*g') - - operate_and_compare(a1, a2, operator.mul, answer) - operate_and_compare(a2, a1, operator.mul, answer) - operate_and_compare(a1, a3, operator.mul, answer) - operate_and_compare(a3, a1, operator.mul, answer) - operate_and_compare(a1, a2, np.multiply, answer) - operate_and_compare(a2, a1, np.multiply, answer) - operate_and_compare(a1, a3, np.multiply, answer) - operate_and_compare(a3, a1, np.multiply, answer) - - # One dimensionless, one unitful - a1 = YTArray([1, 2, 3], 'cm') - a2 = array([4, 5, 6]) - a3 = [4, 5, 6] - answer = YTArray([4, 10, 18], 'cm') - - operate_and_compare(a1, a2, operator.mul, answer) - operate_and_compare(a2, a1, operator.mul, answer) - operate_and_compare(a1, a3, operator.mul, answer) - operate_and_compare(a3, a1, operator.mul, answer) - operate_and_compare(a1, a2, np.multiply, answer) - operate_and_compare(a2, a1, np.multiply, answer) - operate_and_compare(a1, a3, np.multiply, answer) - operate_and_compare(a3, a1, np.multiply, answer) - - # Both dimensionless quantities - a1 = YTArray([1, 2, 3]) - a2 = array([4, 5, 6]) - a3 = [4, 5, 6] - answer = YTArray([4, 10, 18]) - - operate_and_compare(a1, a2, operator.mul, answer) - operate_and_compare(a2, a1, operator.mul, answer) - operate_and_compare(a1, a3, operator.mul, answer) - operate_and_compare(a3, a1, operator.mul, answer) - operate_and_compare(a1, a2, np.multiply, answer) - operate_and_compare(a2, a1, np.multiply, answer) - operate_and_compare(a1, a3, np.multiply, answer) - operate_and_compare(a3, a1, np.multiply, answer) - - -def test_division(): - """ - Test multiplication of two YTArrays - - """ - - # Same units - a1 = YTArray([1., 2., 3.], 'cm') - a2 = YTArray([4., 5., 6.], 'cm') - a3 = [4*cm, 5*cm, 6*cm] - answer1 = YTArray([0.25, 0.4, 0.5]) - answer2 = YTArray([4, 2.5, 2]) - if "div" in dir(operator): - op = operator.div - else: - op = operator.truediv - - operate_and_compare(a1, a2, op, answer1) - operate_and_compare(a2, a1, op, answer2) - operate_and_compare(a1, a3, op, answer1) - operate_and_compare(a3, a1, op, answer2) - operate_and_compare(a1, a2, np.divide, answer1) - operate_and_compare(a2, a1, np.divide, answer2) - operate_and_compare(a1, a3, np.divide, answer1) - operate_and_compare(a3, a1, np.divide, answer2) - - # different units, same dimension - a1 = YTArray([1., 2., 3.], 'cm') - a2 = YTArray([4., 5., 6.], 'm') - a3 = [4*m, 5*m, 6*m] - answer1 = YTArray([.0025, .004, .005]) - answer2 = YTArray([400, 250, 200]) - answer3 = YTArray([0.25, 0.4, 0.5], 'cm/m') - answer4 = YTArray([4.0, 2.5, 2.0], 'm/cm') - - operate_and_compare(a1, a2, op, answer1) - operate_and_compare(a2, a1, op, answer2) - operate_and_compare(a1, a3, op, answer1) - operate_and_compare(a3, a1, op, answer2) - if LooseVersion(np.__version__) < LooseVersion('1.13.0'): - operate_and_compare(a1, a2, np.divide, answer3) - operate_and_compare(a2, a1, np.divide, answer4) - operate_and_compare(a1, a3, np.divide, answer3) - operate_and_compare(a3, a1, np.divide, answer4) - else: - operate_and_compare(a1, a2, np.divide, answer1) - operate_and_compare(a2, a1, np.divide, answer2) - operate_and_compare(a1, a3, np.divide, answer1) - operate_and_compare(a3, a1, np.divide, answer2) - - # different dimensions - a1 = YTArray([1., 2., 3.], 'cm') - a2 = YTArray([4., 5., 6.], 'g') - a3 = [4*g, 5*g, 6*g] - answer1 = YTArray([0.25, 0.4, 0.5], 'cm/g') - answer2 = YTArray([4, 2.5, 2], 'g/cm') - - operate_and_compare(a1, a2, op, answer1) - 
operate_and_compare(a2, a1, op, answer2) - operate_and_compare(a1, a3, op, answer1) - operate_and_compare(a3, a1, op, answer2) - operate_and_compare(a1, a2, np.divide, answer1) - operate_and_compare(a2, a1, np.divide, answer2) - operate_and_compare(a1, a3, np.divide, answer1) - operate_and_compare(a3, a1, np.divide, answer2) - - # One dimensionless, one unitful - a1 = YTArray([1., 2., 3.], 'cm') - a2 = array([4., 5., 6.]) - a3 = [4, 5, 6] - answer1 = YTArray([0.25, 0.4, 0.5], 'cm') - answer2 = YTArray([4, 2.5, 2], '1/cm') - - operate_and_compare(a1, a2, op, answer1) - operate_and_compare(a2, a1, op, answer2) - operate_and_compare(a1, a3, op, answer1) - operate_and_compare(a3, a1, op, answer2) - operate_and_compare(a1, a2, np.divide, answer1) - operate_and_compare(a2, a1, np.divide, answer2) - operate_and_compare(a1, a3, np.divide, answer1) - operate_and_compare(a3, a1, np.divide, answer2) - - # Both dimensionless quantities - a1 = YTArray([1., 2., 3.]) - a2 = array([4., 5., 6.]) - a3 = [4, 5, 6] - answer1 = YTArray([0.25, 0.4, 0.5]) - answer2 = YTArray([4, 2.5, 2]) - - operate_and_compare(a1, a2, op, answer1) - operate_and_compare(a2, a1, op, answer2) - operate_and_compare(a1, a3, op, answer1) - operate_and_compare(a3, a1, op, answer2) - operate_and_compare(a1, a3, np.divide, answer1) - operate_and_compare(a3, a1, np.divide, answer2) - operate_and_compare(a1, a3, np.divide, answer1) - operate_and_compare(a3, a1, np.divide, answer2) - - -def test_power(): - """ - Test power operator ensure units are correct. - - """ - - from yt.units import cm - - cm_arr = np.array([1.0, 1.0]) * cm - - assert_equal, cm**3, YTQuantity(1, 'cm**3') - assert_equal, np.power(cm, 3), YTQuantity(1, 'cm**3') - assert_equal, cm**YTQuantity(3), YTQuantity(1, 'cm**3') - assert_raises, YTUnitOperationError, np.power, cm, YTQuantity(3, 'g') - - assert_equal, cm_arr**3, YTArray([1, 1], 'cm**3') - assert_equal, np.power(cm_arr, 3), YTArray([1, 1], 'cm**3') - assert_equal, cm_arr**YTQuantity(3), YTArray([1, 1], 'cm**3') - assert_raises, YTUnitOperationError, np.power, cm_arr, YTQuantity(3, 'g') - - -def test_comparisons(): - """ - Test numpy ufunc comparison operators for unit consistency. - - """ - from yt.units.yt_array import YTArray - - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([2, 1, 3], 'cm') - a3 = YTArray([.02, .01, .03], 'm') - dimless = np.array([2,1,3]) - - ops = ( - np.less, - np.less_equal, - np.greater, - np.greater_equal, - np.equal, - np.not_equal - ) - - answers = ( - [True, False, False], - [True, False, True], - [False, True, False], - [False, True, True], - [False, False, True], - [True, True, False], - ) - - for op, answer in zip(ops, answers): - operate_and_compare(a1, a2, op, answer) - for op, answer in zip(ops, answers): - operate_and_compare(a1, dimless, op, answer) - - for op, answer in zip(ops, answers): - if LooseVersion(np.__version__) < LooseVersion('1.13.0'): - assert_raises(YTUfuncUnitError, op, a1, a3) - else: - operate_and_compare(a1, a3, op, answer) - - for op, answer in zip(ops, answers): - operate_and_compare(a1, a3.in_units('cm'), op, answer) - - # Check that comparisons with dimensionless quantities work in both directions. 
- operate_and_compare(a3, dimless, np.less, [True, True, True]) - operate_and_compare(dimless, a3, np.less, [False, False, False]) - assert_equal(a1 < 2, [True, False, False]) - assert_equal(a1 < 2, np.less(a1, 2)) - assert_equal(2 < a1, [False, False, True]) - assert_equal(2 < a1, np.less(2, a1)) - - -def test_unit_conversions(): - """ - Test operations that convert to different units or cast to ndarray - - """ - from yt.units.yt_array import YTQuantity - from yt.units.unit_object import Unit - - km = YTQuantity(1, 'km') - km_in_cm = km.in_units('cm') - cm_unit = Unit('cm') - kpc_unit = Unit('kpc') - - assert_equal(km_in_cm, km) - assert_equal(km_in_cm.in_cgs(), 1e5) - assert_equal(km_in_cm.in_mks(), 1e3) - assert_equal(km_in_cm.units, cm_unit) - - km_view = km.ndarray_view() - km.convert_to_units('cm') - assert_true(km_view.base is km.base) - - assert_equal(km, YTQuantity(1, 'km')) - assert_equal(km.in_cgs(), 1e5) - assert_equal(km.in_mks(), 1e3) - assert_equal(km.units, cm_unit) - - km.convert_to_units('kpc') - assert_true(km_view.base is km.base) - - assert_array_almost_equal_nulp(km, YTQuantity(1, 'km')) - assert_array_almost_equal_nulp(km.in_cgs(), YTQuantity(1e5, 'cm')) - assert_array_almost_equal_nulp(km.in_mks(), YTQuantity(1e3, 'm')) - assert_equal(km.units, kpc_unit) - - assert_isinstance(km.to_ndarray(), np.ndarray) - assert_isinstance(km.ndarray_view(), np.ndarray) - - dyne = YTQuantity(1.0, 'dyne') - - assert_equal(dyne.in_cgs(), dyne) - assert_equal(dyne.in_cgs(), 1.0) - assert_equal(dyne.in_mks(), dyne) - assert_equal(dyne.in_mks(), 1e-5) - assert_equal(str(dyne.in_mks().units), 'kg*m/s**2') - assert_equal(str(dyne.in_cgs().units), 'cm*g/s**2') - - em3 = YTQuantity(1.0, 'erg/m**3') - - assert_equal(em3.in_cgs(), em3) - assert_equal(em3.in_cgs(), 1e-6) - assert_equal(em3.in_mks(), em3) - assert_equal(em3.in_mks(), 1e-7) - assert_equal(str(em3.in_mks().units), 'kg/(m*s**2)') - assert_equal(str(em3.in_cgs().units), 'g/(cm*s**2)') - - em3_converted = YTQuantity(1545436840.386756, 'Msun/(Myr**2*kpc)') - assert_equal(em3.in_base(unit_system="galactic"), em3) - assert_array_almost_equal(em3.in_base(unit_system="galactic"), em3_converted) - assert_equal(str(em3.in_base(unit_system="galactic").units), 'Msun/(Myr**2*kpc)') - - dimless = YTQuantity(1.0, "") - assert_equal(dimless.in_cgs(), dimless) - assert_equal(dimless.in_cgs(), 1.0) - assert_equal(dimless.in_mks(), dimless) - assert_equal(dimless.in_mks(), 1.0) - assert_equal(str(dimless.in_cgs().units), "dimensionless") - -def test_temperature_conversions(): - """ - Test conversions between various supported temperatue scales. - - Also ensure we only allow compound units with temperature - scales that have a proper zero point. 
- - """ - from yt.units.unit_object import InvalidUnitOperation - - km = YTQuantity(1, 'km') - balmy = YTQuantity(300, 'K') - balmy_F = YTQuantity(80.33, 'degF') - balmy_C = YTQuantity(26.85, 'degC') - balmy_R = YTQuantity(540, 'R') - - assert_array_almost_equal(balmy.in_units('degF'), balmy_F) - assert_array_almost_equal(balmy.in_units('degC'), balmy_C) - assert_array_almost_equal(balmy.in_units('R'), balmy_R) - - balmy_view = balmy.ndarray_view() - - balmy.convert_to_units('degF') - assert_true(balmy_view.base is balmy.base) - assert_array_almost_equal(np.array(balmy), np.array(balmy_F)) - - balmy.convert_to_units('degC') - assert_true(balmy_view.base is balmy.base) - assert_array_almost_equal(np.array(balmy), np.array(balmy_C)) - - balmy.convert_to_units('R') - assert_true(balmy_view.base is balmy.base) - assert_array_almost_equal(np.array(balmy), np.array(balmy_R)) - - balmy.convert_to_units('degF') - assert_true(balmy_view.base is balmy.base) - assert_array_almost_equal(np.array(balmy), np.array(balmy_F)) - - assert_raises(InvalidUnitOperation, np.multiply, balmy, km) - - # Does CGS conversion from F to K work? - assert_array_almost_equal(balmy.in_cgs(), YTQuantity(300, 'K')) - - -def test_yt_array_yt_quantity_ops(): - """ - Test operations that combine YTArray and YTQuantity - """ - a = YTArray(range(10, 1), 'cm') - b = YTQuantity(5, 'g') - - assert_isinstance(a*b, YTArray) - assert_isinstance(b*a, YTArray) - - assert_isinstance(a/b, YTArray) - assert_isinstance(b/a, YTArray) - - assert_isinstance(a*a, YTArray) - assert_isinstance(a/a, YTArray) - - assert_isinstance(b*b, YTQuantity) - assert_isinstance(b/b, YTQuantity) - - -def test_selecting(): - """ - Test slicing of two YTArrays - - """ - a = YTArray(range(10), 'cm') - a_slice = a[:3] - a_fancy_index = a[[1, 1, 3, 5]] - a_array_fancy_index = a[array([[1, 1], [3, 5]])] - a_boolean_index = a[a > 5] - a_selection = a[0] - - assert_array_equal(a_slice, YTArray([0, 1, 2], 'cm')) - assert_equal(a_slice.units, a.units) - assert_array_equal(a_fancy_index, YTArray([1, 1, 3, 5], 'cm')) - assert_equal(a_fancy_index.units, a.units) - assert_array_equal(a_array_fancy_index, YTArray([[1, 1, ], [3, 5]], 'cm')) - assert_equal(a_array_fancy_index.units, a.units) - assert_array_equal(a_boolean_index, YTArray([6, 7, 8, 9], 'cm')) - assert_equal(a_boolean_index.units, a.units) - assert_isinstance(a_selection, YTQuantity) - assert_equal(a_selection.units, a.units) - - # .base points to the original array for a numpy view. If it is not a - # view, .base is None. - assert_true(a_slice.base is a) - - -def test_iteration(): - """ - Test that iterating over a YTArray returns a sequence of YTQuantity instances - """ - a = np.arange(3) - b = YTArray(np.arange(3), 'cm') - for ia, ib, in zip(a, b): - assert_equal(ia, ib.value) - assert_equal(ib.units, b.units) - - -def test_fix_length(): - """ - Test fixing the length of an array. 
Used in spheres and other data objects - """ - ds = fake_random_ds(64, nprocs=1, length_unit=10) - length = ds.quan(1.0, 'code_length') - new_length = fix_length(length, ds=ds) - assert_equal(YTQuantity(10, 'cm'), new_length) - - assert_raises(RuntimeError, fix_length, (length, 'code_length'), ds) - -def test_code_unit_combinations(): - """ - Test comparing code units coming from different datasets - """ - ds1 = fake_random_ds(64, nprocs=1, length_unit=1) - ds2 = fake_random_ds(64, nprocs=1, length_unit=10) - - q1 = ds1.quan(1, 'code_length') - q2 = ds2.quan(1, 'code_length') - - assert_equal(10*q1, q2) - assert_equal(q1/q2, 0.1) - assert_true(q1 < q2) - assert_true(q2 > q1) - assert_true(not bool(q1 > q2)) - assert_true(not bool(q2 < q1)) - assert_true(q1 != q2) - assert_true(not bool(q1 == q2)) - - assert_equal((q1 + q2).in_cgs().value, 11) - assert_equal((q2 + q1).in_cgs().value, 11) - assert_equal((q1 - q2).in_cgs().value, -9) - assert_equal((q2 - q1).in_cgs().value, 9) - -def test_ytarray_pickle(): - ds = fake_random_ds(64, nprocs=1) - test_data = [ds.quan(12.0, 'code_length'), - ds.arr([1, 2, 3], 'code_length')] - - for data in test_data: - tempf = tempfile.NamedTemporaryFile(delete=False) - pickle.dump(data, tempf) - tempf.close() - - with open(tempf.name, "rb") as fname: - loaded_data = pickle.load(fname) - os.unlink(tempf.name) - - assert_array_equal(data, loaded_data) - assert_equal(data.units, loaded_data.units) - assert_array_equal(array(data.in_cgs()), array(loaded_data.in_cgs())) - assert_equal(float(data.units.base_value), float(loaded_data.units.base_value)) - - -def test_copy(): - quan = YTQuantity(1, 'g') - arr = YTArray([1, 2, 3], 'cm') - - assert_equal(copy.copy(quan), quan) - assert_array_equal(copy.copy(arr), arr) - - assert_equal( copy.deepcopy(quan), quan) - assert_array_equal(copy.deepcopy(arr), arr) - - assert_equal(quan.copy(), quan) - assert_array_equal(arr.copy(), arr) - - assert_equal(np.copy(quan), quan) - assert_array_equal(np.copy(arr), arr) - -# needed so the tests function on older numpy versions that have -# different sets of ufuncs -def yield_np_ufuncs(ufunc_list): - for u in ufunc_list: - ufunc = getattr(np, u, None) - if ufunc is not None: - yield ufunc - -def unary_ufunc_comparison(ufunc, a): - out = a.copy() - a_array = a.to_ndarray() - if ufunc in (np.isreal, np.iscomplex, ): - # According to the numpy docs, these two explicitly do not do - # in-place copies. - ret = ufunc(a) - assert_true(not hasattr(ret, 'units')) - assert_array_equal(ret, ufunc(a)) - elif ufunc in yield_np_ufuncs([ - 'exp', 'exp2', 'log', 'log2', 'log10', 'expm1', 'log1p', 'sin', - 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'sinh', 'cosh', 'tanh', - 'arccosh', 'arcsinh', 'arctanh', 'deg2rad', 'rad2deg', 'isfinite', - 'isinf', 'isnan', 'signbit', 'sign', 'rint', 'logical_not']): - # These operations should return identical results compared to numpy. - with np.errstate(invalid='ignore'): - try: - ret = ufunc(a, out=out) - except YTUnitOperationError: - assert_true(ufunc in (np.deg2rad, np.rad2deg)) - ret = ufunc(YTArray(a, '1')) - - assert_array_equal(ret, out) - assert_array_equal(ret, ufunc(a_array)) - # In-place copies do not drop units. 
- assert_true(hasattr(out, 'units')) - assert_true(not hasattr(ret, 'units')) - elif ufunc in yield_np_ufuncs( - ['absolute', 'fabs', 'conjugate', 'floor', 'ceil', 'trunc', - 'negative', 'spacing', 'positive']): - - ret = ufunc(a, out=out) - - assert_array_equal(ret, out) - assert_array_equal(ret.to_ndarray(), ufunc(a_array)) - assert_true(ret.units == out.units) - elif ufunc in yield_np_ufuncs( - ['ones_like', 'square', 'sqrt', 'reciprocal']): - if ufunc is np.ones_like: - ret = ufunc(a) - else: - with np.errstate(invalid='ignore'): - ret = ufunc(a, out=out) - assert_array_equal(ret, out) - - with np.errstate(invalid='ignore'): - assert_array_equal(ret.to_ndarray(), ufunc(a_array)) - if ufunc is np.square: - assert_true(out.units == a.units**2) - assert_true(ret.units == a.units**2) - elif ufunc is np.sqrt: - assert_true(out.units == a.units**0.5) - assert_true(ret.units == a.units**0.5) - elif ufunc is np.reciprocal: - assert_true(out.units == a.units**-1) - assert_true(ret.units == a.units**-1) - elif ufunc is np.modf: - ret1, ret2 = ufunc(a) - npret1, npret2 = ufunc(a_array) - - assert_array_equal(ret1.to_ndarray(), npret1) - assert_array_equal(ret2.to_ndarray(), npret2) - elif ufunc is np.frexp: - ret1, ret2 = ufunc(a) - npret1, npret2 = ufunc(a_array) - - assert_array_equal(ret1, npret1) - assert_array_equal(ret2, npret2) - elif ufunc is np.invert: - assert_raises(TypeError, ufunc, a) - elif hasattr(np, 'isnat') and ufunc is np.isnat: - # numpy 1.13 raises ValueError, numpy 1.14 and newer raise TypeError - assert_raises((TypeError, ValueError), ufunc, a) - else: - # There shouldn't be any untested ufuncs. - assert_true(False) - - -def binary_ufunc_comparison(ufunc, a, b): - if ufunc in [np.divmod]: - out = (a.copy(), a.copy()) - else: - out = a.copy() - if ufunc in yield_np_ufuncs([ - 'add', 'subtract', 'remainder', 'fmod', 'mod', 'arctan2', 'hypot', - 'greater', 'greater_equal', 'less', 'less_equal', 'equal', - 'not_equal', 'logical_and', 'logical_or', 'logical_xor', 'maximum', - 'minimum', 'fmax', 'fmin', 'nextafter', 'heaviside']): - if a.units != b.units and a.units.dimensions == b.units.dimensions: - if LooseVersion(np.__version__) < LooseVersion('1.13.0'): - assert_raises(YTUfuncUnitError, ufunc, a, b) - return - elif a.units != b.units: - assert_raises(YTUnitOperationError, ufunc, a, b) - return - if ufunc in yield_np_ufuncs( - ['bitwise_and', 'bitwise_or', 'bitwise_xor', 'left_shift', - 'right_shift', 'ldexp']): - assert_raises(TypeError, ufunc, a, b) - return - - ret = ufunc(a, b, out=out) - ret = ufunc(a, b) - - if ufunc is np.multiply: - assert_true(ret.units == a.units*b.units) - elif ufunc in (np.divide, np.true_divide, np.arctan2): - assert_true(ret.units.dimensions == (a.units/b.units).dimensions) - elif ufunc in (np.greater, np.greater_equal, np.less, np.less_equal, - np.not_equal, np.equal, np.logical_and, np.logical_or, - np.logical_xor): - assert_true(not isinstance(ret, YTArray) and - isinstance(ret, np.ndarray)) - if isinstance(ret, tuple): - assert isinstance(out, tuple) - assert len(out) == len(ret) - for o, r in zip(out, ret): - assert_array_equal(o, r) - else: - assert_array_equal(ret, out) - if (ufunc in (np.divide, np.true_divide, np.arctan2) and - (a.units.dimensions == b.units.dimensions)): - assert_array_almost_equal( - np.array(ret), ufunc(np.array(a.in_cgs()), np.array(b.in_cgs()))) - elif LooseVersion(np.__version__) < LooseVersion('1.13.0'): - assert_array_almost_equal(np.array(ret), ufunc(np.array(a), np.array(b))) - - -def test_ufuncs(): - for 
ufunc in unary_operators: - if ufunc is None: - continue - unary_ufunc_comparison(ufunc, YTArray([.3, .4, .5], 'cm')) - unary_ufunc_comparison(ufunc, YTArray([12, 23, 47], 'g')) - unary_ufunc_comparison(ufunc, YTArray([2, 4, -6], 'erg/m**3')) - - for ufunc in binary_operators: - if ufunc is None: - continue - # arr**arr is undefined for arrays with units because - # each element of the result would have different units. - if ufunc is np.power: - a = YTArray([.3, .4, .5], 'cm') - b = YTArray([.1, .2, .3], 'dimensionless') - c = np.array(b) - d = YTArray([1., 2., 3.], 'g') - binary_ufunc_comparison(ufunc, a, b) - binary_ufunc_comparison(ufunc, a, c) - assert_raises(YTUnitOperationError, ufunc, a, d) - continue - - a = YTArray([.3, .4, .5], 'cm') - b = YTArray([.1, .2, .3], 'cm') - c = YTArray([.1, .2, .3], 'm') - d = YTArray([.1, .2, .3], 'g') - e = YTArray([.1, .2, .3], 'erg/m**3') - - for pair in itertools.product([a, b, c, d, e], repeat=2): - binary_ufunc_comparison(ufunc, pair[0], pair[1]) - -def test_reductions(): - arr = YTArray([[1, 2, 3], [4, 5, 6]], 'cm') - - ev_result = arr.dot(YTArray([1, 2, 3], 'cm')) - res = YTArray([ 14., 32.], 'cm**2') - assert_equal(ev_result, res) - assert_equal(ev_result.units, res.units) - assert_isinstance(ev_result, YTArray) - - answers = { - 'prod': (YTQuantity(720, 'cm**6'), - YTArray([4, 10, 18], 'cm**2'), - YTArray([6, 120], 'cm**3')), - 'sum': (YTQuantity(21, 'cm'), - YTArray([ 5., 7., 9.], 'cm'), - YTArray([6, 15], 'cm'),), - 'mean': (YTQuantity(3.5, 'cm'), - YTArray([ 2.5, 3.5, 4.5], 'cm'), - YTArray([2, 5], 'cm')), - 'std': (YTQuantity(1.707825127659933, 'cm'), - YTArray([ 1.5, 1.5, 1.5], 'cm'), - YTArray([0.81649658, 0.81649658], 'cm')), - } - for op, (result1, result2, result3) in answers.items(): - ev_result = getattr(arr, op)() - assert_almost_equal(ev_result, result1) - assert_almost_equal(ev_result.units, result1.units) - assert_isinstance(ev_result, YTQuantity) - for axis, result in [(0, result2), (1, result3), (-1, result3)]: - ev_result = getattr(arr, op)(axis=axis) - assert_almost_equal(ev_result, result) - assert_almost_equal(ev_result.units, result.units) - assert_isinstance(ev_result, YTArray) - -def test_convenience(): - - arr = YTArray([1, 2, 3], 'cm') - - assert_equal(arr.unit_quantity, YTQuantity(1, 'cm')) - assert_equal(arr.uq, YTQuantity(1, 'cm')) - assert_isinstance(arr.unit_quantity, YTQuantity) - assert_isinstance(arr.uq, YTQuantity) - - assert_array_equal(arr.unit_array, YTArray(np.ones_like(arr), 'cm')) - assert_array_equal(arr.ua, YTArray(np.ones_like(arr), 'cm')) - assert_isinstance(arr.unit_array, YTArray) - assert_isinstance(arr.ua, YTArray) - - assert_array_equal(arr.ndview, arr.view(np.ndarray)) - assert_array_equal(arr.d, arr.view(np.ndarray)) - assert_true(arr.ndview.base is arr.base) - assert_true(arr.d.base is arr.base) - - assert_array_equal(arr.value, np.array(arr)) - assert_array_equal(arr.v, np.array(arr)) - - -def test_registry_association(): - ds = fake_random_ds(64, nprocs=1, length_unit=10) - a = ds.quan(3, 'cm') - b = YTQuantity(4, 'm') - c = ds.quan(6, '') - d = 5 - - assert_equal(id(a.units.registry), id(ds.unit_registry)) - - def binary_op_registry_comparison(op): - e = op(a, b) - f = op(b, a) - g = op(c, d) - h = op(d, c) - - assert_equal(id(e.units.registry), id(ds.unit_registry)) - assert_equal(id(f.units.registry), id(b.units.registry)) - assert_equal(id(g.units.registry), id(h.units.registry)) - assert_equal(id(g.units.registry), id(ds.unit_registry)) - - def unary_op_registry_comparison(op): - c 
= op(a) - d = op(b) - - assert_equal(id(c.units.registry), id(ds.unit_registry)) - assert_equal(id(d.units.registry), id(b.units.registry)) - - binary_ops = [operator.add, operator.sub, operator.mul, - operator.truediv] - if hasattr(operator, "div"): - binary_ops.append(operator.div) - for op in binary_ops: - binary_op_registry_comparison(op) - - for op in [operator.abs, operator.neg, operator.pos]: - unary_op_registry_comparison(op) - -def test_to_value(): - - a = YTArray([1.0, 2.0, 3.0], "kpc") - assert_equal(a.to_value(), np.array([1.0, 2.0, 3.0])) - assert_equal(a.to_value(), a.value) - assert_equal(a.to_value("km"), a.in_units("km").value) - - b = YTQuantity(5.5, "Msun") - assert_equal(b.to_value(), 5.5) - assert_equal(b.to_value("g"), b.in_units("g").value) - -@requires_module("astropy") -def test_astropy(): - from yt.utilities.on_demand_imports import _astropy - - ap_arr = np.arange(10)*_astropy.units.km/_astropy.units.hr - yt_arr = YTArray(np.arange(10), "km/hr") - yt_arr2 = YTArray.from_astropy(ap_arr) - - ap_quan = 10.*_astropy.units.Msun**0.5/(_astropy.units.kpc**3) - yt_quan = YTQuantity(10., "sqrt(Msun)/kpc**3") - yt_quan2 = YTQuantity.from_astropy(ap_quan) - - assert_array_equal(ap_arr, yt_arr.to_astropy()) - assert_array_equal(yt_arr, YTArray.from_astropy(ap_arr)) - assert_array_equal(yt_arr, yt_arr2) - - assert_equal(ap_quan, yt_quan.to_astropy()) - assert_equal(yt_quan, YTQuantity.from_astropy(ap_quan)) - assert_equal(yt_quan, yt_quan2) - - assert_array_equal(yt_arr, YTArray.from_astropy(yt_arr.to_astropy())) - assert_equal(yt_quan, YTQuantity.from_astropy(yt_quan.to_astropy())) - -@requires_module("pint") -def test_pint(): - from pint import UnitRegistry - - ureg = UnitRegistry() - - p_arr = np.arange(10)*ureg.km/ureg.hr - yt_arr = YTArray(np.arange(10), "km/hr") - yt_arr2 = YTArray.from_pint(p_arr) - - p_quan = 10.*ureg.g**0.5/(ureg.mm**3) - yt_quan = YTQuantity(10., "sqrt(g)/mm**3") - yt_quan2 = YTQuantity.from_pint(p_quan) - - assert_array_equal(p_arr, yt_arr.to_pint()) - assert_equal(p_quan, yt_quan.to_pint()) - assert_array_equal(yt_arr, YTArray.from_pint(p_arr)) - assert_array_equal(yt_arr, yt_arr2) - - assert_equal(p_quan.magnitude, yt_quan.to_pint().magnitude) - assert_equal(p_quan, yt_quan.to_pint()) - assert_equal(yt_quan, YTQuantity.from_pint(p_quan)) - assert_equal(yt_quan, yt_quan2) - - assert_array_equal(yt_arr, YTArray.from_pint(yt_arr.to_pint())) - assert_equal(yt_quan, YTQuantity.from_pint(yt_quan.to_pint())) - -def test_subclass(): - - class YTASubclass(YTArray): - pass - - a = YTASubclass([4, 5, 6], 'g') - b = YTASubclass([7, 8, 9], 'kg') - nu = YTASubclass([10, 11, 12], '') - nda = np.array([3, 4, 5]) - yta = YTArray([6, 7, 8], 'mg') - loq = [YTQuantity(6, 'mg'), YTQuantity(7, 'mg'), YTQuantity(8, 'mg')] - ytq = YTQuantity(4, 'cm') - ndf = np.float64(3) - - def op_comparison(op, inst1, inst2, compare_class): - assert_isinstance(op(inst1, inst2), compare_class) - assert_isinstance(op(inst2, inst1), compare_class) - - ops = [operator.mul, operator.truediv] - if hasattr(operator, "div"): - ops.append(operator.div) - for op in ops: - for inst in (b, ytq, ndf, yta, nda, loq): - op_comparison(op, a, inst, YTASubclass) - - op_comparison(op, ytq, nda, YTArray) - op_comparison(op, ytq, yta, YTArray) - - for op in (operator.add, operator.sub): - op_comparison(op, nu, nda, YTASubclass) - op_comparison(op, a, b, YTASubclass) - op_comparison(op, a, yta, YTASubclass) - op_comparison(op, a, loq, YTASubclass) - - assert_isinstance(a[0], YTQuantity) - 
assert_isinstance(a[:], YTASubclass) - assert_isinstance(a[:2], YTASubclass) - assert_isinstance(YTASubclass(yta), YTASubclass) - -@requires_module('h5py') -def test_h5_io(): - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - ds = fake_random_ds(64, nprocs=1, length_unit=10) - - warr = ds.arr(np.random.random((256, 256)), 'code_length') - - warr.write_hdf5('test.h5') - - iarr = YTArray.from_hdf5('test.h5') - - assert_equal(warr, iarr) - assert_equal(warr.units.registry['code_length'], iarr.units.registry['code_length']) - - warr.write_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group') - - giarr = YTArray.from_hdf5('test.h5', dataset_name="test_dset", group_name='/arrays/test_group') - - assert_equal(warr, giarr) - - os.chdir(curdir) - shutil.rmtree(tmpdir) - -def test_equivalencies(): - from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, mh, me, \ - mass_sun_cgs, G, stefan_boltzmann_constant_cgs - import yt.units as u - - # Mass-energy - - E = mp.in_units("keV","mass_energy") - assert_equal(E, mp*clight*clight) - assert_allclose_units(mp, E.in_units("g", "mass_energy")) - - # Thermal - - T = YTQuantity(1.0e8,"K") - E = T.in_units("W*hr","thermal") - assert_equal(E, (kboltz*T).in_units("W*hr")) - assert_allclose_units(T, E.in_units("K", "thermal")) - - # Spectral - - l = YTQuantity(4000.,"angstrom") - nu = l.in_units("Hz","spectral") - assert_equal(nu, clight/l) - E = hcgs*nu - l2 = E.in_units("angstrom", "spectral") - assert_allclose_units(l, l2) - nu2 = clight/l2.in_units("cm") - assert_allclose_units(nu, nu2) - E2 = nu2.in_units("keV", "spectral") - assert_allclose_units(E2, E.in_units("keV")) - - # Sound-speed - - mu = 0.6 - gg = 5./3. - c_s = T.in_units("km/s", equivalence="sound_speed") - assert_equal(c_s, np.sqrt(gg*kboltz*T/(mu*mh))) - assert_allclose_units(T, c_s.in_units("K","sound_speed")) - - mu = 0.5 - gg = 4./3. 
- c_s = T.in_units("km/s","sound_speed", mu=mu, gamma=gg) - assert_equal(c_s, np.sqrt(gg*kboltz*T/(mu*mh))) - assert_allclose_units(T, c_s.in_units("K","sound_speed", mu=mu, gamma=gg)) - - # Lorentz - - v = 0.8*clight - g = v.in_units("dimensionless","lorentz") - g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), "dimensionless") - assert_allclose_units(g, g2) - v2 = g2.in_units("mile/hr", "lorentz") - assert_allclose_units(v2, v.in_units("mile/hr")) - - # Schwarzschild - - R = mass_sun_cgs.in_units("kpc","schwarzschild") - assert_equal(R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)) - assert_allclose_units(mass_sun_cgs, R.in_units("g", "schwarzschild")) - - # Compton - - l = me.in_units("angstrom","compton") - assert_equal(l, hcgs/(me*clight)) - assert_allclose_units(me, l.in_units("g", "compton")) - - # Number density - - rho = mp/u.cm**3 - - n = rho.in_units("cm**-3","number_density") - assert_equal(n, rho/(mh*0.6)) - assert_allclose_units(rho, n.in_units("g/cm**3","number_density")) - - n = rho.in_units("cm**-3", equivalence="number_density", mu=0.75) - assert_equal(n, rho/(mh*0.75)) - assert_allclose_units(rho, n.in_units("g/cm**3", equivalence="number_density", mu=0.75)) - - # Effective temperature - - T = YTQuantity(1.0e4, "K") - F = T.in_units("erg/s/cm**2",equivalence="effective_temperature") - assert_equal(F, stefan_boltzmann_constant_cgs*T**4) - assert_allclose_units(T, F.in_units("K", equivalence="effective_temperature")) - - # to_value test - - assert_equal(F.value, T.to_value("erg/s/cm**2", equivalence="effective_temperature")) - assert_equal(n.value, rho.to_value("cm**-3", equivalence="number_density", mu=0.75)) - -def test_electromagnetic(): - from yt.units.dimensions import charge_mks, pressure, current_cgs, \ - magnetic_field_mks, magnetic_field_cgs, power - from yt.utilities.physical_constants import mu_0, qp - from yt.utilities.physical_ratios import speed_of_light_cm_per_s - - # Various tests of SI and CGS electromagnetic units - - qp_mks = qp.in_units("C", "SI") - assert_equal(qp_mks.units.dimensions, charge_mks) - assert_array_almost_equal(qp_mks.v, 10.0*qp.v/speed_of_light_cm_per_s) - - qp_cgs = qp_mks.in_units("esu", "CGS") - assert_array_almost_equal(qp_cgs, qp) - assert_equal(qp_cgs.units.dimensions, qp.units.dimensions) - - qp_mks_k = qp.in_units("kC", "SI") - assert_array_almost_equal(qp_mks_k.v, 1.0e-2*qp.v/speed_of_light_cm_per_s) - - B = YTQuantity(1.0, "T") - B_cgs = B.in_units("gauss", "CGS") - assert_equal(B.units.dimensions, magnetic_field_mks) - assert_equal(B_cgs.units.dimensions, magnetic_field_cgs) - assert_array_almost_equal(B_cgs, YTQuantity(1.0e4, "gauss")) - - u_mks = B*B/(2*mu_0) - assert_equal(u_mks.units.dimensions, pressure) - u_cgs = B_cgs*B_cgs/(8*np.pi) - assert_equal(u_cgs.units.dimensions, pressure) - assert_array_almost_equal(u_mks.in_cgs(), u_cgs) - - I = YTQuantity(1.0, "A") - I_cgs = I.in_units("statA", equivalence="CGS") - assert_array_almost_equal(I_cgs, YTQuantity(0.1*speed_of_light_cm_per_s, "statA")) - assert_array_almost_equal(I_cgs.in_units("mA", equivalence="SI"), I.in_units("mA")) - assert_equal(I_cgs.units.dimensions, current_cgs) - - R = YTQuantity(1.0, "ohm") - R_cgs = R.in_units("statohm", "CGS") - P_mks = I*I*R - P_cgs = I_cgs*I_cgs*R_cgs - assert_equal(P_mks.units.dimensions, power) - assert_equal(P_cgs.units.dimensions, power) - assert_array_almost_equal(P_cgs.in_cgs(), P_mks.in_cgs()) - assert_array_almost_equal(P_cgs.in_mks(), YTQuantity(1.0, "W")) - - V = YTQuantity(1.0, "statV") - V_mks = V.in_units("V", "SI") - 
assert_array_almost_equal(V_mks.v, 1.0e8*V.v/speed_of_light_cm_per_s) - -def test_ytarray_coercion(): - a = YTArray([1, 2, 3], 'cm') - q = YTQuantity(3, 'cm') - na = np.array([1, 2, 3]) - - assert_isinstance(a*q, YTArray) - assert_isinstance(q*na, YTArray) - assert_isinstance(q*3, YTQuantity) - assert_isinstance(q*np.float64(3), YTQuantity) - assert_isinstance(q*np.array(3), YTQuantity) - -def test_numpy_wrappers(): - a1 = YTArray([1, 2, 3], 'cm') - a2 = YTArray([2, 3, 4, 5, 6], 'cm') - a3 = YTArray([7, 8, 9, 10, 11], 'cm') - catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6] - intersect_answer = [2, 3] - union_answer = [1, 2, 3, 4, 5, 6] - vstack_answer = [[2, 3, 4, 5, 6], - [7, 8, 9,10, 11]] - vstack_answer_last_axis = [[ 2, 7], [ 3, 8], [ 4, 9], [ 5, 10], [ 6, 11]] - - assert_array_equal(YTArray(catenate_answer, 'cm'), - uconcatenate((a1, a2))) - assert_array_equal(catenate_answer, np.concatenate((a1, a2))) - - assert_array_equal(YTArray(intersect_answer, 'cm'), - uintersect1d(a1, a2)) - assert_array_equal(intersect_answer, np.intersect1d(a1, a2)) - - assert_array_equal(YTArray(union_answer, 'cm'), uunion1d(a1, a2)) - assert_array_equal(union_answer, np.union1d(a1, a2)) - - assert_array_equal(YTArray(catenate_answer, 'cm'), uhstack([a1, a2])) - assert_array_equal(catenate_answer, np.hstack([a1, a2])) - - assert_array_equal(YTArray(vstack_answer, 'cm'), uvstack([a2, a3])) - assert_array_equal(vstack_answer, np.vstack([a2, a3])) - - assert_array_equal(YTArray(vstack_answer, 'cm'), ustack([a2, a3])) - assert_array_equal(vstack_answer, np.stack([a2, a3])) - - assert_array_equal(YTArray(vstack_answer_last_axis, 'cm'), ustack([a2, a3], axis=-1)) - assert_array_equal(vstack_answer_last_axis, np.stack([a2, a3], axis=-1)) - -def test_dimensionless_conversion(): - a = YTQuantity(1, 'Zsun') - b = a.in_units('Zsun') - a.convert_to_units('Zsun') - assert_true(a.units.base_value == metallicity_sun) - assert_true(b.units.base_value == metallicity_sun) - -def test_modified_unit_division(): - ds1 = fake_random_ds(64) - ds2 = fake_random_ds(64) - - # this mocks comoving coordinates without going through the trouble - # of setting up a fake cosmological dataset - ds1.unit_registry.modify('m', 50) - - a = ds1.quan(3, 'm') - b = ds2.quan(3, 'm') - - ret = a/b - assert_true(ret == 0.5) - assert_true(ret.units.is_dimensionless) - assert_true(ret.units.base_value == 1.0) - -def test_load_and_save(): - tmpdir = tempfile.mkdtemp() - curdir = os.getcwd() - os.chdir(tmpdir) - - a = YTArray(np.random.random(10), "kpc") - b = YTArray(np.random.random(10), "Msun") - c = YTArray(np.random.random(10), "km/s") - - savetxt("arrays.dat", [a,b,c], delimiter=",") - - d, e = loadtxt("arrays.dat", usecols=(1,2), delimiter=",") - - assert_array_equal(b, d) - assert_array_equal(c, e) - - os.chdir(curdir) - shutil.rmtree(tmpdir) - -def test_trig_ufunc_degrees(): - for ufunc in (np.sin, np.cos, np.tan): - degree_values = np.random.random(10)*degree - radian_values = degree_values.in_units('radian') - assert_array_equal(ufunc(degree_values), ufunc(radian_values)) - -def test_builtin_sum(): - from yt.units import km - - arr = [1, 2, 3]*km - assert_equal(sum(arr), 6*km) - -def test_initialization_different_registries(): - from yt.testing import fake_random_ds - - ds1 = fake_random_ds(32, length_unit=1) - ds2 = fake_random_ds(32, length_unit=3) - - l1 = ds1.quan(0.3, 'unitary') - l2 = ds2.quan(l1, 'unitary') - - assert_almost_equal(float(l1.in_cgs()), 0.3) - assert_almost_equal(float(l2.in_cgs()), 0.9) - 
assert_almost_equal(float(ds1.quan(0.3, 'unitary').in_cgs()), 0.3) - assert_almost_equal(float(ds2.quan(0.3, 'unitary').in_cgs()), 0.9) - -def test_ones_and_zeros_like(): - data = YTArray([1, 2, 3], 'cm') - zd = np.zeros_like(data) - od = np.ones_like(data) - - assert_equal(zd, YTArray([0, 0, 0], 'cm')) - assert_equal(zd.units, data.units) - assert_equal(od, YTArray([1, 1, 1], 'cm')) - assert_equal(od.units, data.units) - -@requires_module('ipywidgets') -def test_display_ytarray(): - arr = YTArray([1,2,3], 'cm') - widget = display_ytarray(arr) - dropdown = widget.children[-1] - dropdown.value = 'm' - # Check that our original array did *not* change - assert_equal(arr.to_value(), np.array([1.0, 2.0, 3.0])) - # Check our values did change - assert_equal([float(_.value) for _ in widget.children[:-1]], - arr.to_value()/100.0) - - -def test_display_ytarray_too_large(): - arr = YTArray([1,2,3,4], 'cm') - assert_raises(YTArrayTooLargeToDisplay, display_ytarray, arr) - -def test_clip(): - km = YTQuantity(1, 'km') - - data = [1, 2, 3, 4, 5, 6] * km - answer = [2, 2, 3, 4, 4, 4] * km - - ret = np.clip(data, 2, 4) - assert_array_equal(ret, answer) - assert ret.units == answer.units - - np.clip(data, 2, 4, out=data) - - assert_array_equal(data, answer) - assert data.units == answer.units - - left_edge = [0.0, 0.0, 0.0] * km - right_edge = [1.0, 1.0, 1.0] * km - - positions = [[0.0, 0.0, 0.0], - [1.0, 1.0, -0.1], - [1.5, 1.0, 0.9]] * km - np.clip(positions, left_edge, right_edge, positions) - assert positions.units == left_edge.units - assert positions.max() == 1.0 * km - assert positions.min() == 0.0 * km diff --git a/yt/units/unit_lookup_table.py b/yt/units/unit_lookup_table.py index e091e8e2960..44cb5f65350 100644 --- a/yt/units/unit_lookup_table.py +++ b/yt/units/unit_lookup_table.py @@ -1,229 +1 @@ -""" -The default unit symbol lookup table. - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.units import dimensions -from yt.utilities.physical_ratios import \ - cm_per_pc, cm_per_ly, cm_per_au, cm_per_rsun, cm_per_m, \ - mass_sun_grams, sec_per_year, sec_per_day, sec_per_hr, \ - sec_per_min, temp_sun_kelvin, luminosity_sun_ergs_per_sec, \ - metallicity_sun, erg_per_eV, amu_grams, mass_electron_grams, \ - cm_per_ang, jansky_cgs, mass_jupiter_grams, mass_earth_grams, \ - kelvin_per_rankine, speed_of_light_cm_per_s, planck_length_cm, \ - planck_charge_esu, planck_energy_erg, planck_mass_grams, \ - planck_temperature_K, planck_time_s, mass_hydrogen_grams, \ - grams_per_pound, standard_gravity_cm_per_s2, pascal_per_atm, \ - newton_cgs, cm_per_rearth, cm_per_rjup -import numpy as np - -# Lookup a unit symbol with the symbol string, and provide a tuple with the -# conversion factor to cgs and dimensionality. 
- -default_unit_symbol_lut = { - # base - "g": (1.0, dimensions.mass, 0.0, r"\rm{g}"), - "s": (1.0, dimensions.time, 0.0, r"\rm{s}"), - "K": (1.0, dimensions.temperature, 0.0, r"\rm{K}"), - "radian": (1.0, dimensions.angle, 0.0, r"\rm{radian}"), - - # other cgs - "dyne": (1.0, dimensions.force, 0.0, r"\rm{dyn}"), - "erg": (1.0, dimensions.energy, 0.0, r"\rm{erg}"), - "esu": (1.0, dimensions.charge_cgs, 0.0, r"\rm{esu}"), - "gauss": (1.0, dimensions.magnetic_field_cgs, 0.0, r"\rm{G}"), - "degC": (1.0, dimensions.temperature, -273.15, r"^\circ\rm{C}"), - "statA": (1.0, dimensions.current_cgs, 0.0, r"\rm{statA}"), - "statV": (1.0, dimensions.electric_potential_cgs, 0.0, r"\rm{statV}"), - "statohm": (1.0, dimensions.resistance_cgs, 0.0, r"\rm{statohm}"), - - # some SI - "m": (1.0e2, dimensions.length, 0.0, r"\rm{m}"), - "J": (1.0e7, dimensions.energy, 0.0, r"\rm{J}"), - "W": (1.0e7, dimensions.power, 0.0, r"\rm{W}"), - "Hz": (1.0, dimensions.rate, 0.0, r"\rm{Hz}"), - "N": (1.0e5, dimensions.force, 0.0, r"\rm{N}"), - "C": (1.0, dimensions.charge_mks, 0.0, r"\rm{C}"), - "A": (1.0, dimensions.current_mks, 0.0, r"\rm{A}"), - "T": (1000.0, dimensions.magnetic_field_mks, 0.0, r"\rm{T}"), - "Pa": (10.0, dimensions.pressure, 0.0, r"\rm{Pa}"), - "V": (1.0e7, dimensions.electric_potential_mks, 0.0, r"\rm{V}"), - "ohm": (1.0e7, dimensions.resistance_mks, 0.0, r"\Omega"), - - # Imperial and other non-metric units - "ft": (30.48, dimensions.length, 0.0, r"\rm{ft}"), - "mile": (160934, dimensions.length, 0.0, r"\rm{mile}"), - "degF": (kelvin_per_rankine, dimensions.temperature, -459.67, - "^\circ\rm{F}"), - "R": (kelvin_per_rankine, dimensions.temperature, 0.0, r"^\circ\rm{R}"), - "lbf": (grams_per_pound*standard_gravity_cm_per_s2, dimensions.force, 0.0, r"\rm{lbf}"), - "lbm": (grams_per_pound, dimensions.mass, 0.0, r"\rm{lbm}"), - "atm": (pascal_per_atm*10., dimensions.pressure, 0.0, r"\rm{atm}"), - - # dimensionless stuff - "h": (1.0, dimensions.dimensionless, 0.0, r"h"), # needs to be added for rho_crit_now - "dimensionless": (1.0, dimensions.dimensionless, 0.0, r""), - - # times - "min": (sec_per_min, dimensions.time, 0.0, r"\rm{min}"), - "hr": (sec_per_hr, dimensions.time, 0.0, r"\rm{hr}"), - "day": (sec_per_day, dimensions.time, 0.0, r"\rm{d}"), - "d": (sec_per_day, dimensions.time, 0.0, r"\rm{d}"), - "yr": (sec_per_year, dimensions.time, 0.0, r"\rm{yr}"), - - # Velocities - "c": (speed_of_light_cm_per_s, dimensions.velocity, 0.0, r"\rm{c}"), - - # Solar units - "Msun": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"), - "msun": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"), - "Rsun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"), - "rsun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"), - "R_sun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"), - "r_sun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"), - "Lsun": (luminosity_sun_ergs_per_sec, dimensions.power, 0.0, r"L_\odot"), - "Tsun": (temp_sun_kelvin, dimensions.temperature, 0.0, r"T_\odot"), - "Zsun": (metallicity_sun, dimensions.dimensionless, 0.0, r"Z_\odot"), - "Mjup": (mass_jupiter_grams, dimensions.mass, 0.0, r"M_{\rm{Jup}}"), - "Mearth": (mass_earth_grams, dimensions.mass, 0.0, r"M_\oplus"), - - # astro distances - "AU": (cm_per_au, dimensions.length, 0.0, r"\rm{AU}"), - "au": (cm_per_au, dimensions.length, 0.0, r"\rm{AU}"), - "ly": (cm_per_ly, dimensions.length, 0.0, r"\rm{ly}"), - "pc": (cm_per_pc, dimensions.length, 0.0, r"\rm{pc}"), - - # angles - "degree": (np.pi/180., dimensions.angle, 0.0, r"\rm{deg}"), # degrees 
- "arcmin": (np.pi/10800., dimensions.angle, 0.0, - r"\rm{arcmin}"), # arcminutes - "arcsec": (np.pi/648000., dimensions.angle, 0.0, - r"\rm{arcsec}"), # arcseconds - "mas": (np.pi/648000000., dimensions.angle, 0.0, - r"\rm{mas}"), # milliarcseconds - "hourangle": (np.pi/12., dimensions.angle, 0.0, r"\rm{HA}"), # hour angle - "steradian": (1.0, dimensions.solid_angle, 0.0, r"\rm{sr}"), - "lat": (-np.pi/180.0, dimensions.angle, 90.0, r"\rm{Latitude}"), - "lon": (np.pi/180.0, dimensions.angle, -180.0, r"\rm{Longitude}"), - - # misc - "eV": (erg_per_eV, dimensions.energy, 0.0, r"\rm{eV}"), - "amu": (amu_grams, dimensions.mass, 0.0, r"\rm{amu}"), - "angstrom": (cm_per_ang, dimensions.length, 0.0, r"\AA"), - "Jy": (jansky_cgs, dimensions.specific_flux, 0.0, r"\rm{Jy}"), - "counts": (1.0, dimensions.dimensionless, 0.0, r"\rm{counts}"), - "photons": (1.0, dimensions.dimensionless, 0.0, r"\rm{photons}"), - "me": (mass_electron_grams, dimensions.mass, 0.0, r"m_e"), - "mp": (mass_hydrogen_grams, dimensions.mass, 0.0, r"m_p"), - "mol": (1.0/amu_grams, dimensions.dimensionless, 0.0, r"\rm{mol}"), - 'Sv': (cm_per_m**2, dimensions.specific_energy, 0.0, r"\rm{Sv}"), - "rayleigh": (0.25e6/np.pi, dimensions.count_intensity, 0.0, r"\rm{R}"), - - # for AstroPy compatibility - "solMass": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"), - "solRad": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"), - "solLum": (luminosity_sun_ergs_per_sec, dimensions.power, 0.0, r"L_\odot"), - "dyn": (1.0, dimensions.force, 0.0, r"\rm{dyn}"), - "sr": (1.0, dimensions.solid_angle, 0.0, r"\rm{sr}"), - "rad": (1.0, dimensions.angle, 0.0, r"\rm{rad}"), - "deg": (np.pi/180., dimensions.angle, 0.0, r"\rm{deg}"), - "Fr": (1.0, dimensions.charge_cgs, 0.0, r"\rm{Fr}"), - "G": (1.0, dimensions.magnetic_field_cgs, 0.0, r"\rm{G}"), - "Angstrom": (cm_per_ang, dimensions.length, 0.0, r"\AA"), - "statC": (1.0, dimensions.charge_cgs, 0.0, r"\rm{statC}"), - - # Planck units - "m_pl": (planck_mass_grams, dimensions.mass, 0.0, r"m_{\rm{P}}"), - "l_pl": (planck_length_cm, dimensions.length, 0.0, r"\ell_\rm{P}"), - "t_pl": (planck_time_s, dimensions.time, 0.0, r"t_{\rm{P}}"), - "T_pl": (planck_temperature_K, dimensions.temperature, 0.0, r"T_{\rm{P}}"), - "q_pl": (planck_charge_esu, dimensions.charge_cgs, 0.0, r"q_{\rm{P}}"), - "E_pl": (planck_energy_erg, dimensions.energy, 0.0, r"E_{\rm{P}}"), - - # Geometrized units - "m_geom": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"), - "l_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"), - "t_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"), - - # Some Solar System units - "R_earth": (cm_per_rearth, dimensions.length, 0.0, r"R_\oplus"), - "r_earth": (cm_per_rearth, dimensions.length, 0.0, r"R_\oplus"), - "R_jup": (cm_per_rjup, dimensions.length, 0.0, r"R_\mathrm{Jup}"), - "r_jup": (cm_per_rjup, dimensions.length, 0.0, r"R_\mathrm{Jup}"), -} - -# This dictionary formatting from magnitude package, credit to Juan Reyero. 
-unit_prefixes = { - 'Y': 1e24, # yotta - 'Z': 1e21, # zetta - 'E': 1e18, # exa - 'P': 1e15, # peta - 'T': 1e12, # tera - 'G': 1e9, # giga - 'M': 1e6, # mega - 'k': 1e3, # kilo - 'd': 1e1, # deci - 'c': 1e-2, # centi - 'm': 1e-3, # mili - 'u': 1e-6, # micro - 'n': 1e-9, # nano - 'p': 1e-12, # pico - 'f': 1e-15, # femto - 'a': 1e-18, # atto - 'z': 1e-21, # zepto - 'y': 1e-24, # yocto -} - -latex_prefixes = { - "u": r"\mu", - } - -prefixable_units = [ - "m", - "pc", - "mcm", - "pccm", - "g", - "eV", - "s", - "yr", - "K", - "dyne", - "erg", - "esu", - "J", - "Hz", - "W", - "gauss", - "G", - "Jy", - "N", - "T", - "A", - "C", - "statA", - "Pa", - "V", - "statV", - "ohm", - "statohm", - "Sv", -] - -default_base_units = { - dimensions.mass: 'g', - dimensions.length: 'cm', - dimensions.time: 's', - dimensions.temperature: 'K', - dimensions.angle: 'radian', - dimensions.current_mks: 'A', -} +from unyt._unit_lookup_table import * diff --git a/yt/units/unit_object.py b/yt/units/unit_object.py index cff3b994221..f53c2948350 100644 --- a/yt/units/unit_object.py +++ b/yt/units/unit_object.py @@ -1,750 +1 @@ -""" -A class that represents a unit symbol. - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.extern.six import text_type -from sympy import \ - Expr, Mul, Add, Number, \ - Pow, Symbol, Integer, \ - Float, Basic, Rational, sqrt -from sympy.core.numbers import One -from sympy import sympify, latex -from sympy.parsing.sympy_parser import \ - parse_expr, auto_number, rationalize -from keyword import iskeyword -from yt.units.dimensions import \ - base_dimensions, temperature, \ - dimensionless, current_mks, \ - angle -from yt.units.equivalencies import \ - equivalence_registry -from yt.units.unit_lookup_table import \ - unit_prefixes, prefixable_units, latex_prefixes, \ - default_base_units -from yt.units.unit_registry import \ - UnitRegistry, \ - UnitParseError -from yt.utilities.exceptions import YTUnitsNotReducible - -import copy -import token - -class InvalidUnitOperation(Exception): - pass - -default_unit_registry = UnitRegistry() - -sympy_one = sympify(1) - -global_dict = { - 'Symbol': Symbol, - 'Integer': Integer, - 'Float': Float, - 'Rational': Rational, - 'sqrt': sqrt -} - -unit_system_registry = {} - -def auto_positive_symbol(tokens, local_dict, global_dict): - """ - Inserts calls to ``Symbol`` for undefined variables. - Passes in positive=True as a keyword argument. 
- Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol - """ - result = [] - prevTok = (None, None) - - tokens.append((None, None)) # so zip traverses all tokens - for tok, nextTok in zip(tokens, tokens[1:]): - tokNum, tokVal = tok - nextTokNum, nextTokVal = nextTok - if tokNum == token.NAME: - name = tokVal - - if (name in ['True', 'False', 'None'] - or iskeyword(name) - or name in local_dict - # Don't convert attribute access - or (prevTok[0] == token.OP and prevTok[1] == '.') - # Don't convert keyword arguments - or (prevTok[0] == token.OP and prevTok[1] in ('(', ',') - and nextTokNum == token.OP and nextTokVal == '=')): - result.append((token.NAME, name)) - continue - elif name in global_dict: - obj = global_dict[name] - if isinstance(obj, (Basic, type)) or callable(obj): - result.append((token.NAME, name)) - continue - - result.extend([ - (token.NAME, 'Symbol'), - (token.OP, '('), - (token.NAME, repr(str(name))), - (token.OP, ','), - (token.NAME, 'positive'), - (token.OP, '='), - (token.NAME, 'True'), - (token.OP, ')'), - ]) - else: - result.append((tokNum, tokVal)) - - prevTok = (tokNum, tokVal) - - return result - -def get_latex_representation(expr, registry): - symbol_table = {} - for ex in expr.free_symbols: - try: - symbol_table[ex] = registry.lut[str(ex)][3] - except Exception: - symbol_table[ex] = r"\rm{" + str(ex).replace('_', '\ ') + "}" - - # invert the symbol table dict to look for keys with identical values - invert_symbols = {} - for key, value in symbol_table.items(): - if value not in invert_symbols: - invert_symbols[value] = [key] - else: - invert_symbols[value].append(key) - - # if there are any units with identical latex representations, substitute - # units to avoid uncanceled terms in the final latex expression. - for val in invert_symbols: - symbols = invert_symbols[val] - for i in range(1, len(symbols)): - expr = expr.subs(symbols[i], symbols[0]) - prefix = None - if isinstance(expr, Mul): - coeffs = expr.as_coeff_Mul() - if coeffs[0] == 1 or not isinstance(coeffs[0], Number): - pass - else: - expr = coeffs[1] - prefix = Float(coeffs[0], 2) - latex_repr = latex(expr, symbol_names=symbol_table, mul_symbol="dot", - fold_frac_powers=True, fold_short_frac=True) - - if prefix is not None: - latex_repr = latex(prefix, mul_symbol="times") + '\\ ' + latex_repr - - if latex_repr == '1': - return '' - else: - return latex_repr - -unit_text_transform = (auto_positive_symbol, auto_number, rationalize) - -class Unit(Expr): - """ - A symbolic unit, using sympy functionality. We only add "dimensions" so - that sympy understands relations between different units. - - """ - - # Set some assumptions for sympy. - is_positive = True # make sqrt(m**2) --> m - is_commutative = True - is_number = False - - # Extra attributes - __slots__ = ["expr", "is_atomic", "base_value", "base_offset", "dimensions", - "registry", "_latex_repr"] - - def __new__(cls, unit_expr=sympy_one, base_value=None, base_offset=0.0, - dimensions=None, registry=None, latex_repr=None, **assumptions): - """ - Create a new unit. May be an atomic unit (like a gram) or combinations - of atomic units (like g / cm**3). - - Parameters - ---------- - unit_expr : Unit object, sympy.core.expr.Expr object, or str - The symbolic unit expression. - base_value : float - The unit's value in yt's base units. - base_offset : float - The offset necessary to normalize temperature units to a common - zero point. - dimensions : sympy.core.expr.Expr - A sympy expression representing the dimensionality of this unit. 
- It must contain only mass, length, time, temperature and angle - symbols. - registry : UnitRegistry object - The unit registry we use to interpret unit symbols. - latex_repr : string - A string to render the unit as LaTeX - - Additional keyword arguments are passed as assumptions to the Sympy Expr - initializer - - """ - # Simplest case. If user passes a Unit object, just use the expr. - unit_key = None - if isinstance(unit_expr, (str, bytes, text_type)): - if isinstance(unit_expr, bytes): - unit_expr = unit_expr.decode("utf-8") - - if registry and unit_expr in registry.unit_objs: - return registry.unit_objs[unit_expr] - else: - unit_key = unit_expr - if not unit_expr: - # Bug catch... - # if unit_expr is an empty string, parse_expr fails hard... - unit_expr = "1" - try: - unit_expr = parse_expr(unit_expr, global_dict=global_dict, - transformations=unit_text_transform) - except SyntaxError as e: - msg = ("Unit expression %s raised an error " - "during parsing:\n%s" % (unit_expr, repr(e))) - raise UnitParseError(msg) - elif isinstance(unit_expr, Unit): - # grab the unit object's sympy expression. - unit_expr = unit_expr.expr - elif hasattr(unit_expr, 'units') and hasattr(unit_expr, 'value'): - # something that looks like a YTArray, grab the unit and value - if unit_expr.shape != (): - raise UnitParseError( - 'Cannot create a unit from a non-scalar YTArray, received: ' - '%s' % (unit_expr, )) - value = unit_expr.value - if value == 1: - unit_expr = unit_expr.units.expr - else: - unit_expr = unit_expr.value*unit_expr.units.expr - # Make sure we have an Expr at this point. - if not isinstance(unit_expr, Expr): - raise UnitParseError("Unit representation must be a string or " \ - "sympy Expr. %s has type %s." \ - % (unit_expr, type(unit_expr))) - - if unit_expr == sympy_one and dimensions is None: - dimensions = dimensionless - - if registry is None: - # Caller did not set the registry, so use the default. - registry = default_unit_registry - - # done with argument checking... - - # see if the unit is atomic. - is_atomic = False - if isinstance(unit_expr, Symbol): - is_atomic = True - - # - # check base_value and dimensions - # - - if base_value is not None: - # check that base_value is a float or can be converted to one - try: - base_value = float(base_value) - except ValueError: - raise UnitParseError("Could not use base_value as a float. " \ - "base_value is '%s' (type %s)." \ - % (base_value, type(base_value)) ) - - # check that dimensions is valid - if dimensions is not None: - validate_dimensions(dimensions) - else: - # lookup the unit symbols - unit_data = _get_unit_data_from_expr(unit_expr, registry.lut) - base_value = unit_data[0] - dimensions = unit_data[1] - if len(unit_data) > 2: - base_offset = unit_data[2] - latex_repr = unit_data[3] - else: - base_offset = 0.0 - - # Create obj with superclass construct. - obj = Expr.__new__(cls, **assumptions) - - # Attach attributes to obj. - obj.expr = unit_expr - obj.is_atomic = is_atomic - obj.base_value = base_value - obj.base_offset = base_offset - obj.dimensions = dimensions - obj._latex_repr = latex_repr - obj.registry = registry - - if unit_key is not None: - registry.unit_objs[unit_key] = obj - - # Return `obj` so __init__ can handle it. 
- - return obj - - _latex_expr = None - @property - def latex_repr(self): - if self._latex_repr is not None: - return self._latex_repr - if self.expr.is_Atom: - expr = self.expr - else: - expr = self.expr.copy() - self._latex_repr = get_latex_representation(expr, self.registry) - return self._latex_repr - - ### Some sympy conventions - def __getnewargs__(self): - return (self.expr, self.is_atomic, self.base_value, self.dimensions, - self.registry) - - def __hash__(self): - return super(Unit, self).__hash__() - - def _hashable_content(self): - return (self.expr, self.is_atomic, self.base_value, self.dimensions, - self.registry) - ### end sympy conventions - - def __repr__(self): - if self.expr == sympy_one: - return "(dimensionless)" - # @todo: don't use dunder method? - return self.expr.__repr__() - - def __str__(self): - if self.expr == sympy_one: - return "dimensionless" - # @todo: don't use dunder method? - return self.expr.__str__() - - # for sympy.printing - def _sympystr(self, *args): - return str(self.expr) - - # - # Start unit operations - # - - def __mul__(self, u): - """ Multiply Unit with u (Unit object). """ - if not isinstance(u, Unit): - raise InvalidUnitOperation("Tried to multiply a Unit object with " - "'%s' (type %s). This behavior is " - "undefined." % (u, type(u))) - - base_offset = 0.0 - if self.base_offset or u.base_offset: - if u.dimensions in (temperature, angle) and self.is_dimensionless: - base_offset = u.base_offset - elif self.dimensions in (temperature, angle) and u.is_dimensionless: - base_offset = self.base_offset - else: - raise InvalidUnitOperation("Quantities with units of Fahrenheit " - "and Celsius or angles cannot be multiplied.") - - return Unit(self.expr * u.expr, - base_value=(self.base_value * u.base_value), - base_offset=base_offset, - dimensions=(self.dimensions * u.dimensions), - registry=self.registry) - - def __div__(self, u): - """ Divide Unit by u (Unit object). """ - if not isinstance(u, Unit): - raise InvalidUnitOperation("Tried to divide a Unit object by '%s' " - "(type %s). This behavior is " - "undefined." % (u, type(u))) - - base_offset = 0.0 - if self.base_offset or u.base_offset: - if u.dimensions in (temperature, angle) and self.is_dimensionless: - base_offset = u.base_offset - elif self.dimensions in (temperature, angle) and u.is_dimensionless: - base_offset = self.base_offset - else: - raise InvalidUnitOperation("Quantities with units of Farhenheit " - "and Celsius cannot be multiplied.") - - return Unit(self.expr / u.expr, - base_value=(self.base_value / u.base_value), - base_offset=base_offset, - dimensions=(self.dimensions / u.dimensions), - registry=self.registry) - - __truediv__ = __div__ - - def __pow__(self, p): - """ Take Unit to power p (float). """ - try: - p = Rational(str(p)).limit_denominator() - except ValueError: - raise InvalidUnitOperation("Tried to take a Unit object to the " \ - "power '%s' (type %s). Failed to cast " \ - "it to a float." % (p, type(p)) ) - - return Unit(self.expr**p, base_value=(self.base_value**p), - dimensions=(self.dimensions**p), - registry=self.registry) - - def __eq__(self, u): - """ Test unit equality. """ - if not isinstance(u, Unit): - return False - return \ - (self.base_value == u.base_value and self.dimensions == u.dimensions) - - def __ne__(self, u): - """ Test unit inequality. 
""" - if not isinstance(u, Unit): - return True - if self.base_value != u.base_value: - return True - # use 'is' comparison dimensions to avoid expensive sympy operation - if self.dimensions is u.dimensions: - return False - return self.dimensions != u.dimensions - - def copy(self): - return copy.deepcopy(self) - - def __deepcopy__(self, memodict=None): - if memodict is None: - memodict = {} - expr = str(self.expr) - base_value = copy.deepcopy(self.base_value) - base_offset = copy.deepcopy(self.base_offset) - dimensions = copy.deepcopy(self.dimensions) - lut = copy.deepcopy(self.registry.lut) - registry = UnitRegistry(lut=lut) - return Unit(expr, base_value, base_offset, dimensions, registry) - - # - # End unit operations - # - - def same_dimensions_as(self, other_unit): - """ Test if dimensions are the same. """ - # test first for 'is' equality to avoid expensive sympy operation - if self.dimensions is other_unit.dimensions: - return True - return self.dimensions == other_unit.dimensions - - @property - def is_dimensionless(self): - return self.dimensions is sympy_one - - @property - def is_code_unit(self): - for atom in self.expr.atoms(): - if str(atom).startswith("code") or atom.is_Number: - pass - else: - return False - return True - - def list_equivalencies(self): - """ - Lists the possible equivalencies associated with this unit object - """ - for k, v in equivalence_registry.items(): - if self.has_equivalent(k): - print(v()) - - def has_equivalent(self, equiv): - """ - Check to see if this unit object as an equivalent unit in *equiv*. - """ - try: - this_equiv = equivalence_registry[equiv]() - except KeyError: - raise KeyError("No such equivalence \"%s\"." % equiv) - old_dims = self.dimensions - return old_dims in this_equiv.dims - - def get_base_equivalent(self, unit_system="cgs"): - """ - Create and return dimensionally-equivalent units in a specified base. - """ - yt_base_unit_string = _get_system_unit_string(self.dimensions, default_base_units) - yt_base_unit = Unit(yt_base_unit_string, base_value=1.0, - dimensions=self.dimensions, registry=self.registry) - if unit_system == "cgs": - if current_mks in self.dimensions.free_symbols: - raise YTUnitsNotReducible(self, "cgs") - return yt_base_unit - else: - if hasattr(unit_system, "unit_registry"): - unit_system = unit_system.unit_registry.unit_system_id - elif unit_system == "code": - unit_system = self.registry.unit_system_id - unit_system = unit_system_registry[str(unit_system)] - units_string = _get_system_unit_string(self.dimensions, unit_system.base_units) - u = Unit(units_string, registry=self.registry) - base_value = get_conversion_factor(self, yt_base_unit)[0] - base_value /= get_conversion_factor(self, u)[0] - return Unit(units_string, base_value=base_value, - dimensions=self.dimensions, registry=self.registry) - - def get_cgs_equivalent(self): - """ - Create and return dimensionally-equivalent cgs units. - """ - return self.get_base_equivalent(unit_system="cgs") - - def get_mks_equivalent(self): - """ - Create and return dimensionally-equivalent mks units. - """ - return self.get_base_equivalent(unit_system="mks") - - def get_conversion_factor(self, other_units): - return get_conversion_factor(self, other_units) - - def latex_representation(self): - return self.latex_repr - -# -# Unit manipulation functions -# - -def get_conversion_factor(old_units, new_units): - """ - Get the conversion factor between two units of equivalent dimensions. 
This - is the number you multiply data by to convert from values in `old_units` to - values in `new_units`. - - Parameters - ---------- - old_units: str or Unit object - The current units. - new_units : str or Unit object - The units we want. - - Returns - ------- - conversion_factor : float - `old_units / new_units` - offset : float or None - Offset between the old unit and new unit. - - """ - ratio = old_units.base_value / new_units.base_value - if old_units.base_offset == 0 and new_units.base_offset == 0: - return (ratio, None) - else: - if old_units.dimensions in (temperature, angle): - return ratio, ratio*old_units.base_offset - new_units.base_offset - else: - raise InvalidUnitOperation( - "Fahrenheit and Celsius are not absolute temperature scales " - "and cannot be used in compound unit symbols.") - -# -# Helper functions -# - -def _get_unit_data_from_expr(unit_expr, unit_symbol_lut): - """ - Grabs the total base_value and dimensions from a valid unit expression. - - Parameters - ---------- - unit_expr: Unit object, or sympy Expr object - The expression containing unit symbols. - unit_symbol_lut: dict - Provides the unit data for each valid unit symbol. - - """ - # The simplest case first - if isinstance(unit_expr, Unit): - return (unit_expr.base_value, unit_expr.dimensions) - - # Now for the sympy possibilities - if isinstance(unit_expr, Symbol): - return _lookup_unit_symbol(str(unit_expr), unit_symbol_lut) - - if isinstance(unit_expr, Number): - return (float(unit_expr), sympy_one) - - if isinstance(unit_expr, Pow): - unit_data = _get_unit_data_from_expr(unit_expr.args[0], unit_symbol_lut) - power = unit_expr.args[1] - if isinstance(power, Symbol): - raise UnitParseError("Invalid unit expression '%s'." % unit_expr) - conv = float(unit_data[0]**power) - unit = unit_data[1]**power - return (conv, unit) - - if isinstance(unit_expr, Mul): - base_value = 1.0 - dimensions = 1 - for expr in unit_expr.args: - unit_data = _get_unit_data_from_expr(expr, unit_symbol_lut) - base_value *= unit_data[0] - dimensions *= unit_data[1] - - return (float(base_value), dimensions) - - raise UnitParseError("Cannot parse for unit data from '%s'. Please supply" \ - " an expression of only Unit, Symbol, Pow, and Mul" \ - "objects." % str(unit_expr)) - - -def _lookup_unit_symbol(symbol_str, unit_symbol_lut): - """ - Searches for the unit data tuple corresponding to the given symbol. - - Parameters - ---------- - symbol_str : str - The unit symbol to look up. - unit_symbol_lut : dict - Dictionary with symbols as keys and unit data tuples as values. 
- - """ - if symbol_str in unit_symbol_lut: - # lookup successful, return the tuple directly - return unit_symbol_lut[symbol_str] - - # could still be a known symbol with a prefix - possible_prefix = symbol_str[0] - if possible_prefix in unit_prefixes: - # the first character could be a prefix, check the rest of the symbol - symbol_wo_prefix = symbol_str[1:] - - unit_is_si_prefixable = (symbol_wo_prefix in unit_symbol_lut and - symbol_wo_prefix in prefixable_units) - - if unit_is_si_prefixable: - # lookup successful, it's a symbol with a prefix - unit_data = unit_symbol_lut[symbol_wo_prefix] - prefix_value = unit_prefixes[possible_prefix] - - if possible_prefix in latex_prefixes: - latex_repr = symbol_str.replace( - possible_prefix, '{'+latex_prefixes[possible_prefix]+'}') - else: - # Need to add some special handling for comoving units - # this is fine for now, but it wouldn't work for a general - # unit that has an arbitrary LaTeX representation - if symbol_wo_prefix != 'cm' and symbol_wo_prefix.endswith('cm'): - sub_symbol_wo_prefix = symbol_wo_prefix[:-2] - sub_symbol_str = symbol_str[:-2] - else: - sub_symbol_wo_prefix = symbol_wo_prefix - sub_symbol_str = symbol_str - latex_repr = unit_data[3].replace( - '{' + sub_symbol_wo_prefix + '}', '{' + sub_symbol_str + '}') - - # Leave offset and dimensions the same, but adjust scale factor and - # LaTeX representation - ret = (unit_data[0] * prefix_value, unit_data[1], unit_data[2], - latex_repr) - - unit_symbol_lut[symbol_str] = ret - - return ret - - # no dice - if symbol_str.startswith('code_'): - raise UnitParseError( - "Code units have not been defined. \n" - "Try creating the array or quantity using ds.arr or ds.quan instead.") - else: - raise UnitParseError("Could not find unit symbol '%s' in the provided " \ - "symbols." % symbol_str) - -def validate_dimensions(dimensions): - if isinstance(dimensions, Mul): - for dim in dimensions.args: - validate_dimensions(dim) - elif isinstance(dimensions, Symbol): - if dimensions not in base_dimensions: - raise UnitParseError("Dimensionality expression contains an " - "unknown symbol '%s'." % dimensions) - elif isinstance(dimensions, Pow): - if not isinstance(dimensions.args[1], Number): - raise UnitParseError("Dimensionality expression '%s' contains a " - "unit symbol as a power." % dimensions) - elif isinstance(dimensions, (Add, Number)): - if not isinstance(dimensions, One): - raise UnitParseError("Only dimensions that are instances of Pow, " - "Mul, or symbols in the base dimensions are " - "allowed. Got dimensions '%s'" % dimensions) - elif not isinstance(dimensions, Basic): - raise UnitParseError("Bad dimensionality expression '%s'." % dimensions) - -def _get_system_unit_string(dimensions, base_units): - # The dimensions of a unit object is the product of the base dimensions. - # Use sympy to factor the dimensions into base CGS unit symbols. 
- units = [] - my_dims = dimensions.expand() - if my_dims is dimensionless: - return "" - if my_dims in base_units: - return base_units[my_dims] - for factor in my_dims.as_ordered_factors(): - dim = list(factor.free_symbols)[0] - unit_string = str(base_units[dim]) - if factor.is_Pow: - power_string = "**(%s)" % factor.as_base_exp()[1] - else: - power_string = "" - units.append("(%s)%s" % (unit_string, power_string)) - return " * ".join(units) - -def _define_unit(registry, symbol, value, tex_repr=None, offset=None, prefixable=False): - from yt.units.yt_array import YTQuantity, iterable - if symbol in registry: - raise RuntimeError("The symbol \"%s\" is already in the unit registry!" % symbol) - if not isinstance(value, YTQuantity): - if iterable(value) and len(value) == 2: - value = YTQuantity(value[0], value[1]) - else: - raise RuntimeError("\"value\" needs to be a (value, unit) tuple!") - base_value = float(value.in_base(unit_system='cgs-ampere')) - dimensions = value.units.dimensions - registry.add(symbol, base_value, dimensions, tex_repr=tex_repr, offset=offset) - if prefixable: - prefixable_units.append(symbol) - -def define_unit(symbol, value, tex_repr=None, offset=None, prefixable=False): - """ - Define a new unit and add it to the default unit registry. - - Parameters - ---------- - symbol : string - The symbol for the new unit. - value : tuple or ~yt.units.yt_array.YTQuantity - The definition of the new unit in terms of some other units. For example, - one would define a new "mph" unit with (1.0, "mile/hr") - tex_repr : string, optional - The LaTeX representation of the new unit. If one is not supplied, it will - be generated automatically based on the symbol string. - offset : float, optional - The default offset for the unit. If not set, an offset of 0 is assumed. - prefixable : boolean, optional - Whether or not the new unit can use SI prefixes. Default: False - - Examples - -------- - >>> yt.define_unit("mph", (1.0, "mile/hr")) - >>> two_weeks = YTQuantity(14.0, "days") - >>> yt.define_unit("fortnight", two_weeks) - """ - _define_unit(default_unit_registry, symbol, value, tex_repr=tex_repr, - offset=offset, prefixable=prefixable) +from unyt.unit_object import * diff --git a/yt/units/unit_registry.py b/yt/units/unit_registry.py index 097e8a8d970..89ff3cdfa61 100644 --- a/yt/units/unit_registry.py +++ b/yt/units/unit_registry.py @@ -1,188 +1,6 @@ -""" -A registry for units that can be added to and modified. +from unyt.unit_registry import * +from unyt.dimensions import dimensionless +default_unit_registry = UnitRegistry(unit_system='cgs') -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
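The net effect of this hunk is that yt's Unit class, the conversion-factor helpers, and define_unit now come from unyt, which (per a comment later in this diff) was spun out of this very code. A rough sketch of the same workflow against unyt's public API, assuming it preserves the interface of the removed functions; the "mph" symbol only mirrors the removed define_unit docstring and is illustrative:

    import unyt
    from unyt import Unit

    # Define a new symbol in the default registry, as the removed define_unit did.
    unyt.define_unit("mph", (1.0, "mile/hr"))

    speed = Unit("mph")
    print(speed.same_dimensions_as(Unit("km/hr")))   # True: both are length/time
    print(speed.get_base_equivalent("mks"))          # dimensionally-equivalent SI unit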
-#----------------------------------------------------------------------------- - -import json -import re - -from distutils.version import LooseVersion -from yt.units.unit_lookup_table import \ - default_unit_symbol_lut -from yt.utilities.lib.fnv_hash import fnv_hash -from yt.extern import six -from sympy import \ - sympify, \ - srepr, \ - __version__ as sympy_version - -SYMPY_VERSION = LooseVersion(sympy_version) - -def positive_symbol_replacer(match): - return match.group().replace(')\')', ')\', positive=True)') - -class SymbolNotFoundError(Exception): - pass - -class UnitParseError(Exception): - pass - -class UnitRegistry: - """A registry for unit symbols""" - def __init__(self, add_default_symbols=True, lut=None): - if lut: - self.lut = lut - else: - self.lut = {} - self.unit_objs = {} - - if add_default_symbols: - self.lut.update(default_unit_symbol_lut) - - def __getitem__(self, key): - return self.lut[key] - - def __contains__(self, item): - return item in self.lut - - _unit_system_id = None - @property - def unit_system_id(self): - """ - This is a unique identifier for the unit registry created - from a FNV hash. It is needed to register a dataset's code - unit system in the unit system registry. - """ - if self._unit_system_id is None: - hash_data = bytearray() - for k, v in self.lut.items(): - hash_data.extend(k.encode('ascii')) - hash_data.extend(repr(v).encode('ascii')) - self._unit_system_id = "us_%d" % fnv_hash(hash_data) - return self._unit_system_id - - def add(self, symbol, base_value, dimensions, tex_repr=None, offset=None): - """ - Add a symbol to this registry. - - """ - from yt.units.unit_object import validate_dimensions - - # Validate - if not isinstance(base_value, float): - raise UnitParseError("base_value (%s) must be a float, got a %s." - % (base_value, type(base_value))) - - if offset is not None: - if not isinstance(offset, float): - raise UnitParseError( - "offset value (%s) must be a float, got a %s." - % (offset, type(offset))) - else: - offset = 0.0 - - validate_dimensions(dimensions) - - if tex_repr is None: - # make educated guess that will look nice in most cases - tex_repr = r"\rm{" + symbol.replace('_', '\ ') + "}" - - # Add to lut - self.lut.update({symbol: (base_value, dimensions, offset, tex_repr)}) - - def remove(self, symbol): - """ - Remove the entry for the unit matching `symbol`. - - """ - if symbol not in self.lut: - raise SymbolNotFoundError( - "Tried to remove the symbol '%s', but it does not exist" \ - "in this registry." % symbol) - - del self.lut[symbol] - if symbol in self.unit_objs: - del self.unit_objs[symbol] - - def modify(self, symbol, base_value): - """ - Change the base value of a unit symbol. Useful for adjusting code units - after parsing parameters. - - """ - if symbol not in self.lut: - raise SymbolNotFoundError( - "Tried to modify the symbol '%s', but it does not exist" \ - "in this registry." % symbol) - - if hasattr(base_value, "in_base"): - new_dimensions = base_value.units.dimensions - base_value = base_value.in_base('cgs-ampere') - base_value = base_value.value - else: - new_dimensions = self.lut[symbol][1] - - self.lut[symbol] = ((float(base_value), new_dimensions) + - self.lut[symbol][2:]) - - if symbol in self.unit_objs: - del self.unit_objs[symbol] - - def keys(self): - """ - Print out the units contained in the lookup table. 
- - """ - return self.lut.keys() - - def to_json(self): - """ - Returns a json-serialized version of the unit registry - """ - sanitized_lut = {} - for k, v in six.iteritems(self.lut): - san_v = list(v) - repr_dims = srepr(v[1]) - if SYMPY_VERSION < LooseVersion("1.0.0"): - # see https://github.com/sympy/sympy/issues/6131 - repr_dims = re.sub("Symbol\('\([a-z_]*\)'\)", - positive_symbol_replacer, repr_dims) - san_v[1] = repr_dims - sanitized_lut[k] = tuple(san_v) - - return json.dumps(sanitized_lut) - - @classmethod - def from_json(cls, json_text): - """ - Returns a UnitRegistry object from a json-serialized unit registry - """ - data = json.loads(json_text) - lut = {} - for k, v in six.iteritems(data): - unsan_v = list(v) - unsan_v[1] = sympify(v[1]) - lut[k] = tuple(unsan_v) - - return cls(lut=lut, add_default_symbols=False) - - def list_same_dimensions(self, unit_object): - """ - Return a list of base unit names that this registry knows about that - are of equivalent dimensions to *unit_object*. - """ - equiv = [k for k, v in self.lut.items() - if v[1] is unit_object.dimensions] - equiv += [n for n, u in self.unit_objs.items() - if u.dimensions is unit_object.dimensions] - equiv = list(sorted(set(equiv))) - return equiv +default_unit_registry.add('h', 1.0, dimensionless, tex_repr=r"h") diff --git a/yt/units/unit_symbols.py b/yt/units/unit_symbols.py index 67f4421adb6..911d280993f 100644 --- a/yt/units/unit_symbols.py +++ b/yt/units/unit_symbols.py @@ -1,185 +1,49 @@ -""" -A place to statically create unit quantities. +from yt.units.unit_registry import default_unit_registry +from unyt.unit_object import Unit +from unyt.unit_systems import add_symbols + +add_symbols(globals(), registry=default_unit_registry) + +class _SymbolContainer(object): + """A container for units to associate with a dataset. + + This object is usually accessed on a Dataset instance via + ``ds.units.unit_symbols``. + + Parameters + ---------- + registry : UnitRegistry instance + A unit registry to associate with units accessed on this object. + + Example + ------- + + >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") + >>> code_mass = ds.units.code_mass + >>> (12*code_mass).to("Msun") + unyt_quantity(4.89719136e+11, 'Msun') + >>> code_mass.registry is ds.unit_registry + True + """ + def __init__(self, registry): + self._registry = registry + self._cache = {} + + def __dir__(self): + ret = [u for u in globals() if not u.startswith('_')] + ret += list(self._registry.keys()) + ret += object.__dir__(self) + return list(set(ret)) + + def __getattr__(self, item): + if item in self._cache: + return self._cache[item] + if hasattr(globals(), item): + ret = Unit(globals()[item].expr, registry=self._registry) + elif item in self._registry: + ret = Unit(item, registry=self._registry) + else: + raise AttributeError(item) + self._cache[item] = ret + return ret - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- -from yt.units.yt_array import YTQuantity as quan - -# -# meter -# - -fm = femtometer = quan(1.0, "fm") -pm = picometer = quan(1.0, "pm") -nm = nanometer = quan(1.0, "nm") -um = micrometer = quan(1.0, "um") -mm = millimeter = quan(1.0, "mm") -cm = centimeter = quan(1.0, "cm") -m = meter = quan(1.0, "m") -km = kilometer = quan(1.0, "km") -Mm = Megameter = megameter = quan(1.0, "Mm") - -# -# parsec -# - -pc = parsec = quan(1.0, "pc") -kpc = kiloparsec = quan(1.0, "kpc") -Mpc = mpc = megaparsec = quan(1.0, "Mpc") -Gpc = gpc = Gigaparsec = quan(1.0, "Gpc") - -# -# gram -# - -mg = milligram = quan(1.0, "mg") -g = gram = quan(1.0, "g") -kg = kilogram = quan(1.0, "kg") - -# -# second -# - -fs = femtoseconds = quan(1.0, "fs") -ps = picosecond = quan(1.0, "ps") -ns = nanosecond = quan(1.0, "ns") -ms = millisecond = quan(1.0, "ms") -s = second = quan(1.0, "s") - -# -# minute -# - -min = minute = quan(1.0, "min") - -# -# hr -# - -hr = hour = quan(1.0, "hr") - -# -# day -# - -day = quan(1.0, "day") - -# -# year -# - -yr = year = quan(1.0, "yr") -kyr = kiloyear = quan(1.0, "kyr") -Myr = Megayear = megayear = quan(1.0, "Myr") -Gyr = Gigayear = gigayear = quan(1.0, "Gyr") - -# -# Kelvin -# - -degree_kelvin = Kelvin = K = quan(1.0, "K") - -# -# Misc CGS -# - -dyne = dyn = quan(1.0, "dyne") -erg = ergs = quan(1.0, "erg") - -# -# Misc SI -# - -N = Newton = newton = quan(1.0, "N") -J = Joule = joule = quan(1.0, "J") -W = Watt = watt = quan(1.0, "W") -Hz = Hertz = hertz = quan(1.0, "Hz") - -# -# Imperial units -# - -ft = foot = quan(1.0, "ft") -mile = quan(1.0, "mile") - -# -# Solar units -# - -Msun = solar_mass = quan(1.0, "Msun") -msun = quan(1.0, "msun") -Rsun = R_sun = solar_radius = quan(1.0, "Rsun") -rsun = r_sun = quan(1.0, "rsun") -Lsun = lsun = l_sun = solar_luminosity = quan(1.0, "Lsun") -Tsun = T_sun = solar_temperature = quan(1.0, "Tsun") -Zsun = Z_sun = solar_metallicity = quan(1.0, "Zsun") - -# -# Misc Astronomical units -# - -AU = astronomical_unit = quan(1.0, "AU") -au = quan(1.0, "au") -ly = light_year = quan(1.0, "ly") -Rearth = R_earth = earth_radius = quan(1.0, 'R_earth') -rearth = r_earth = quan(1.0, 'r_earth') -Rjup = R_jup = jupiter_radius = quan(1.0, 'R_jup') -rjup = r_jup = quan(1.0, 'r_jup') - -# -# Physical units -# - -eV = electron_volt = quan(1.0, "eV") -keV = kilo_electron_volt = quan(1.0, "keV") -MeV = mega_electron_volt = quan(1.0, "MeV") -GeV = giga_electron_volt = quan(1.0, "GeV") -amu = atomic_mass_unit = quan(1.0, "amu") -angstrom = quan(1.0, "angstrom") -me = electron_mass = quan(1.0, "me") - -# -# Angle units -# - -deg = degree = quan(1.0, "degree") -rad = radian = quan(1.0, "radian") -arcsec = arcsecond = quan(1.0, "arcsec") -arcmin = arcminute = quan(1.0, "arcmin") -mas = milliarcsecond = quan(1.0, "mas") -sr = steradian = quan(1.0, "steradian") - -# -# CGS electromagnetic units -# - -electrostatic_unit = esu = quan(1.0, "esu") -gauss = G = quan(1.0, "gauss") -statampere = statA = quan(1.0, "statA") -statvolt = statV = quan(1.0, "statV") -statohm = quan(1.0, "statohm") - -# -# SI electromagnetic units -# - -C = coulomb = Coulomb = quan(1.0, "C") -T = tesla = Tesla = quan(1.0, "T") -A = ampere = Ampere = quan(1.0, "A") -V = volt = Volt = quan(1.0, "V") -ohm = Ohm = quan(1.0, "ohm") - -# -# Geographic units -# - -latitude = lat = quan(1.0, "lat") -longitude = lon = quan(1.0, "lon") diff --git a/yt/units/unit_systems.py b/yt/units/unit_systems.py index 1dc33208a36..184a05c2f3c 100644 --- 
a/yt/units/unit_systems.py +++ b/yt/units/unit_systems.py @@ -1,155 +1,9 @@ -""" -Unit system class. - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.extern.six import string_types -from yt.units import dimensions -from yt.units.unit_object import Unit, unit_system_registry, _get_system_unit_string -from yt.utilities import physical_constants as pc - - -class UnitSystemConstants(object): - """ - A class to facilitate conversions of physical constants into a given unit - system specified by *name*. - """ - def __init__(self, name): - self.name = name - - def __repr__(self): - return "Physical constants in %s units." % self.name - - def __str__(self): - return self.name - - def __getattr__(self, item): - return getattr(pc, item).in_base(self.name) - - -class UnitSystem(object): - """ - Create a UnitSystem for facilitating conversions to a default set of units. - - Parameters - ---------- - name : string - The name of the unit system. Will be used as the key in the - *unit_system_registry* dict to reference the unit system by. - length_unit : string - The base length unit of this unit system. - mass_unit : string - The base mass unit of this unit system. - time_unit : string - The base time unit of this unit system. - temperature_unit : string, optional - The base temperature unit of this unit system. Defaults to "K". - angle_unit : string, optional - The base angle unit of this unit system. Defaults to "rad". - current_mks_unit : string, optional - The base current unit of this unit system. Only used in MKS - or MKS-based unit systems. - registry : :class:`yt.units.unit_registry.UnitRegistry` object - The unit registry associated with this unit system. Only - useful for defining unit systems based on code units. 
- """ - def __init__(self, name, length_unit, mass_unit, time_unit, - temperature_unit="K", angle_unit="rad", current_mks_unit=None, - registry=None): - self.registry = registry - self.units_map = {dimensions.length: Unit(length_unit, registry=self.registry), - dimensions.mass: Unit(mass_unit, registry=self.registry), - dimensions.time: Unit(time_unit, registry=self.registry), - dimensions.temperature: Unit(temperature_unit, registry=self.registry), - dimensions.angle: Unit(angle_unit, registry=self.registry)} - self._dims = ["length","mass","time","temperature","angle"] - if current_mks_unit is not None: - self.units_map[dimensions.current_mks] = Unit(current_mks_unit, registry=self.registry) - self._dims.append("current_mks") - self.registry = registry - self.base_units = self.units_map.copy() - unit_system_registry[name] = self - self.name = name - self.constants = UnitSystemConstants(self.name) - - def __getitem__(self, key): - if isinstance(key, string_types): - key = getattr(dimensions, key) - um = self.units_map - if key not in um or um[key].dimensions is not key: - units = _get_system_unit_string(key, self.units_map) - self.units_map[key] = Unit(units, registry=self.registry) - return self.units_map[key] - - def __setitem__(self, key, value): - if isinstance(key, string_types): - if key not in self._dims: - self._dims.append(key) - key = getattr(dimensions, key) - self.units_map[key] = Unit(value, registry=self.registry) - - def __str__(self): - return self.name - - def __repr__(self): - repr = "%s Unit System\n" % self.name - repr += " Base Units:\n" - for dim in self.base_units: - repr += " %s: %s\n" % (str(dim).strip("()"), self.base_units[dim]) - repr += " Other Units:\n" - for key in self._dims: - dim = getattr(dimensions, key) - if dim not in self.base_units: - repr += " %s: %s\n" % (key, self.units_map[dim]) - return repr - - -def _make_unit_system_copy(unit_registry, unit_system): - """ - Make a copy of a unit system with a different unit registry. - - Parameters - ---------- - unit_registry : UnitRegistry instance - The unit registry we want to use with this unit system, - most likely from a dataset. - unit_system : string - The name of the unit system we want to make a copy of - with the new registry. 
- """ - unit_system = unit_system_registry[unit_system] - base_units = ["length", "mass", "time", "temperature", "angle"] - if "current_mks" in unit_system._dims: - current_mks_unit = str(unit_system["current_mks"]) - base_units.append("current_mks") - else: - current_mks_unit = None - sys_name = "{}_{}".format(unit_system.name, unit_registry.unit_system_id) - ds_unit_system = UnitSystem(sys_name, str(unit_system["length"]), - str(unit_system["mass"]), - str(unit_system["time"]), - temperature_unit=str(unit_system["temperature"]), - angle_unit=str(unit_system["angle"]), - current_mks_unit=current_mks_unit, - registry=unit_registry) - for dim in unit_system._dims: - if dim not in base_units: - ds_unit_system[dim] = str(unit_system[dim]) - return ds_unit_system - +from unyt.unit_systems import * def create_code_unit_system(unit_registry, current_mks_unit=None): - code_unit_system = UnitSystem(unit_registry.unit_system_id, "code_length", + code_unit_system = UnitSystem(unit_registry.unit_system_id, "code_length", "code_mass", "code_time", "code_temperature", - current_mks_unit=current_mks_unit, + current_mks_unit=current_mks_unit, registry=unit_registry) code_unit_system["velocity"] = "code_velocity" if current_mks_unit: @@ -157,52 +11,4 @@ def create_code_unit_system(unit_registry, current_mks_unit=None): else: code_unit_system["magnetic_field_cgs"] = "code_magnetic" code_unit_system["pressure"] = "code_pressure" - - -cgs_unit_system = UnitSystem("cgs", "cm", "g", "s") -cgs_unit_system["energy"] = "erg" -cgs_unit_system["specific_energy"] = "erg/g" -cgs_unit_system["pressure"] = "dyne/cm**2" -cgs_unit_system["force"] = "dyne" -cgs_unit_system["magnetic_field_cgs"] = "gauss" -cgs_unit_system["charge_cgs"] = "esu" -cgs_unit_system["current_cgs"] = "statA" - -mks_unit_system = UnitSystem("mks", "m", "kg", "s", current_mks_unit="A") -mks_unit_system["energy"] = "J" -mks_unit_system["specific_energy"] = "J/kg" -mks_unit_system["pressure"] = "Pa" -mks_unit_system["force"] = "N" -mks_unit_system["magnetic_field_mks"] = "T" -mks_unit_system["charge_mks"] = "C" - -imperial_unit_system = UnitSystem("imperial", "ft", "lbm", "s", - temperature_unit="R") -imperial_unit_system["force"] = "lbf" -imperial_unit_system["energy"] = "ft*lbf" -imperial_unit_system["pressure"] = "lbf/ft**2" - -galactic_unit_system = UnitSystem("galactic", "kpc", "Msun", "Myr") -galactic_unit_system["energy"] = "keV" -galactic_unit_system["magnetic_field_cgs"] = "uG" - -solar_unit_system = UnitSystem("solar", "AU", "Mearth", "yr") - -geometrized_unit_system = UnitSystem("geometrized", "l_geom", - "m_geom", "t_geom") - -planck_unit_system = UnitSystem("planck", "l_pl", "m_pl", "t_pl", - temperature_unit="T_pl") -planck_unit_system["energy"] = "E_pl" -planck_unit_system["charge_cgs"] = "q_pl" - - -cgs_ampere_unit_system = UnitSystem('cgs-ampere', 'cm', 'g', 's', - current_mks_unit='A') -cgs_ampere_unit_system["energy"] = "erg" -cgs_ampere_unit_system["specific_energy"] = "erg/g" -cgs_ampere_unit_system["pressure"] = "dyne/cm**2" -cgs_ampere_unit_system["force"] = "dyne" -cgs_ampere_unit_system["magnetic_field_cgs"] = "gauss" -cgs_ampere_unit_system["charge_cgs"] = "esu" -cgs_ampere_unit_system["current_cgs"] = "statA" + return code_unit_system diff --git a/yt/units/yt_array.py b/yt/units/yt_array.py index 0f5d20cb873..d26b52c4faf 100644 --- a/yt/units/yt_array.py +++ b/yt/units/yt_array.py @@ -1,1899 +1,6 @@ -""" -YTArray class. 
- - - -""" -from __future__ import print_function -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -import copy -import numpy as np - -from distutils.version import LooseVersion -from functools import wraps -from numpy import \ - add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ - floor_divide, negative, power, remainder, mod, absolute, rint, \ - sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ - reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ - hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ - bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ - greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ - logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ - isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ - modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing - -try: - # numpy 1.13 or newer - from numpy import positive, divmod as divmod_, isnat, heaviside -except ImportError: - positive, divmod_, isnat, heaviside = (None,)*4 - -from yt.units.unit_object import Unit, UnitParseError -from yt.units.unit_registry import UnitRegistry -from yt.units.dimensions import \ - angle, \ - current_mks, \ - dimensionless, \ - em_dimensions -from yt.utilities.exceptions import \ - YTUnitOperationError, YTUnitConversionError, \ - YTUfuncUnitError, YTIterableUnitCoercionError, \ - YTInvalidUnitEquivalence, YTEquivalentDimsError, \ - YTArrayTooLargeToDisplay -from yt.utilities.lru_cache import lru_cache -from yt.utilities.on_demand_imports import _h5py as h5py -from numbers import Number as numeric_type -from yt.utilities.on_demand_imports import _astropy -try: - from numpy.core.umath import clip -except ImportError: - clip = None -from sympy import Rational -from yt.units.unit_lookup_table import \ - default_unit_symbol_lut -from yt.units.equivalencies import equivalence_registry -from yt.utilities.logger import ytLogger as mylog -from .pint_conversions import convert_pint_units - -NULL_UNIT = Unit() -POWER_SIGN_MAPPING = {multiply: 1, divide: -1} - -# redefine this here to avoid a circular import from yt.funcs -def iterable(obj): - try: len(obj) - except Exception: return False - return True - -def return_arr(func): - @wraps(func) - def wrapped(*args, **kwargs): - ret, units = func(*args, **kwargs) - if ret.shape == (): - return YTQuantity(ret, units) - else: - # This could be a subclass, so don't call YTArray directly. 
- return type(args[0])(ret, units) - return wrapped - -@lru_cache(maxsize=128, typed=False) -def sqrt_unit(unit): - return unit**0.5 - -@lru_cache(maxsize=128, typed=False) -def multiply_units(unit1, unit2): - return unit1 * unit2 - -def preserve_units(unit1, unit2=None): - return unit1 - -@lru_cache(maxsize=128, typed=False) -def power_unit(unit, power): - return unit**power - -@lru_cache(maxsize=128, typed=False) -def square_unit(unit): - return unit*unit - -@lru_cache(maxsize=128, typed=False) -def divide_units(unit1, unit2): - return unit1/unit2 - -@lru_cache(maxsize=128, typed=False) -def reciprocal_unit(unit): - return unit**-1 - -def passthrough_unit(unit, unit2=None): - return unit - -def return_without_unit(unit, unit2=None): - return None - -def arctan2_unit(unit1, unit2): - return NULL_UNIT - -def comparison_unit(unit1, unit2=None): - return None - -def invert_units(unit): - raise TypeError( - "Bit-twiddling operators are not defined for YTArray instances") - -def bitop_units(unit1, unit2): - raise TypeError( - "Bit-twiddling operators are not defined for YTArray instances") - -def get_inp_u_unary(ufunc, inputs, out_arr=None): - inp = inputs[0] - u = getattr(inp, 'units', None) - if u is None: - u = NULL_UNIT - if u.dimensions is angle and ufunc in trigonometric_operators: - inp = inp.in_units('radian').v - if out_arr is not None: - out_arr = ufunc(inp).view(np.ndarray) - return out_arr, inp, u - -def get_inp_u_binary(ufunc, inputs): - inp1 = coerce_iterable_units(inputs[0]) - inp2 = coerce_iterable_units(inputs[1]) - unit1 = getattr(inp1, 'units', None) - unit2 = getattr(inp2, 'units', None) - ret_class = get_binary_op_return_class(type(inp1), type(inp2)) - if unit1 is None: - unit1 = Unit(registry=getattr(unit2, 'registry', None)) - if unit2 is None and ufunc is not power: - unit2 = Unit(registry=getattr(unit1, 'registry', None)) - elif ufunc is power: - unit2 = inp2 - if isinstance(unit2, np.ndarray): - if isinstance(unit2, YTArray): - if unit2.units.is_dimensionless: - pass - else: - raise YTUnitOperationError(ufunc, unit1, unit2) - unit2 = 1.0 - return (inp1, inp2), (unit1, unit2), ret_class - -def handle_preserve_units(inps, units, ufunc, ret_class): - if units[0] != units[1]: - any_nonzero = [np.any(inps[0]), np.any(inps[1])] - if any_nonzero[0] == np.bool_(False): - units = (units[1], units[1]) - elif any_nonzero[1] == np.bool_(False): - units = (units[0], units[0]) - else: - if not units[0].same_dimensions_as(units[1]): - raise YTUnitOperationError(ufunc, *units) - inps = (inps[0], ret_class(inps[1]).to( - ret_class(inps[0]).units)) - return inps, units - -def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): - if units[0] != units[1]: - u1d = units[0].is_dimensionless - u2d = units[1].is_dimensionless - any_nonzero = [np.any(inps[0]), np.any(inps[1])] - if any_nonzero[0] == np.bool_(False): - units = (units[1], units[1]) - elif any_nonzero[1] == np.bool_(False): - units = (units[0], units[0]) - elif not any([u1d, u2d]): - if not units[0].same_dimensions_as(units[1]): - raise YTUnitOperationError(ufunc, *units) - else: - if raise_error: - raise YTUfuncUnitError(ufunc, *units) - inps = (inps[0], ret_class(inps[1]).to( - ret_class(inps[0]).units)) - return inps, units - -def handle_multiply_divide_units(unit, units, out, out_arr): - if unit.is_dimensionless and unit.base_value != 1.0: - if not units[0].is_dimensionless: - if units[0].dimensions == units[1].dimensions: - out_arr = np.multiply(out_arr.view(np.ndarray), - unit.base_value, out=out) - 
unit = Unit(registry=unit.registry) - return out, out_arr, unit - -def coerce_iterable_units(input_object): - if isinstance(input_object, np.ndarray): - return input_object - if iterable(input_object): - if any([isinstance(o, YTArray) for o in input_object]): - ff = getattr(input_object[0], 'units', NULL_UNIT, ) - if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): - raise YTIterableUnitCoercionError(input_object) - # This will create a copy of the data in the iterable. - return YTArray(input_object) - return input_object - else: - return input_object - -def sanitize_units_mul(this_object, other_object): - inp = coerce_iterable_units(this_object) - ret = coerce_iterable_units(other_object) - # If the other object is a YTArray and has the same dimensions as the object - # under consideration, convert so we don't mix units with the same - # dimensions. - if isinstance(ret, YTArray): - if inp.units.same_dimensions_as(ret.units): - ret.in_units(inp.units) - return ret - -def sanitize_units_add(this_object, other_object, op_string): - inp = coerce_iterable_units(this_object) - ret = coerce_iterable_units(other_object) - # Make sure the other object is a YTArray before we use the `units` - # attribute. - if isinstance(ret, YTArray): - if not inp.units.same_dimensions_as(ret.units): - # handle special case of adding or subtracting with zero or - # array filled with zero - if not np.any(other_object): - return ret.view(np.ndarray) - elif not np.any(this_object): - return ret - raise YTUnitOperationError(op_string, inp.units, ret.units) - ret = ret.in_units(inp.units) - else: - # If the other object is not a YTArray, then one of the arrays must be - # dimensionless or filled with zeros - if not inp.units.is_dimensionless and np.any(ret): - raise YTUnitOperationError(op_string, inp.units, dimensionless) - return ret - -def validate_comparison_units(this, other, op_string): - # Check that other is a YTArray. - if hasattr(other, 'units'): - if this.units.expr is other.units.expr: - if this.units.base_value == other.units.base_value: - return other - if not this.units.same_dimensions_as(other.units): - raise YTUnitOperationError(op_string, this.units, other.units) - return other.in_units(this.units) - - return other - -@lru_cache(maxsize=128, typed=False) -def _unit_repr_check_same(my_units, other_units): - """ - Takes a Unit object, or string of known unit symbol, and check that it - is compatible with this quantity. Returns Unit object. - - """ - # let Unit() handle units arg if it's not already a Unit obj. 
- if not isinstance(other_units, Unit): - other_units = Unit(other_units, registry=my_units.registry) - - equiv_dims = em_dimensions.get(my_units.dimensions, None) - if equiv_dims == other_units.dimensions: - if current_mks in equiv_dims.free_symbols: - base = "SI" - else: - base = "CGS" - raise YTEquivalentDimsError(my_units, other_units, base) - - if not my_units.same_dimensions_as(other_units): - raise YTUnitConversionError( - my_units, my_units.dimensions, other_units, other_units.dimensions) - - return other_units - -unary_operators = ( - negative, absolute, rint, sign, conj, exp, exp2, log, log2, - log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, - arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, - rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, - signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, -) - -binary_operators = ( - add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, - remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, - left_shift, right_shift, greater, greater_equal, less, less_equal, - not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, - fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside -) - -trigonometric_operators = ( - sin, cos, tan, -) - -multiple_output_operators = {modf: 2, frexp: 2, divmod_: 2} - -class YTArray(np.ndarray): - """ - An ndarray subclass that attaches a symbolic unit object to the array data. - - Parameters - ---------- - - input_array : :obj:`!iterable` - A tuple, list, or array to attach units to - input_units : String unit specification, unit symbol object, or astropy units - The units of the array. Powers must be specified using python - syntax (cm**3, not cm^3). - registry : ~yt.units.unit_registry.UnitRegistry - The registry to create units from. If input_units is already associated - with a unit registry and this is specified, this will be used instead of - the registry associated with the unit object. - dtype : data-type - The dtype of the array data. Defaults to the dtype of the input data, - or, if none is found, uses np.float64 - bypass_validation : boolean - If True, all input validation is skipped. Using this option may produce - corrupted, invalid units or array data, but can lead to significant - speedups in the input validation logic adds significant overhead. If set, - input_units *must* be a valid unit object. Defaults to False. - - Examples - -------- - - >>> from yt import YTArray - >>> a = YTArray([1, 2, 3], 'cm') - >>> b = YTArray([4, 5, 6], 'm') - >>> a + b - YTArray([ 401., 502., 603.]) cm - >>> b + a - YTArray([ 4.01, 5.02, 6.03]) m - - NumPy ufuncs will pass through units where appropriate. - - >>> import numpy as np - >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') - >>> np.abs(a) - YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 - - and strip them when it would be annoying to deal with them. - - >>> np.log10(a) - array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, - 0.69897 , 0.77815125, 0.84509804]) - - YTArray is tightly integrated with yt datasets: - - >>> import yt - >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') - >>> a = ds.arr(np.ones(5), 'code_length') - >>> a.in_cgs() - YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, - 3.08600000e+24, 3.08600000e+24]) cm - - This is equivalent to: - - >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) - >>> np.all(a == b) - True - - """ - _ufunc_registry = { - add: preserve_units, - subtract: preserve_units, - multiply: multiply_units, - divide: divide_units, - logaddexp: return_without_unit, - logaddexp2: return_without_unit, - true_divide: divide_units, - floor_divide: divide_units, - negative: passthrough_unit, - power: power_unit, - remainder: preserve_units, - mod: preserve_units, - fmod: preserve_units, - absolute: passthrough_unit, - fabs: passthrough_unit, - rint: return_without_unit, - sign: return_without_unit, - conj: passthrough_unit, - exp: return_without_unit, - exp2: return_without_unit, - log: return_without_unit, - log2: return_without_unit, - log10: return_without_unit, - expm1: return_without_unit, - log1p: return_without_unit, - sqrt: sqrt_unit, - square: square_unit, - reciprocal: reciprocal_unit, - sin: return_without_unit, - cos: return_without_unit, - tan: return_without_unit, - sinh: return_without_unit, - cosh: return_without_unit, - tanh: return_without_unit, - arcsin: return_without_unit, - arccos: return_without_unit, - arctan: return_without_unit, - arctan2: arctan2_unit, - arcsinh: return_without_unit, - arccosh: return_without_unit, - arctanh: return_without_unit, - hypot: preserve_units, - deg2rad: return_without_unit, - rad2deg: return_without_unit, - bitwise_and: bitop_units, - bitwise_or: bitop_units, - bitwise_xor: bitop_units, - invert: invert_units, - left_shift: bitop_units, - right_shift: bitop_units, - greater: comparison_unit, - greater_equal: comparison_unit, - less: comparison_unit, - less_equal: comparison_unit, - not_equal: comparison_unit, - equal: comparison_unit, - logical_and: comparison_unit, - logical_or: comparison_unit, - logical_xor: comparison_unit, - logical_not: return_without_unit, - maximum: preserve_units, - minimum: preserve_units, - fmax: preserve_units, - fmin: preserve_units, - isreal: return_without_unit, - iscomplex: return_without_unit, - isfinite: return_without_unit, - isinf: return_without_unit, - isnan: return_without_unit, - signbit: return_without_unit, - copysign: passthrough_unit, - nextafter: preserve_units, - modf: passthrough_unit, - ldexp: bitop_units, - frexp: return_without_unit, - floor: passthrough_unit, - ceil: passthrough_unit, - trunc: passthrough_unit, - spacing: passthrough_unit, - positive: passthrough_unit, - divmod_: passthrough_unit, - isnat: return_without_unit, - heaviside: preserve_units, - clip: passthrough_unit, - } - - __array_priority__ = 2.0 - - def __new__(cls, input_array, input_units=None, registry=None, dtype=None, - bypass_validation=False): - if dtype is None: - dtype = getattr(input_array, 'dtype', np.float64) - if bypass_validation: - obj = np.asarray(input_array, dtype=dtype).view(cls) - obj.units = input_units - if registry is not None: - obj.units.registry = registry - return obj - if input_array is NotImplemented: - return input_array.view(cls) - if registry is None and isinstance(input_units, (str, bytes)): - if input_units.startswith('code_'): - raise UnitParseError( - "Code units used without referring to a dataset. 
\n" - "Perhaps you meant to do something like this instead: \n" - "ds.arr(%s, \"%s\")" % (input_array, input_units) - ) - if isinstance(input_array, YTArray): - ret = input_array.view(cls) - if input_units is None: - if registry is None: - ret.units = input_array.units - else: - units = Unit(str(input_array.units), registry=registry) - ret.units = units - elif isinstance(input_units, Unit): - ret.units = input_units - else: - ret.units = Unit(input_units, registry=registry) - return ret - elif isinstance(input_array, np.ndarray): - pass - elif iterable(input_array) and input_array: - if isinstance(input_array[0], YTArray): - return YTArray(np.array(input_array, dtype=dtype), - input_array[0].units, registry=registry) - - # Input array is an already formed ndarray instance - # We first cast to be our class type - - obj = np.asarray(input_array, dtype=dtype).view(cls) - - # Check units type - if input_units is None: - # Nothing provided. Make dimensionless... - units = Unit() - elif isinstance(input_units, Unit): - if registry and registry is not input_units.registry: - units = Unit(str(input_units), registry=registry) - else: - units = input_units - else: - # units kwarg set, but it's not a Unit object. - # don't handle all the cases here, let the Unit class handle if - # it's a str. - units = Unit(input_units, registry=registry) - - # Attach the units - obj.units = units - - return obj - - def __repr__(self): - """ - - """ - return super(YTArray, self).__repr__()+' '+self.units.__repr__() - - def __str__(self): - """ - - """ - return str(self.view(np.ndarray)) + ' ' + str(self.units) - - def __format__(self, format_spec): - ret = super(YTArray, self).__format__(format_spec) - return ret + ' {}'.format(self.units) - - # - # Start unit conversion methods - # - - def convert_to_units(self, units): - """ - Convert the array and units to the given units. - - Parameters - ---------- - units : Unit object or str - The units you want to convert to. - - """ - new_units = _unit_repr_check_same(self.units, units) - (conversion_factor, offset) = self.units.get_conversion_factor(new_units) - - self.units = new_units - values = self.d - values *= conversion_factor - - if offset: - np.subtract(self, offset*self.uq, self) - - return self - - def convert_to_base(self, unit_system="cgs"): - """ - Convert the array and units to the equivalent base units in - the specified unit system. - - Parameters - ---------- - unit_system : string, optional - The unit system to be used in the conversion. If not specified, - the default base units of cgs are used. - - Examples - -------- - >>> E = YTQuantity(2.5, "erg/s") - >>> E.convert_to_base(unit_system="galactic") - """ - return self.convert_to_units(self.units.get_base_equivalent(unit_system)) - - def convert_to_cgs(self): - """ - Convert the array and units to the equivalent cgs units. - - """ - return self.convert_to_units(self.units.get_cgs_equivalent()) - - def convert_to_mks(self): - """ - Convert the array and units to the equivalent mks units. - - """ - return self.convert_to_units(self.units.get_mks_equivalent()) - - def in_units(self, units, equivalence=None, **kwargs): - """ - Creates a copy of this array with the data in the supplied - units, and returns it. - - Optionally, an equivalence can be specified to convert to an - equivalent quantity which is not in the same dimensions. - - .. note:: - - All additional keyword arguments are passed to the - equivalency, which should be used if that particular - equivalency requires them. 
- - Parameters - ---------- - units : Unit object or string - The units you want to get a new quantity in. - equivalence : string, optional - The equivalence you wish to use. To see which - equivalencies are supported for this unitful - quantity, try the :meth:`list_equivalencies` - method. Default: None - - Returns - ------- - YTArray - """ - if equivalence is None: - new_units = _unit_repr_check_same(self.units, units) - (conversion_factor, offset) = self.units.get_conversion_factor(new_units) - - new_array = type(self)(self.ndview * conversion_factor, new_units) - - if offset: - np.subtract(new_array, offset*new_array.uq, new_array) - - return new_array - else: - return self.to_equivalent(units, equivalence, **kwargs) - - def to(self, units, equivalence=None, **kwargs): - """ - An alias for YTArray.in_units(). - - See the docstrings of that function for details. - """ - return self.in_units(units, equivalence=equivalence, **kwargs) - - def to_value(self, units=None, equivalence=None, **kwargs): - """ - Creates a copy of this array with the data in the supplied - units, and returns it without units. Output is therefore a - bare NumPy array. - - Optionally, an equivalence can be specified to convert to an - equivalent quantity which is not in the same dimensions. - - .. note:: - - All additional keyword arguments are passed to the - equivalency, which should be used if that particular - equivalency requires them. - - Parameters - ---------- - units : Unit object or string, optional - The units you want to get the bare quantity in. If not - specified, the value will be returned in the current units. - - equivalence : string, optional - The equivalence you wish to use. To see which - equivalencies are supported for this unitful - quantity, try the :meth:`list_equivalencies` - method. Default: None - - Returns - ------- - NumPy array - """ - if units is None: - v = self.value - else: - v = self.in_units(units, equivalence=equivalence, **kwargs).value - if isinstance(self, YTQuantity): - return float(v) - else: - return v - - def in_base(self, unit_system="cgs"): - """ - Creates a copy of this array with the data in the specified unit system, - and returns it in that system's base units. - - Parameters - ---------- - unit_system : string, optional - The unit system to be used in the conversion. If not specified, - the default base units of cgs are used. - - Examples - -------- - >>> E = YTQuantity(2.5, "erg/s") - >>> E_new = E.in_base(unit_system="galactic") - """ - return self.in_units(self.units.get_base_equivalent(unit_system)) - - def in_cgs(self): - """ - Creates a copy of this array with the data in the equivalent cgs units, - and returns it. - - Returns - ------- - Quantity object with data converted to cgs units. - - """ - return self.in_units(self.units.get_cgs_equivalent()) - - def in_mks(self): - """ - Creates a copy of this array with the data in the equivalent mks units, - and returns it. - - Returns - ------- - Quantity object with data converted to mks units. - - """ - return self.in_units(self.units.get_mks_equivalent()) - - def to_equivalent(self, unit, equiv, **kwargs): - """ - Convert a YTArray or YTQuantity to an equivalent, e.g., something that is - related by only a constant factor but not in the same units. - - Parameters - ---------- - unit : string - The unit that you wish to convert to. - equiv : string - The equivalence you wish to use. To see which equivalencies are - supported for this unitful quantity, try the - :meth:`list_equivalencies` method. 
- - Examples - -------- - >>> a = yt.YTArray(1.0e7,"K") - >>> a.to_equivalent("keV", "thermal") - """ - conv_unit = Unit(unit, registry=self.units.registry) - if self.units.same_dimensions_as(conv_unit): - return self.in_units(conv_unit) - this_equiv = equivalence_registry[equiv]() - oneway_or_equivalent = ( - conv_unit.has_equivalent(equiv) or this_equiv._one_way) - if self.has_equivalent(equiv) and oneway_or_equivalent: - new_arr = this_equiv.convert( - self, conv_unit.dimensions, **kwargs) - if isinstance(new_arr, tuple): - try: - return type(self)(new_arr[0], new_arr[1]).in_units(unit) - except YTUnitConversionError: - raise YTInvalidUnitEquivalence(equiv, self.units, unit) - else: - return new_arr.in_units(unit) - else: - raise YTInvalidUnitEquivalence(equiv, self.units, unit) - - def list_equivalencies(self): - """ - Lists the possible equivalencies associated with this YTArray or - YTQuantity. - """ - self.units.list_equivalencies() - - def has_equivalent(self, equiv): - """ - Check to see if this YTArray or YTQuantity has an equivalent unit in - *equiv*. - """ - return self.units.has_equivalent(equiv) - - def ndarray_view(self): - """ - Returns a view into the array, but as an ndarray rather than ytarray. - - Returns - ------- - View of this array's data. - """ - return self.view(np.ndarray) - - def to_ndarray(self): - """ - Creates a copy of this array with the unit information stripped - - """ - return np.array(self) - - def argsort(self, axis=-1, kind='quicksort', order=None): - """ - Returns the indices that would sort the array. - - See the documentation of ndarray.argsort for details about the keyword - arguments. - - Example - ------- - >>> from yt.units import km - >>> data = [3, 8, 7]*km - >>> np.argsort(data) - array([0, 2, 1]) - >>> data.argsort() - array([0, 2, 1]) - """ - return self.view(np.ndarray).argsort(axis, kind, order) - - @classmethod - def from_astropy(cls, arr, unit_registry=None): - """ - Convert an AstroPy "Quantity" to a YTArray or YTQuantity. - - Parameters - ---------- - arr : AstroPy Quantity - The Quantity to convert from. - unit_registry : yt UnitRegistry, optional - A yt unit registry to use in the conversion. If one is not - supplied, the default one will be used. - """ - # Converting from AstroPy Quantity - u = arr.unit - ap_units = [] - for base, exponent in zip(u.bases, u.powers): - unit_str = base.to_string() - # we have to do this because AstroPy is silly and defines - # hour as "h" - if unit_str == "h": unit_str = "hr" - ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) - ap_units = "*".join(ap_units) - if isinstance(arr.value, np.ndarray): - return YTArray(arr.value, ap_units, registry=unit_registry) - else: - return YTQuantity(arr.value, ap_units, registry=unit_registry) - - - def to_astropy(self, **kwargs): - """ - Creates a new AstroPy quantity with the same unit information. - """ - if _astropy.units is None: - raise ImportError("You don't have AstroPy installed, so you can't convert to " + - "an AstroPy quantity.") - return self.value*_astropy.units.Unit(str(self.units), **kwargs) - - @classmethod - def from_pint(cls, arr, unit_registry=None): - """ - Convert a Pint "Quantity" to a YTArray or YTQuantity. - - Parameters - ---------- - arr : Pint Quantity - The Quantity to convert from. - unit_registry : yt UnitRegistry, optional - A yt unit registry to use in the conversion. If one is not - supplied, the default one will be used. 
- - Examples - -------- - >>> from pint import UnitRegistry - >>> import numpy as np - >>> ureg = UnitRegistry() - >>> a = np.random.random(10) - >>> b = ureg.Quantity(a, "erg/cm**3") - >>> c = yt.YTArray.from_pint(b) - """ - p_units = [] - for base, exponent in arr._units.items(): - bs = convert_pint_units(base) - p_units.append("%s**(%s)" % (bs, Rational(exponent))) - p_units = "*".join(p_units) - if isinstance(arr.magnitude, np.ndarray): - return YTArray(arr.magnitude, p_units, registry=unit_registry) - else: - return YTQuantity(arr.magnitude, p_units, registry=unit_registry) - - def to_pint(self, unit_registry=None): - """ - Convert a YTArray or YTQuantity to a Pint Quantity. - - Parameters - ---------- - arr : YTArray or YTQuantity - The unitful quantity to convert from. - unit_registry : Pint UnitRegistry, optional - The Pint UnitRegistry to use in the conversion. If one is not - supplied, the default one will be used. NOTE: This is not - the same as a yt UnitRegistry object. - - Examples - -------- - >>> a = YTQuantity(4.0, "cm**2/s") - >>> b = a.to_pint() - """ - from pint import UnitRegistry - if unit_registry is None: - unit_registry = UnitRegistry() - powers_dict = self.units.expr.as_powers_dict() - units = [] - for unit, pow in powers_dict.items(): - # we have to do this because Pint doesn't recognize - # "yr" as "year" - if str(unit).endswith("yr") and len(str(unit)) in [2,3]: - unit = str(unit).replace("yr","year") - units.append("%s**(%s)" % (unit, Rational(pow))) - units = "*".join(units) - return unit_registry.Quantity(self.value, units) - - # - # End unit conversion methods - # - - def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): - r"""Writes a YTArray to hdf5 file. - - Parameters - ---------- - filename: string - The filename to create and write a dataset to - - dataset_name: string - The name of the dataset to create in the file. - - info: dictionary - A dictionary of supplementary info to write to append as attributes - to the dataset. - - group_name: string - An optional group to write the arrays to. If not specified, the arrays - are datasets at the top level by default. - - Examples - -------- - >>> a = YTArray([1,2,3], 'cm') - >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} - >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', - ... info=myinfo) - """ - from yt.extern.six.moves import cPickle as pickle - if info is None: - info = {} - - info['units'] = str(self.units) - info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) - - if dataset_name is None: - dataset_name = 'array_data' - - f = h5py.File(filename, mode="a") - if group_name is not None: - if group_name in f: - g = f[group_name] - else: - g = f.create_group(group_name) - else: - g = f - if dataset_name in g.keys(): - d = g[dataset_name] - # Overwrite without deleting if we can get away with it. - if d.shape == self.shape and d.dtype == self.dtype: - d[...] = self - for k in d.attrs.keys(): - del d.attrs[k] - else: - del f[dataset_name] - d = g.create_dataset(dataset_name, data=self) - else: - d = g.create_dataset(dataset_name, data=self) - - for k, v in info.items(): - d.attrs[k] = v - f.close() - - @classmethod - def from_hdf5(cls, filename, dataset_name=None, group_name=None): - r"""Attempts read in and convert a dataset in an hdf5 file into a - YTArray. - - Parameters - ---------- - filename: string - The filename to of the hdf5 file. - - dataset_name: string - The name of the dataset to read from. 
If the dataset has a units - attribute, attempt to infer units as well. - - group_name: string - An optional group to read the arrays from. If not specified, the - arrays are datasets at the top level by default. - - """ - from yt.extern.six.moves import cPickle as pickle - - if dataset_name is None: - dataset_name = 'array_data' - - f = h5py.File(filename, mode="r") - if group_name is not None: - g = f[group_name] - else: - g = f - dataset = g[dataset_name] - data = dataset[:] - units = dataset.attrs.get('units', '') - if 'unit_registry' in dataset.attrs.keys(): - unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) - else: - unit_lut = None - f.close() - registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) - return cls(data, units, registry=registry) - - # - # Start convenience methods - # - - @property - def value(self): - """Get a copy of the array data as a numpy ndarray""" - return np.array(self) - - v = value - - @property - def ndview(self): - """Get a view of the array data.""" - return self.ndarray_view() - - d = ndview - - @property - def unit_quantity(self): - """Get a YTQuantity with the same unit as this array and a value of - 1.0""" - return YTQuantity(1.0, self.units) - - uq = unit_quantity - - @property - def unit_array(self): - """Get a YTArray filled with ones with the same unit and shape as this - array""" - return np.ones_like(self) - - ua = unit_array - - def __getitem__(self, item): - ret = super(YTArray, self).__getitem__(item) - if ret.shape == (): - return YTQuantity(ret, self.units, bypass_validation=True) - else: - if hasattr(self, 'units'): - ret.units = self.units - return ret - - # - # Start operation methods - # - - if LooseVersion(np.__version__) < LooseVersion('1.13.0'): - - def __add__(self, right_object): - """ - Add this ytarray to the object on the right of the `+` operator. - Must check for the correct (same dimension) units. - - """ - ro = sanitize_units_add(self, right_object, "addition") - return super(YTArray, self).__add__(ro) - - def __radd__(self, left_object): - """ See __add__. """ - lo = sanitize_units_add(self, left_object, "addition") - return super(YTArray, self).__radd__(lo) - - def __iadd__(self, other): - """ See __add__. """ - oth = sanitize_units_add(self, other, "addition") - np.add(self, oth, out=self) - return self - - def __sub__(self, right_object): - """ - Subtract the object on the right of the `-` from this ytarray. Must - check for the correct (same dimension) units. - - """ - ro = sanitize_units_add(self, right_object, "subtraction") - return super(YTArray, self).__sub__(ro) - - def __rsub__(self, left_object): - """ See __sub__. """ - lo = sanitize_units_add(self, left_object, "subtraction") - return super(YTArray, self).__rsub__(lo) - - def __isub__(self, other): - """ See __sub__. """ - oth = sanitize_units_add(self, other, "subtraction") - np.subtract(self, oth, out=self) - return self - - def __neg__(self): - """ Negate the data. """ - return super(YTArray, self).__neg__() - - def __mul__(self, right_object): - """ - Multiply this YTArray by the object on the right of the `*` - operator. The unit objects handle being multiplied. - - """ - ro = sanitize_units_mul(self, right_object) - return super(YTArray, self).__mul__(ro) - - def __rmul__(self, left_object): - """ See __mul__. """ - lo = sanitize_units_mul(self, left_object) - return super(YTArray, self).__rmul__(lo) - - def __imul__(self, other): - """ See __mul__. 
""" - oth = sanitize_units_mul(self, other) - np.multiply(self, oth, out=self) - return self - - def __div__(self, right_object): - """ - Divide this YTArray by the object on the right of the `/` operator. - - """ - ro = sanitize_units_mul(self, right_object) - return super(YTArray, self).__div__(ro) - - def __rdiv__(self, left_object): - """ See __div__. """ - lo = sanitize_units_mul(self, left_object) - return super(YTArray, self).__rdiv__(lo) - - def __idiv__(self, other): - """ See __div__. """ - oth = sanitize_units_mul(self, other) - np.divide(self, oth, out=self) - return self - - def __truediv__(self, right_object): - ro = sanitize_units_mul(self, right_object) - return super(YTArray, self).__truediv__(ro) - - def __rtruediv__(self, left_object): - """ See __div__. """ - lo = sanitize_units_mul(self, left_object) - return super(YTArray, self).__rtruediv__(lo) - - def __itruediv__(self, other): - """ See __div__. """ - oth = sanitize_units_mul(self, other) - np.true_divide(self, oth, out=self) - return self - - def __floordiv__(self, right_object): - ro = sanitize_units_mul(self, right_object) - return super(YTArray, self).__floordiv__(ro) - - def __rfloordiv__(self, left_object): - """ See __div__. """ - lo = sanitize_units_mul(self, left_object) - return super(YTArray, self).__rfloordiv__(lo) - - def __ifloordiv__(self, other): - """ See __div__. """ - oth = sanitize_units_mul(self, other) - np.floor_divide(self, oth, out=self) - return self - - def __or__(self, right_object): - return super(YTArray, self).__or__(right_object) - - def __ror__(self, left_object): - return super(YTArray, self).__ror__(left_object) - - def __ior__(self, other): - np.bitwise_or(self, other, out=self) - return self - - def __xor__(self, right_object): - return super(YTArray, self).__xor__(right_object) - - def __rxor__(self, left_object): - return super(YTArray, self).__rxor__(left_object) - - def __ixor__(self, other): - np.bitwise_xor(self, other, out=self) - return self - - def __and__(self, right_object): - return super(YTArray, self).__and__(right_object) - - def __rand__(self, left_object): - return super(YTArray, self).__rand__(left_object) - - def __iand__(self, other): - np.bitwise_and(self, other, out=self) - return self - - def __pow__(self, power): - """ - Raise this YTArray to some power. - - Parameters - ---------- - power : float or dimensionless YTArray. - The pow value. - - """ - if isinstance(power, YTArray): - if not power.units.is_dimensionless: - raise YTUnitOperationError('power', power.unit) - - # Work around a sympy issue (I think?) - # - # If I don't do this, super(YTArray, self).__pow__ returns a YTArray - # with a unit attribute set to the sympy expression 1/1 rather than - # a dimensionless Unit object. - if self.units.is_dimensionless and power == -1: - ret = super(YTArray, self).__pow__(power) - return type(self)(ret, input_units='') - - return super(YTArray, self).__pow__(power) - - def __abs__(self): - """ Return a YTArray with the abs of the data. """ - return super(YTArray, self).__abs__() - - # - # Start comparison operators. - # - - def __lt__(self, other): - """ Test if this is less than the object on the right. """ - # converts if possible - oth = validate_comparison_units(self, other, 'less_than') - return super(YTArray, self).__lt__(oth) - - def __le__(self, other): - """Test if this is less than or equal to the object on the right. 
- """ - oth = validate_comparison_units(self, other, 'less_than or equal') - return super(YTArray, self).__le__(oth) - - def __eq__(self, other): - """ Test if this is equal to the object on the right. """ - # Check that other is a YTArray. - if other is None: - # self is a YTArray, so it can't be None. - return False - oth = validate_comparison_units(self, other, 'equal') - return super(YTArray, self).__eq__(oth) - - def __ne__(self, other): - """ Test if this is not equal to the object on the right. """ - # Check that the other is a YTArray. - if other is None: - return True - oth = validate_comparison_units(self, other, 'not equal') - return super(YTArray, self).__ne__(oth) - - def __ge__(self, other): - """ Test if this is greater than or equal to other. """ - # Check that the other is a YTArray. - oth = validate_comparison_units( - self, other, 'greater than or equal') - return super(YTArray, self).__ge__(oth) - - def __gt__(self, other): - """ Test if this is greater than the object on the right. """ - # Check that the other is a YTArray. - oth = validate_comparison_units(self, other, 'greater than') - return super(YTArray, self).__gt__(oth) - - # - # End comparison operators - # - - # - # Begin reduction operators - # - - @return_arr - def prod(self, axis=None, dtype=None, out=None): - if axis is not None: - units = self.units**self.shape[axis] - else: - units = self.units**self.size - return super(YTArray, self).prod(axis, dtype, out), units - - @return_arr - def mean(self, axis=None, dtype=None, out=None): - return super(YTArray, self).mean(axis, dtype, out), self.units - - @return_arr - def sum(self, axis=None, dtype=None, out=None): - return super(YTArray, self).sum(axis, dtype, out), self.units - - @return_arr - def std(self, axis=None, dtype=None, out=None, ddof=0): - return super(YTArray, self).std(axis, dtype, out, ddof), self.units - - def __array_wrap__(self, out_arr, context=None): - ret = super(YTArray, self).__array_wrap__(out_arr, context) - if isinstance(ret, YTQuantity) and ret.shape != (): - ret = ret.view(YTArray) - if context is None: - if ret.shape == (): - return ret[()] - else: - return ret - ufunc = context[0] - inputs = context[1] - if ufunc in unary_operators: - out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr) - unit = self._ufunc_registry[context[0]](u) - ret_class = type(self) - elif ufunc in binary_operators: - unit_operator = self._ufunc_registry[context[0]] - inps, units, ret_class = get_inp_u_binary(ufunc, inputs) - if unit_operator in (preserve_units, comparison_unit, - arctan2_unit): - inps, units = handle_comparison_units( - inps, units, ufunc, ret_class, raise_error=True) - unit = unit_operator(*units) - if unit_operator in (multiply_units, divide_units): - out_arr, out_arr, unit = handle_multiply_divide_units( - unit, units, out_arr, out_arr) - else: - raise RuntimeError( - "Support for the %s ufunc has not been added " - "to YTArray." % str(context[0])) - if unit is None: - out_arr = np.array(out_arr, copy=False) - return out_arr - out_arr.units = unit - if out_arr.size == 1: - return YTQuantity(np.array(out_arr), unit) - else: - if ret_class is YTQuantity: - # This happens if you do ndarray * YTQuantity. 
Explicitly - # casting to YTArray avoids creating a YTQuantity with - # size > 1 - return YTArray(np.array(out_arr), unit) - return ret_class(np.array(out_arr, copy=False), unit) - - else: # numpy version equal to or newer than 1.13 - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - func = getattr(ufunc, method) - if 'out' in kwargs: - out_orig = kwargs.pop('out') - if ufunc in multiple_output_operators: - outs = [] - for arr in out_orig: - outs.append(arr.view(np.ndarray)) - out = tuple(outs) - else: - out_element = out_orig[0] - if out_element.dtype.kind in ("u", "i"): - new_dtype = "f" + str(out_element.dtype.itemsize) - float_values = out_element.astype(new_dtype) - out_element.dtype = new_dtype - np.copyto(out_element, float_values) - out = out_element.view(np.ndarray) - else: - if ufunc in multiple_output_operators: - num_outputs = multiple_output_operators[ufunc] - out = (None,) * num_outputs - else: - out = None - if len(inputs) == 1: - _, inp, u = get_inp_u_unary(ufunc, inputs) - out_arr = func(np.asarray(inp), out=out, **kwargs) - if ufunc in (multiply, divide) and method == 'reduce': - power_sign = POWER_SIGN_MAPPING[ufunc] - if 'axis' in kwargs and kwargs['axis'] is not None: - unit = u**(power_sign*inp.shape[kwargs['axis']]) - else: - unit = u**(power_sign*inp.size) - else: - unit = self._ufunc_registry[ufunc](u) - ret_class = type(self) - elif len(inputs) == 2: - unit_operator = self._ufunc_registry[ufunc] - inps, units, ret_class = get_inp_u_binary(ufunc, inputs) - if unit_operator in (comparison_unit, arctan2_unit): - inps, units = handle_comparison_units( - inps, units, ufunc, ret_class) - elif unit_operator is preserve_units: - inps, units = handle_preserve_units( - inps, units, ufunc, ret_class) - unit = unit_operator(*units) - out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), - out=out, **kwargs) - if unit_operator in (multiply_units, divide_units): - out, out_arr, unit = handle_multiply_divide_units( - unit, units, out, out_arr) - else: - if ufunc is clip: - inp = [] - for i in inputs: - if isinstance(i, YTArray): - inp.append(i.to(inputs[0].units).view(np.ndarray)) - elif iterable(i): - inp.append(np.asarray(i)) - else: - inp.append(i) - if out is not None: - _out = out.view(np.ndarray) - else: - _out = None - out_arr = ufunc(*inp, out=_out) - unit = inputs[0].units - ret_class = type(inputs[0]) - # This was added after unyt was spun out, but is not presently used: - # mul = 1 - else: - raise RuntimeError( - "Support for the %s ufunc with %i inputs has not been " - "added to unyt_array." % (str(ufunc), len(inputs)) - ) - if unit is None: - out_arr = np.array(out_arr, copy=False) - elif ufunc in (modf, divmod_): - out_arr = tuple((ret_class(o, unit) for o in out_arr)) - elif out_arr.size == 1: - out_arr = YTQuantity(np.asarray(out_arr), unit) - else: - if ret_class is YTQuantity: - # This happens if you do ndarray * YTQuantity. 
Explicitly - # casting to YTArray avoids creating a YTQuantity with - # size > 1 - out_arr = YTArray(np.asarray(out_arr), unit) - else: - out_arr = ret_class(np.asarray(out_arr), unit) - if out is not None: - if ufunc not in multiple_output_operators: - out_orig[0].flat[:] = out.flat[:] - if isinstance(out_orig[0], YTArray): - out_orig[0].units = unit - return out_arr - - def copy(self, order='C'): - return type(self)(np.copy(np.asarray(self)), self.units) - - def __array_finalize__(self, obj): - if obj is None and hasattr(self, 'units'): - return - self.units = getattr(obj, 'units', NULL_UNIT) - - def __pos__(self): - """ Posify the data. """ - # this needs to be defined for all numpy versions, see - # numpy issue #9081 - return type(self)(super(YTArray, self).__pos__(), self.units) - - @return_arr - def dot(self, b, out=None): - return super(YTArray, self).dot(b), self.units*b.units - - def __reduce__(self): - """Pickle reduction method - - See the documentation for the standard library pickle module: - http://docs.python.org/2/library/pickle.html - - Unit metadata is encoded in the zeroth element of third element of the - returned tuple, itself a tuple used to restore the state of the ndarray. - This is always defined for numpy arrays. - """ - np_ret = super(YTArray, self).__reduce__() - obj_state = np_ret[2] - unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],) - new_ret = np_ret[:2] + unit_state + np_ret[3:] - return new_ret - - def __setstate__(self, state): - """Pickle setstate method - - This is called inside pickle.read() and restores the unit data from the - metadata extracted in __reduce__ and then serialized by pickle. - """ - super(YTArray, self).__setstate__(state[1:]) - try: - unit, lut = state[0] - except TypeError: - # this case happens when we try to load an old pickle file - # created before we serialized the unit symbol lookup table - # into the pickle file - unit, lut = str(state[0]), default_unit_symbol_lut.copy() - # need to fix up the lut if the pickle was saved prior to PR #1728 - # when the pickle format changed - if len(lut['m']) == 2: - lut.update(default_unit_symbol_lut) - for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]: - lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}') - # sympy 1.5 added new assumptions, making dimensions not compare - # as equal, so we update the loaded assumptions based on the assumptions - # for angle. I could have used any dimension, angle just - # happened to already be imported in this file - for v in lut.values(): - v[1]._assumptions.update(angle._assumptions) - registry = UnitRegistry(lut=lut, add_default_symbols=False) - self.units = Unit(unit, registry=registry) - - def __deepcopy__(self, memodict=None): - """copy.deepcopy implementation - - This is necessary for stdlib deepcopy of arrays and quantities. - """ - if memodict is None: - memodict = {} - ret = super(YTArray, self).__deepcopy__(memodict) - return type(self)(ret, copy.deepcopy(self.units)) - -class YTQuantity(YTArray): - """ - A scalar associated with a unit. - - Parameters - ---------- - - input_scalar : an integer or floating point scalar - The scalar to attach units to - input_units : String unit specification, unit symbol object, or astropy units - The units of the quantity. Powers must be specified using python syntax - (cm**3, not cm^3). - registry : A UnitRegistry object - The registry to create units from. 
If input_units is already associated - with a unit registry and this is specified, this will be used instead of - the registry associated with the unit object. - dtype : data-type - The dtype of the array data. - - Examples - -------- - - >>> from yt import YTQuantity - >>> a = YTQuantity(1, 'cm') - >>> b = YTQuantity(2, 'm') - >>> a + b - 201.0 cm - >>> b + a - 2.01 m - - NumPy ufuncs will pass through units where appropriate. - - >>> import numpy as np - >>> a = YTQuantity(12, 'g/cm**3') - >>> np.abs(a) - 12 g/cm**3 - - and strip them when it would be annoying to deal with them. - - >>> print(np.log10(a)) - 1.07918124605 - - YTQuantity is tightly integrated with yt datasets: - - >>> import yt - >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') - >>> a = ds.quan(5, 'code_length') - >>> a.in_cgs() - 1.543e+25 cm - - This is equivalent to: - - >>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry) - >>> np.all(a == b) - True - - """ - def __new__(cls, input_scalar, input_units=None, registry=None, - dtype=np.float64, bypass_validation=False): - if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)): - raise RuntimeError("YTQuantity values must be numeric") - ret = YTArray.__new__(cls, input_scalar, input_units, registry, - dtype=dtype, bypass_validation=bypass_validation) - if ret.size > 1: - raise RuntimeError("YTQuantity instances must be scalars") - return ret - - def __repr__(self): - return str(self) - -def validate_numpy_wrapper_units(v, arrs): - if not any(isinstance(a, YTArray) for a in arrs): - return v - if not all(isinstance(a, YTArray) for a in arrs): - raise RuntimeError("Not all of your arrays are YTArrays.") - a1 = arrs[0] - if not all(a.units == a1.units for a in arrs[1:]): - raise RuntimeError("Your arrays must have identical units.") - v.units = a1.units - return v - -def uconcatenate(arrs, axis=0): - """Concatenate a sequence of arrays. - - This wrapper around numpy.concatenate preserves units. All input arrays must - have the same units. See the documentation of numpy.concatenate for full - details. - - Examples - -------- - >>> A = yt.YTArray([1, 2, 3], 'cm') - >>> B = yt.YTArray([2, 3, 4], 'cm') - >>> uconcatenate((A, B)) - YTArray([ 1., 2., 3., 2., 3., 4.]) cm - - """ - v = np.concatenate(arrs, axis=axis) - v = validate_numpy_wrapper_units(v, arrs) - return v - -def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None): - """Applies the cross product to two YT arrays. - - This wrapper around numpy.cross preserves units. - See the documentation of numpy.cross for full - details. - """ - v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) - units = arr1.units * arr2.units - arr = YTArray(v, units, registry=registry) - return arr - -def uintersect1d(arr1, arr2, assume_unique=False): - """Find the sorted unique elements of the two input arrays. - - A wrapper around numpy.intersect1d that preserves units. All input arrays - must have the same units. See the documentation of numpy.intersect1d for - full details. - - Examples - -------- - >>> A = yt.YTArray([1, 2, 3], 'cm') - >>> B = yt.YTArray([2, 3, 4], 'cm') - >>> uintersect1d(A, B) - YTArray([ 2., 3.]) cm - - """ - v = np.intersect1d(arr1, arr2, assume_unique=assume_unique) - v = validate_numpy_wrapper_units(v, [arr1, arr2]) - return v - -def uunion1d(arr1, arr2): - """Find the union of two arrays. - - A wrapper around numpy.intersect1d that preserves units. All input arrays - must have the same units. 
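For reference, a minimal sketch of the unit behavior implemented by the operator and reduction overrides removed above; the same behavior is expected from the unyt-backed classes after this change (only the public YTArray constructor is used here):

    from yt.units.yt_array import YTArray

    a = YTArray([1.0, 2.0, 3.0], "km")
    b = YTArray([100.0, 200.0, 300.0], "m")
    a + b        # operands are brought to a common unit before adding
    a.sum()      # 6.0 km     -- sum, mean and std keep the unit
    a.prod()     # 6.0 km**3  -- prod raises the unit to the number of elements

The u* wrappers (uconcatenate, uintersect1d, and friends) additionally require every input to carry identical units and raise a RuntimeError otherwise.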
See the documentation of numpy.intersect1d for - full details. - - Examples - -------- - >>> A = yt.YTArray([1, 2, 3], 'cm') - >>> B = yt.YTArray([2, 3, 4], 'cm') - >>> uunion1d(A, B) - YTArray([ 1., 2., 3., 4.]) cm - - """ - v = np.union1d(arr1, arr2) - v = validate_numpy_wrapper_units(v, [arr1, arr2]) - return v - -def unorm(data, ord=None, axis=None, keepdims=False): - """Matrix or vector norm that preserves units - - This is a wrapper around np.linalg.norm that preserves units. See - the documentation for that function for descriptions of the keyword - arguments. - - The keepdims argument is ignored if the version of numpy installed is - older than numpy 1.10.0. - """ - if LooseVersion(np.__version__) < LooseVersion('1.10.0'): - norm = np.linalg.norm(data, ord=ord, axis=axis) - else: - norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims) - if norm.shape == (): - return YTQuantity(norm, data.units) - return YTArray(norm, data.units) - -def udot(op1, op2): - """Matrix or vector dot product that preserves units - - This is a wrapper around np.dot that preserves units. - """ - dot = np.dot(op1.d, op2.d) - units = op1.units*op2.units - if dot.shape == (): - return YTQuantity(dot, units) - return YTArray(dot, units) - -def uvstack(arrs): - """Stack arrays in sequence vertically (row wise) while preserving units - - This is a wrapper around np.vstack that preserves units. - """ - v = np.vstack(arrs) - v = validate_numpy_wrapper_units(v, arrs) - return v - -def uhstack(arrs): - """Stack arrays in sequence horizontally (column wise) while preserving units - - This is a wrapper around np.hstack that preserves units. - """ - v = np.hstack(arrs) - v = validate_numpy_wrapper_units(v, arrs) - return v - -def ustack(arrs, axis=0): - """Join a sequence of arrays along a new axis while preserving units - - The axis parameter specifies the index of the new axis in the - dimensions of the result. For example, if ``axis=0`` it will be the - first dimension and if ``axis=-1`` it will be the last dimension. - - This is a wrapper around np.stack that preserves units. - - """ - v = np.stack(arrs, axis=axis) - v = validate_numpy_wrapper_units(v, arrs) - return v - -def array_like_field(data, x, field): - field = data._determine_fields(field)[0] - if isinstance(field, tuple): - finfo = data.ds._get_field_info(field[0],field[1]) - else: - finfo = data.ds._get_field_info(field) - if finfo.sampling_type == 'particle': - units = finfo.output_units - else: - units = finfo.units - if isinstance(x, YTArray): - arr = copy.deepcopy(x) - arr.convert_to_units(units) - return arr - if isinstance(x, np.ndarray): - return data.ds.arr(x, units) - else: - return data.ds.quan(x, units) - -def get_binary_op_return_class(cls1, cls2): - if cls1 is cls2: - return cls1 - if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)): - return cls2 - if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)): - return cls1 - if issubclass(cls1, YTQuantity): - return cls2 - if issubclass(cls2, YTQuantity): - return cls1 - if issubclass(cls1, cls2): - return cls1 - if issubclass(cls2, cls1): - return cls2 - else: - raise RuntimeError("Undefined operation for a YTArray subclass. " - "Received operand types (%s) and (%s)" % (cls1, cls2)) - -def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'): - r""" - Load YTArrays with unit information from a text file. 
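The unorm and udot helpers removed just above are thin wrappers over np.linalg.norm and np.dot that carry units through; the same names remain importable after the switch to unyt, so this short sketch should hold either way:

    from yt.units.yt_array import YTArray, udot, unorm

    v = YTArray([3.0, 4.0], "km/s")
    unorm(v)      # 5.0 km/s
    udot(v, v)    # 25.0 km**2/s**2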
Each row in the - text file must have the same number of values. - - Parameters - ---------- - fname : str - Filename to read. - dtype : data-type, optional - Data-type of the resulting array; default: float. - delimiter : str, optional - The string used to separate values. By default, this is any - whitespace. - usecols : sequence, optional - Which columns to read, with 0 being the first. For example, - ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. - The default, None, results in all columns being read. - comments : str, optional - The character used to indicate the start of a comment; - default: '#'. - - Examples - -------- - >>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t") - """ - f = open(fname, 'r') - next_one = False - units = [] - num_cols = -1 - for line in f.readlines(): - words = line.strip().split() - if len(words) == 0: - continue - if line[0] == comments: - if next_one: - units = words[1:] - if len(words) == 2 and words[1] == "Units": - next_one = True - else: - # Here we catch the first line of numbers - try: - col_words = line.strip().split(delimiter) - for word in col_words: - float(word) - num_cols = len(col_words) - break - except ValueError: - mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0]) - f.close() - if len(units) != num_cols: - mylog.warning("Malformed or incomplete units header. Arrays will be " - "dimensionless!") - units = ["dimensionless"]*num_cols - arrays = np.loadtxt(fname, dtype=dtype, comments=comments, - delimiter=delimiter, converters=None, - unpack=True, usecols=usecols, ndmin=0) - if usecols is not None: - units = [units[col] for col in usecols] - mylog.info("Array units: %s" % ", ".join(units)) - return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)]) - -def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='', - footer='', comments='#'): - r""" - Write YTArrays with unit information to a text file. - - Parameters - ---------- - fname : str - The file to write the YTArrays to. - arrays : list of YTArrays or single YTArray - The array(s) to write to the file. - fmt : str or sequence of strs, optional - A single format (%10.5f), or a sequence of formats. - delimiter : str, optional - String or character separating columns. - header : str, optional - String that will be written at the beginning of the file, before the - unit header. - footer : str, optional - String that will be written at the end of the file. - comments : str, optional - String that will be prepended to the ``header`` and ``footer`` strings, - to mark them as comments. Default: '# ', as expected by e.g. - ``yt.loadtxt``. 
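As a reference point for the two text-file helpers: loadtxt looks for a comment line whose second token is "Units" and then takes one unit per column from the following comment line, which is exactly the header savetxt writes. A sketch of a file it can parse (the file name and numbers are invented; columns must be separated by the default tab delimiter):

    #  Units
    #  K    km/s
    12000.0    150.3
    9000.0     -42.0

read back with, for example:

    import yt
    temp, velx = yt.loadtxt("sphere.dat", usecols=(0, 1))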
- - Examples - -------- - >>> sp = ds.sphere("c", (100,"kpc")) - >>> a = sp["density"] - >>> b = sp["temperature"] - >>> c = sp["velocity_x"] - >>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t") - """ - if not isinstance(arrays, list): - arrays = [arrays] - units = [] - for array in arrays: - if hasattr(array, "units"): - units.append(str(array.units)) - else: - units.append("dimensionless") - if header != '': - header += '\n' - header += " Units\n " + '\t'.join(units) - np.savetxt(fname, np.transpose(arrays), header=header, - fmt=fmt, delimiter=delimiter, footer=footer, - newline='\n', comments=comments) +from unyt.array import * +from yt.units import YTArray, YTQuantity +from yt.funcs import array_like_field def display_ytarray(arr): r""" @@ -1938,4 +45,3 @@ def _value_updater(change): def _wrap_display_ytarray(arr): from IPython.core.display import display display(display_ytarray(arr)) - diff --git a/yt/utilities/amr_kdtree/__init__.py b/yt/utilities/amr_kdtree/__init__.py index 98e06f34ef3..06cde376cf3 100644 --- a/yt/utilities/amr_kdtree/__init__.py +++ b/yt/utilities/amr_kdtree/__init__.py @@ -3,10 +3,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/utilities/amr_kdtree/amr_kdtools.py b/yt/utilities/amr_kdtree/amr_kdtools.py index c5cbb051d40..78e7a53de14 100644 --- a/yt/utilities/amr_kdtree/amr_kdtools.py +++ b/yt/utilities/amr_kdtree/amr_kdtools.py @@ -1,17 +1,3 @@ -""" -AMR kD-Tree Tools - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import mylog diff --git a/yt/utilities/amr_kdtree/amr_kdtree.py b/yt/utilities/amr_kdtree/amr_kdtree.py index 07af7b93661..f5dd4ef1877 100644 --- a/yt/utilities/amr_kdtree/amr_kdtree.py +++ b/yt/utilities/amr_kdtree/amr_kdtree.py @@ -1,19 +1,3 @@ -""" -AMR kD-Tree Framework - - -""" -from __future__ import print_function -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
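The tail of this hunk shows the net effect of the rewrite: the array machinery is now re-exported from unyt, so YTArray and YTQuantity keep their names and behavior while being backed by unyt's classes. A minimal compatibility check, assuming unyt is importable:

    from unyt import unyt_array
    from yt.units import YTArray

    a = YTArray([1.0, 2.0], "kpc")
    assert isinstance(a, unyt_array)   # YTArray is unyt-backed after this change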
-#----------------------------------------------------------------------------- - import operator import numpy as np @@ -105,15 +89,15 @@ def check_tree(self): grid = self.ds.index.grids[node.grid - self._id_offset] dds = grid.dds gle = grid.LeftEdge - nle = self.ds.arr(node.get_left_edge(), input_units="code_length") - nre = self.ds.arr(node.get_right_edge(), input_units="code_length") + nle = self.ds.arr(node.get_left_edge(), units="code_length") + nre = self.ds.arr(node.get_right_edge(), units="code_length") li = np.rint((nle-gle)/dds).astype('int32') ri = np.rint((nre-gle)/dds).astype('int32') dims = (ri - li).astype('int32') assert(np.all(grid.LeftEdge <= nle)) assert(np.all(grid.RightEdge >= nre)) assert(np.all(dims > 0)) - # print grid, dims, li, ri + # print(grid, dims, li, ri) # Calculate the Volume vol = self.trunk.kd_sum_volume() @@ -130,8 +114,8 @@ def sum_cells(self, all_cells=False): grid = self.ds.index.grids[node.grid - self._id_offset] dds = grid.dds gle = grid.LeftEdge - nle = self.ds.arr(node.get_left_edge(), input_units="code_length") - nre = self.ds.arr(node.get_right_edge(), input_units="code_length") + nle = self.ds.arr(node.get_left_edge(), units="code_length") + nre = self.ds.arr(node.get_right_edge(), units="code_length") li = np.rint((nle-gle)/dds).astype('int32') ri = np.rint((nre-gle)/dds).astype('int32') dims = (ri - li).astype('int32') diff --git a/yt/utilities/amr_kdtree/api.py b/yt/utilities/amr_kdtree/api.py index 9a5b34befc0..843f763d90c 100644 --- a/yt/utilities/amr_kdtree/api.py +++ b/yt/utilities/amr_kdtree/api.py @@ -1,15 +1 @@ -""" -API for yt.utilities.amr_kdtree - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .amr_kdtree import AMRKDTree diff --git a/yt/utilities/answer_testing/__init__.py b/yt/utilities/answer_testing/__init__.py index 9e417ac9642..81c61259ce7 100644 --- a/yt/utilities/answer_testing/__init__.py +++ b/yt/utilities/answer_testing/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/utilities/answer_testing/answer_tests.py b/yt/utilities/answer_testing/answer_tests.py index 9330462e018..5a888a0640f 100644 --- a/yt/utilities/answer_testing/answer_tests.py +++ b/yt/utilities/answer_testing/answer_tests.py @@ -9,12 +9,6 @@ import matplotlib.image as mpimg import numpy as np -from yt.analysis_modules.cosmological_observation.api import \ - LightCone -from yt.analysis_modules.halo_analysis.api import HaloCatalog -from yt.analysis_modules.halo_mass_function.api import HaloMassFcn -from yt.utilities.on_demand_imports import \ - _h5py as h5py from . 
import utils import yt.visualization.plot_window as pw @@ -132,22 +126,6 @@ def pixelized_projection_values(ds, axis, field, return result.hexdigest() -def simulated_halo_mass_function(ds, finder): - hc = HaloCatalog(data_ds=ds, finder_method=finder) - hc.create() - hmf = HaloMassFcn(halos_ds=hc.halos_ds) - result = np.empty((2, hmf.masses_sim.size)) - result[0] = hmf.masses_sim.d - result[1] = hmf.n_cumulative_sim.d - return result - -def analytic_halo_mass_function(ds, fit): - hmf = HaloMassFcn(simulation_ds=ds, fitting_function=fit) - result = np.empty((2, hmf.masses_analytic.size)) - result[0] = hmf.masses_analytic.d - result[1] = hmf.n_cumulative_analytic.d - return result - def small_patch_amr(ds, field, weight, axis, ds_obj): hex_digests = {} # Grid hierarchy test @@ -274,25 +252,6 @@ def axial_pixelization(ds): pix_y return pix_x, pix_y -def light_cone_projection(parameter_file, simulation_type): - lc = LightCone( - parameter_file, simulation_type, 0., 0.1, - observer_redshift=0.0, time_data=False) - lc.calculate_light_cone_solution( - seed=123456789, filename="LC/solution.txt") - lc.project_light_cone( - (600.0, "arcmin"), (60.0, "arcsec"), "density", - weight_field=None, save_stack=True) - fh = h5py.File("LC/LightCone.h5", mode="r") - data = fh["density_None"].value - units = fh["density_None"].attrs["units"] - assert units == "g/cm**2" - fh.close() - mean = data.mean() - mi = data[data.nonzero()].min() - ma = data.max() - return np.array([mean, mi, ma]) - def extract_connected_sets(ds_fn, data_source, field, num_levels, min_val, max_val): n, all_sets = data_source.extract_connected_sets( field, num_levels, min_val, max_val) diff --git a/yt/utilities/answer_testing/api.py b/yt/utilities/answer_testing/api.py index ecf34552306..536df4d5171 100644 --- a/yt/utilities/answer_testing/api.py +++ b/yt/utilities/answer_testing/api.py @@ -1,17 +1,2 @@ -""" -API for enzo_test - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
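The amr_kdtree hunks above also track a keyword rename in the dataset array factory: ds.arr now takes units= where it previously took input_units=. A one-line sketch, assuming ds is any loaded dataset:

    edge = ds.arr([0.0, 0.0, 0.0], units="code_length")   # formerly input_units="code_length"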
-#----------------------------------------------------------------------------- - from yt.utilities.answer_testing.framework import AnswerTesting diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 20fb1143a16..2397077877c 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -9,7 +9,8 @@ import hashlib import contextlib import sys -from yt.extern.six.moves import cPickle, urllib +import pickle +import urllib import shelve import zlib import tempfile @@ -195,7 +196,7 @@ def get(self, ds_name, default = None): # Raise error if all tries were unsuccessful raise YTCloudError(url) # This is dangerous, but we have a controlled S3 environment - rv = cPickle.loads(data) + rv = pickle.loads(data) self.cache[ds_name] = rv return rv @@ -214,7 +215,7 @@ def dump(self, result_storage): pb = get_pbar("Storing results ", len(result_storage)) for i, ds_name in enumerate(result_storage): pb.update(i) - rs = cPickle.dumps(result_storage[ds_name]) + rs = pickle.dumps(result_storage[ds_name]) object_name = "%s_%s" % (self.answer_name, ds_name) if object_name in c.get_object_names(): obj = c.get_object(object_name) @@ -436,6 +437,9 @@ def description(self): oname = "_".join((str(s) for s in obj_type)) args = [self._type_name, str(self.ds), oname] args += [str(getattr(self, an)) for an in self._attrs] + suffix = getattr(self, "suffix", None) + if suffix: + args.append(suffix) return "_".join(args).replace('.', '_') class FieldValuesTest(AnswerTestingTest): @@ -453,8 +457,11 @@ def __init__(self, ds_fn, field, obj_type = None, def run(self): obj = create_obj(self.ds, self.obj_type) field = obj._determine_fields(self.field)[0] + fd = self.ds.field_info[field] if self.particle_type: weight_field = (field[0], "particle_ones") + elif fd.is_sph_field: + weight_field = (field[0], "ones") else: weight_field = ("index", "ones") avg = obj.quantities.weighted_average_quantity( @@ -464,6 +471,10 @@ def run(self): def compare(self, new_result, old_result): err_msg = "Field values for %s not equal." % (self.field,) + if hasattr(new_result, "d"): + new_result = new_result.d + if hasattr(old_result, "d"): + old_result = old_result.d if self.decimals is None: assert_equal(new_result, old_result, err_msg=err_msg, verbose=True) @@ -493,6 +504,10 @@ def run(self): def compare(self, new_result, old_result): err_msg = "All field values for %s not equal." % self.field + if hasattr(new_result, "d"): + new_result = new_result.d + if hasattr(old_result, "d"): + old_result = old_result.d if self.decimals is None: assert_equal(new_result, old_result, err_msg=err_msg, verbose=True) @@ -548,6 +563,10 @@ def compare(self, new_result, old_result): # not do the test here. 
continue nres, ores = new_result[k][nind], old_result[k][oind] + if hasattr(nres, "d"): + nres = nres.d + if hasattr(ores, "d"): + ores = ores.d if self.decimals is None: assert_equal(nres, ores, err_msg=err_msg) else: @@ -566,6 +585,13 @@ def __init__(self, ds_fn, axis, field, weight_field = None, self.weight_field = weight_field self.obj_type = obj_type + def _get_frb(self, obj): + proj = self.ds.proj(self.field, self.axis, + weight_field=self.weight_field, + data_source = obj) + frb = proj.to_frb((1.0, 'unitary'), 256) + return proj, frb + def run(self): if self.obj_type is not None: obj = create_obj(self.ds, self.obj_type) @@ -593,6 +619,13 @@ def compare(self, new_result, old_result): if k == "weight_field_sum": continue assert_allclose_units(new_result[k], old_result[k], 1e-10) +class PixelizedParticleProjectionValuesTest(PixelizedProjectionValuesTest): + + def _get_frb(self, obj): + proj_plot = particle_plots.ParticleProjectionPlot(self.ds, self.axis, [self.field], + weight_field = self.weight_field) + return proj_plot.data_source, proj_plot.frb + class GridValuesTest(AnswerTestingTest): _type_name = "GridValues" _attrs = ("field",) @@ -613,6 +646,10 @@ def compare(self, new_result, old_result): for k in new_result: assert (k in old_result) for k in new_result: + if hasattr(new_result[k], "d"): + new_result[k] = new_result[k].d + if hasattr(old_result[k], "d"): + old_result[k] = old_result[k].d assert_equal(new_result[k], old_result[k]) class VerifySimulationSameTest(AnswerTestingTest): @@ -650,6 +687,10 @@ def run(self): def compare(self, new_result, old_result): for k in new_result: + if hasattr(new_result[k], "d"): + new_result[k] = new_result[k].d + if hasattr(old_result[k], "d"): + old_result[k] = old_result[k].d assert_equal(new_result[k], old_result[k]) class ParentageRelationshipsTest(AnswerTestingTest): @@ -676,55 +717,6 @@ def compare(self, new_result, old_result): for newc, oldc in zip(new_result["children"], old_result["children"]): assert(newc == oldc) -class SimulatedHaloMassFunctionTest(AnswerTestingTest): - _type_name = "SimulatedHaloMassFunction" - _attrs = ("finder",) - - def __init__(self, ds_fn, finder): - super(SimulatedHaloMassFunctionTest, self).__init__(ds_fn) - self.finder = finder - - def run(self): - from yt.analysis_modules.halo_analysis.api import HaloCatalog - from yt.analysis_modules.halo_mass_function.api import HaloMassFcn - hc = HaloCatalog(data_ds=self.ds, finder_method=self.finder) - hc.create() - - hmf = HaloMassFcn(halos_ds=hc.halos_ds) - result = np.empty((2, hmf.masses_sim.size)) - result[0] = hmf.masses_sim.d - result[1] = hmf.n_cumulative_sim.d - return result - - def compare(self, new_result, old_result): - err_msg = ("Simulated halo mass functions not equation for " + - "%s halo finder.") % self.finder - assert_equal(new_result, old_result, - err_msg=err_msg, verbose=True) - -class AnalyticHaloMassFunctionTest(AnswerTestingTest): - _type_name = "AnalyticHaloMassFunction" - _attrs = ("fitting_function",) - - def __init__(self, ds_fn, fitting_function): - super(AnalyticHaloMassFunctionTest, self).__init__(ds_fn) - self.fitting_function = fitting_function - - def run(self): - from yt.analysis_modules.halo_mass_function.api import HaloMassFcn - hmf = HaloMassFcn(simulation_ds=self.ds, - fitting_function=self.fitting_function) - result = np.empty((2, hmf.masses_analytic.size)) - result[0] = hmf.masses_analytic.d - result[1] = hmf.n_cumulative_analytic.d - return result - - def compare(self, new_result, old_result): - err_msg = ("Analytic halo 
mass functions not equal for " + - "fitting function %d.") % self.fitting_function - assert_almost_equal(new_result, old_result, - err_msg=err_msg, verbose=True) - def compare_image_lists(new_result, old_result, decimals): fns = [] for i in range(2): @@ -884,6 +876,10 @@ def compare(self, new_result, old_result): err_msg="Number of outputs not equal.", verbose=True) for k in new_result: + if hasattr(new_result[k], "d"): + new_result[k] = new_result[k].d + if hasattr(old_result[k], "d"): + old_result[k] = old_result[k].d if self.decimals is None: assert_almost_equal(new_result[k], old_result[k]) else: @@ -957,6 +953,10 @@ def compare(self, new_result, old_result): err_msg="Number of outputs not equal.", verbose=True) for k in new_result: + if hasattr(new_result[k], "d"): + new_result[k] = new_result[k].d + if hasattr(old_result[k], "d"): + old_result[k] = old_result[k].d if self.decimals is None: assert_almost_equal(new_result[k], old_result[k]) else: @@ -1029,34 +1029,38 @@ def big_patch_amr(ds_fn, fields, input_center="max", input_weight="density"): dobj_name) -def sph_answer(ds, ds_str_repr, ds_nparticles, fields): +def _particle_answers(ds, ds_str_repr, ds_nparticles, fields, proj_test_class): if not can_run_ds(ds): return assert_equal(str(ds), ds_str_repr) dso = [None, ("sphere", ("c", (0.1, 'unitary')))] dd = ds.all_data() - assert_equal(dd["particle_position"].shape, (ds_nparticles, 3)) + # this needs to explicitly be "all" + assert_equal(dd["all", "particle_position"].shape, + (ds_nparticles, 3)) tot = sum(dd[ptype, "particle_position"].shape[0] - for ptype in ds.particle_types if ptype != "all") + for ptype in ds.particle_types_raw) assert_equal(tot, ds_nparticles) for dobj_name in dso: - dobj = create_obj(ds, dobj_name) - s1 = dobj["ones"].sum() - s2 = sum(mask.sum() for block, mask in dobj.blocks) - assert_equal(s1, s2) for field, weight_field in fields.items(): - if field[0] in ds.particle_types: - particle_type = True - else: - particle_type = False + particle_type = field[0] in ds.particle_types for axis in [0, 1, 2]: if not particle_type: - yield PixelizedProjectionValuesTest( + yield proj_test_class( ds, axis, field, weight_field, dobj_name) yield FieldValuesTest(ds, field, dobj_name, particle_type=particle_type) + +def nbody_answer(ds, ds_str_repr, ds_nparticles, fields): + return _particle_answers(ds, ds_str_repr, ds_nparticles, fields, + PixelizedParticleProjectionValuesTest) + +def sph_answer(ds, ds_str_repr, ds_nparticles, fields): + return _particle_answers(ds, ds_str_repr, ds_nparticles, fields, + PixelizedProjectionValuesTest) + def create_obj(ds, obj_type): # obj_type should be tuple of # ( obj_name, ( args ) ) diff --git a/yt/utilities/answer_testing/level_sets_tests.py b/yt/utilities/answer_testing/level_sets_tests.py index ed8217751b0..652627d67a8 100644 --- a/yt/utilities/answer_testing/level_sets_tests.py +++ b/yt/utilities/answer_testing/level_sets_tests.py @@ -1,18 +1,3 @@ -""" -Answer Testing for level sets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
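The hasattr(..., "d") guards added throughout these compare methods strip units before the numerical assertions, since stored answers may be plain ndarrays while freshly computed results are YTArrays; .d is the unit-stripped ndarray view. A sketch of the coercion:

    from yt.units.yt_array import YTArray

    new_result = YTArray([1.0, 2.0], "cm")
    if hasattr(new_result, "d"):
        new_result = new_result.d   # plain ndarray view, safe to compare with unitless answers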
-#----------------------------------------------------------------------------- - import numpy as np from yt.testing import \ diff --git a/yt/utilities/api.py b/yt/utilities/api.py index eed2c947b68..7c097dfc5f5 100644 --- a/yt/utilities/api.py +++ b/yt/utilities/api.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/utilities/chemical_formulas.py b/yt/utilities/chemical_formulas.py index d17d3e21db5..4f199ae6c8d 100644 --- a/yt/utilities/chemical_formulas.py +++ b/yt/utilities/chemical_formulas.py @@ -1,20 +1,7 @@ -""" -Very basic chemical formula parser. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import re from .periodic_table import periodic_table +from .physical_ratios import _primordial_mass_fraction + class ChemicalFormula: def __init__(self, formula_string): @@ -43,3 +30,15 @@ def __init__(self, formula_string): def __repr__(self): return self.formula_string + + +def compute_mu(): + # Assume full ionization and cosmic abundances + # This assumes full ionization! + muinv = 2.0 * _primordial_mass_fraction["H"] / \ + ChemicalFormula("H").weight + muinv += 3.0 * _primordial_mass_fraction["He"] / \ + ChemicalFormula("He").weight + return 1.0/muinv + +default_mu = compute_mu() diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 9d5fa5dd353..158e59020d7 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -1,20 +1,3 @@ -""" -A means of running standalone commands with a shared set of options. - - - -""" -from __future__ import print_function -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
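The new compute_mu() helper in chemical_formulas.py assumes a fully ionized primordial H/He mix. With the usual primordial mass fractions X ~ 0.76 and Y ~ 0.24 (the exact values come from _primordial_mass_fraction), the arithmetic works out to the familiar mean molecular weight of roughly 0.59:

    # 1/mu = 2*X/A_H + 3*Y/A_He
    #      ~ 2*0.76/1.008 + 3*0.24/4.003
    #      ~ 1.51 + 0.18 = 1.69          =>  mu ~ 0.59
    from yt.utilities.chemical_formulas import compute_mu

    print(compute_mu())   # about 0.59 for the default abundances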
-#----------------------------------------------------------------------------- - import argparse import base64 import getpass @@ -40,9 +23,9 @@ update_hg_or_git, \ enable_plugins, \ download_file -from yt.extern.six import add_metaclass, string_types -from yt.extern.six.moves import urllib, input -from yt.extern.six.moves.urllib.parse import urlparse +import urllib +import urllib.request +from urllib.parse import urlparse from yt.extern.tqdm import tqdm from yt.convenience import load from yt.visualization.plot_window import \ @@ -74,7 +57,7 @@ def _fix_ds(arg, *args, **kwargs): return ds def _add_arg(sc, arg): - if isinstance(arg, string_types): + if isinstance(arg, str): arg = _common_options[arg].copy() argc = dict(arg.items()) argnames = [] @@ -198,8 +181,7 @@ def __init__(cls, name, b, d): for arg in cls.args: _add_arg(sc, arg) -@add_metaclass(YTCommandSubtype) -class YTCommand(object): +class YTCommand(metaclass = YTCommandSubtype): args = () name = None description = "" diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py index 056c7e8ae69..8fbd8ba5aed 100644 --- a/yt/utilities/configure.py +++ b/yt/utilities/configure.py @@ -1,18 +1,9 @@ -# -*- coding: UTF-8 -*- -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import shutil import sys import argparse from yt.config import CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE, YTConfigParser -from yt.extern.six.moves import configparser +import configparser CONFIG = YTConfigParser() CONFIG.read([CURRENT_CONFIG_FILE]) diff --git a/yt/utilities/cosmology.py b/yt/utilities/cosmology.py index 0453e24762c..84c65ce3a77 100644 --- a/yt/utilities/cosmology.py +++ b/yt/utilities/cosmology.py @@ -1,29 +1,16 @@ -""" -Cosmology calculator. -Cosmology calculator based originally on http://www.kempner.net/cosmic.php -and featuring time and redshift conversion functions from Enzo. - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) yt Development Team. All rights reserved. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
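Several of these files replace six's add_metaclass decorator with the native Python 3 class ... (metaclass=...) syntax. A self-contained sketch of the registration pattern those metaclasses implement (the class and registry names here are illustrative, not yt's):

    registry = {}

    class Registered(type):
        def __init__(cls, name, bases, namespace):
            super().__init__(name, bases, namespace)
            if getattr(cls, "_type_name", None) is not None:
                registry[cls._type_name] = cls

    class Command(metaclass=Registered):
        _type_name = None              # the base class is not registered

    class HelloCommand(Command):
        _type_name = "hello"           # subclasses register themselves at definition time

    assert registry["hello"] is HelloCommand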
-#----------------------------------------------------------------------------- - import functools import numpy as np from yt.funcs import \ - issue_deprecation_warning + issue_deprecation_warning from yt.units import dimensions from yt.units.unit_registry import \ - UnitRegistry + UnitRegistry +from yt.units.unit_object import \ + Unit from yt.units.yt_array import \ - YTArray, \ - YTQuantity + YTArray, \ + YTQuantity from yt.utilities.physical_constants import \ gravitational_constant_cgs as G, \ @@ -97,13 +84,15 @@ def __init__(self, hubble_constant = 0.71, self.omega_lambda = float(omega_lambda) self.omega_curvature = float(omega_curvature) if unit_registry is None: - unit_registry = UnitRegistry() - unit_registry.modify("h", hubble_constant) + unit_registry = UnitRegistry(unit_system=unit_system) + unit_registry.add("h", hubble_constant, dimensions.dimensionless, r"h") for my_unit in ["m", "pc", "AU", "au"]: new_unit = "%scm" % my_unit + my_u = Unit(my_unit, registry=unit_registry) # technically not true, but distances here are actually comoving - unit_registry.add(new_unit, unit_registry.lut[my_unit][0], - dimensions.length, "\\rm{%s}/(1+z)" % my_unit) + unit_registry.add( + new_unit, my_u.base_value, dimensions.length, + "\\rm{%s}/(1+z)" % my_unit, prefixable=True) self.unit_registry = unit_registry self.hubble_constant = self.quan(hubble_constant, "100*km/s/Mpc") self.unit_system = unit_system diff --git a/yt/utilities/decompose.py b/yt/utilities/decompose.py index 24319437d00..d30ab29ed2c 100644 --- a/yt/utilities/decompose.py +++ b/yt/utilities/decompose.py @@ -1,18 +1,3 @@ -""" -Automagical cartesian domain decomposition. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from __future__ import division import numpy as np diff --git a/yt/utilities/definitions.py b/yt/utilities/definitions.py index b61763078d1..76ff06878e6 100644 --- a/yt/utilities/definitions.py +++ b/yt/utilities/definitions.py @@ -1,18 +1,3 @@ -""" -Various definitions for various other modules and routines - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .physical_ratios import \ mpc_per_mpc, kpc_per_mpc, pc_per_mpc, au_per_mpc, rsun_per_mpc, \ miles_per_mpc, km_per_mpc, cm_per_mpc, sec_per_Gyr, sec_per_Myr, \ diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 96c55ca6f01..7761e6e5b99 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -1,22 +1,9 @@ -""" -This is a library of yt-defined exceptions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
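With this change the Cosmology calculator owns its unit registry: "h" is added as a dimensionless symbol and the comoving length symbols (mcm, pccm, ...) are registered with prefixable=True, so prefixed spellings resolve. A hedged usage sketch:

    from yt.utilities.cosmology import Cosmology

    co = Cosmology(hubble_constant=0.7, omega_matter=0.3, omega_lambda=0.7)
    d = co.comoving_radial_distance(0.0, 1.0)
    print(d.to("Mpccm/h"))   # prefixed comoving length, scaled by h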
-#----------------------------------------------------------------------------- - # We don't need to import 'exceptions' import os.path +from unyt.exceptions import UnitOperationError + class YTException(Exception): def __init__(self, message = None, ds = None): Exception.__init__(self, message) @@ -167,6 +154,9 @@ class YTCoordinateNotImplemented(YTException): def __str__(self): return "This coordinate is not implemented for this geometry type." +# define for back compat reasons for code written before yt 4.0 +YTUnitOperationError = UnitOperationError + class YTUnitNotRecognized(YTException): def __init__(self, unit): self.unit = unit @@ -174,80 +164,6 @@ def __init__(self, unit): def __str__(self): return "This dataset doesn't recognize %s" % self.unit -class YTUnitOperationError(YTException, ValueError): - def __init__(self, operation, unit1, unit2=None): - self.operation = operation - self.unit1 = unit1 - self.unit2 = unit2 - YTException.__init__(self) - - def __str__(self): - err = "The %s operator for YTArrays with units (%s) " % (self.operation, self.unit1, ) - if self.unit2 is not None: - err += "and (%s) " % self.unit2 - err += "is not well defined." - return err - -class YTUnitConversionError(YTException): - def __init__(self, unit1, dimension1, unit2, dimension2): - self.unit1 = unit1 - self.unit2 = unit2 - self.dimension1 = dimension1 - self.dimension2 = dimension2 - YTException.__init__(self) - - def __str__(self): - err = "Unit dimensionalities do not match. Tried to convert between " \ - "%s (dim %s) and %s (dim %s)." \ - % (self.unit1, self.dimension1, self.unit2, self.dimension2) - return err - -class YTUnitsNotReducible(YTException): - def __init__(self, unit, units_base): - self.unit = unit - self.units_base = units_base - YTException.__init__(self) - - def __str__(self): - err = "The unit '%s' cannot be reduced to a single expression within " \ - "the %s base system of units." % (self.unit, self.units_base) - return err - -class YTEquivalentDimsError(YTUnitOperationError): - def __init__(self, old_units, new_units, base): - self.old_units = old_units - self.new_units = new_units - self.base = base - - def __str__(self): - err = "It looks like you're trying to convert between '%s' and '%s'. Try " \ - "using \"to_equivalent('%s', '%s')\" instead." % (self.old_units, self.new_units, - self.new_units, self.base) - return err - -class YTUfuncUnitError(YTException): - def __init__(self, ufunc, unit1, unit2): - self.ufunc = ufunc - self.unit1 = unit1 - self.unit2 = unit2 - YTException.__init__(self) - - def __str__(self): - err = "The NumPy %s operation is only allowed on objects with " \ - "identical units. Convert one of the arrays to the other\'s " \ - "units first. Received units (%s) and (%s)." % \ - (self.ufunc, self.unit1, self.unit2) - return err - -class YTIterableUnitCoercionError(YTException): - def __init__(self, quantity_list): - self.quantity_list = quantity_list - - def __str__(self): - err = "Received a list or tuple of quantities with nonuniform units: " \ - "%s" % self.quantity_list - return err - class YTFieldUnitError(YTException): def __init__(self, field_info, returned_units): self.msg = ("The field function associated with the field '%s' returned " @@ -539,6 +455,15 @@ def __init__(self, filename): def __str__(self): return "A file already exists at %s and overwrite=False." 
% self.filename +class YTNonIndexedDataContainer(YTException): + def __init__(self, cont): + self.cont = cont + + def __str__(self): + return ("The data container (%s) is an unindexed type. " + "Operations such as ires, icoords, fcoords and fwidth " + "will not work on it." % type(self.cont)) + class YTGDFUnknownGeometry(Exception): def __init__(self, geometry): self.geometry = geometry @@ -595,10 +520,11 @@ def __init__(self, fields): self.fields = fields def __str__(self): - msg = ("\nSlicePlot, ProjectionPlot, and OffAxisProjectionPlot can only " - "plot fields that\n" - "are defined on a mesh, but received the following particle " - "fields:\n\n" + msg = ("\nSlicePlot, ProjectionPlot, and OffAxisProjectionPlot can " + "only plot fields that\n" + "are defined on a mesh or for SPH particles, but received the " + "following N-body\n" + "particle fields:\n\n" " %s\n\n" "Did you mean to use ParticlePlot or plot a deposited particle " "field instead?" % self.fields) @@ -776,6 +702,15 @@ def __str__(self): msg += " pip install %s\n" % self.module return msg +class YTModuleRemoved(Exception): + def __init__(self, name, new_home=None, info=None): + message = "The %s module has been removed from yt." % name + if new_home is not None: + message += "\nIt has been moved to %s." % new_home + if info is not None: + message += "\nFor more information, see %s." % info + Exception.__init__(self, message) + class YTArrayTooLargeToDisplay(YTException): def __init__(self, size, max_size): self.size = size diff --git a/yt/utilities/file_handler.py b/yt/utilities/file_handler.py index af858dc61dd..ddaeb3b13aa 100644 --- a/yt/utilities/file_handler.py +++ b/yt/utilities/file_handler.py @@ -1,18 +1,3 @@ -""" -A wrapper class for h5py file objects. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.on_demand_imports import _h5py as h5py from yt.utilities.on_demand_imports import NotAModule from contextlib import contextmanager diff --git a/yt/utilities/flagging_methods.py b/yt/utilities/flagging_methods.py index 5a213f8b5f5..620dcaef44d 100644 --- a/yt/utilities/flagging_methods.py +++ b/yt/utilities/flagging_methods.py @@ -1,21 +1,5 @@ -""" -Utilities for flagging zones for refinement in a dataset - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
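Two compatibility notes on the exceptions changes above: code that caught YTUnitOperationError keeps working, since the name is now an alias for unyt's UnitOperationError, and removed packages can point users to their new home via YTModuleRemoved. A short sketch (the YTModuleRemoved arguments are illustrative):

    from yt.units.yt_array import YTArray
    from yt.utilities.exceptions import YTModuleRemoved, YTUnitOperationError

    try:
        YTArray([1.0], "g") + YTArray([1.0], "cm")
    except YTUnitOperationError:
        pass   # dimension mismatches still raise under the familiar name

    # raise YTModuleRemoved("some_module", new_home="its_new_package")   # illustrative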
-#----------------------------------------------------------------------------- - import numpy as np # For modern purposes from yt.utilities.lib.misc_utilities import grow_flagging_field -from yt.extern.six import add_metaclass flagging_method_registry = {} @@ -25,8 +9,7 @@ def __init__(cls, name, b, d): if hasattr(cls, "_type_name") and not cls._skip_add: flagging_method_registry[cls._type_name] = cls -@add_metaclass(RegisteredFlaggingMethod) -class FlaggingMethod(object): +class FlaggingMethod(metaclass = RegisteredFlaggingMethod): _skip_add = False class OverDensity(FlaggingMethod): diff --git a/yt/utilities/fortran_utils.py b/yt/utilities/fortran_utils.py index ff3e23eb521..1b1b18b1bed 100644 --- a/yt/utilities/fortran_utils.py +++ b/yt/utilities/fortran_utils.py @@ -1,19 +1,3 @@ -""" -Utilities for reading Fortran files. - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import struct import numpy as np import os diff --git a/yt/utilities/grid_data_format/conversion/conversion_athena.py b/yt/utilities/grid_data_format/conversion/conversion_athena.py index 806d2ef2313..6cb93dd611c 100644 --- a/yt/utilities/grid_data_format/conversion/conversion_athena.py +++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py @@ -83,7 +83,7 @@ def write_gdf_field(self, fn, grid_number, field, data): def read_and_write_index(self,basename, ddn, gdf_name): """ Read Athena legacy vtk file from multiple cpus """ proc_names = glob(self.source_dir+'id*') - #print 'Reading a dataset from %i Processor Files' % len(proc_names) + #print('Reading a dataset from %i Processor Files' % len(proc_names)) N = len(proc_names) grid_dims = np.empty([N,3],dtype='int64') grid_left_edges = np.empty([N,3],dtype='float64') @@ -197,7 +197,7 @@ def read_and_write_index(self,basename, ddn, gdf_name): def read_and_write_data(self, basename, ddn, gdf_name): proc_names = glob(self.source_dir+'id*') - #print 'Reading a dataset from %i Processor Files' % len(proc_names) + #print('Reading a dataset from %i Processor Files' % len(proc_names)) N = len(proc_names) for i in range(N): if i == 0: @@ -205,7 +205,7 @@ def read_and_write_data(self, basename, ddn, gdf_name): else: fn = self.source_dir+'id%i/'%i + basename + '-id%i'%i + '.%04i'%ddn + '.vtk' f = open(fn,'rb') - #print 'Reading data from %s' % fn + #print('Reading data from %s' % fn) line = f.readline() while line != '': if len(line) == 0: break @@ -241,7 +241,7 @@ def read_and_write_data(self, basename, ddn, gdf_name): data = np.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F') if i == 0: self.fields.append(field) - # print 'writing field %s' % field + # print('writing field %s' % field) self.write_gdf_field(gdf_name, i, field, data) read_table=False @@ -256,7 +256,7 @@ def read_and_write_data(self, basename, ddn, gdf_name): self.fields.append(field+'_y') self.fields.append(field+'_z') - # print 'writing field %s' % field + # print('writing field %s' % field) self.write_gdf_field(gdf_name, i, field+'_x', data_x) self.write_gdf_field(gdf_name, i, field+'_y', data_y) self.write_gdf_field(gdf_name, i, field+'_z', data_z) @@ -332,7 +332,7 @@ def parse_line(self, line, grid): def read_grid(self, 
filename): """ Read Athena legacy vtk file from single cpu """ f = open(filename,'rb') - #print 'Reading from %s'%filename + #print('Reading from %s'%filename) grid = {} grid['read_field'] = None grid['read_type'] = None diff --git a/yt/utilities/grid_data_format/tests/test_writer.py b/yt/utilities/grid_data_format/tests/test_writer.py index 418c98b221b..2950e17906b 100644 --- a/yt/utilities/grid_data_format/tests/test_writer.py +++ b/yt/utilities/grid_data_format/tests/test_writer.py @@ -1,17 +1,3 @@ -""" -Testsuite for writing yt data to GDF - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import tempfile import shutil import os diff --git a/yt/utilities/grid_data_format/writer.py b/yt/utilities/grid_data_format/writer.py index c5b9fc1c0cf..309b463c4f5 100644 --- a/yt/utilities/grid_data_format/writer.py +++ b/yt/utilities/grid_data_format/writer.py @@ -1,18 +1,3 @@ -""" -Writing yt data to a GDF file. - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import sys from yt.utilities.on_demand_imports import _h5py as h5py @@ -115,7 +100,7 @@ def save_field(ds, fields, field_parameters=None): if isinstance(field_name, tuple): field_name = field_name[1] field_obj = ds._get_field_info(field_name) - if field_obj.particle_type: + if field_obj.sampling_type == "particle": print("Saving particle fields currently not supported.") return @@ -173,7 +158,7 @@ def _write_fields_to_gdf(ds, fhandle, fields, particle_type_name, particles_group = grid_group["particles"] pt_group = particles_group[particle_type_name] - if fi.particle_type: # particle data + if fi.sampling_type == "particle": # particle data pt_group.create_dataset(field_name, grid.ActiveDimensions, dtype="float64") else: # a field @@ -208,7 +193,7 @@ def _write_fields_to_gdf(ds, fhandle, fields, particle_type_name, grid.get_data(field_name) units = fhandle[ "field_types"][field_name].attrs["field_units"] - if fi.particle_type: # particle data + if fi.sampling_type == "particle": # particle data dset = pt_group[field_name] dset[:] = grid[field_name].in_units(units) else: # a field diff --git a/yt/utilities/hierarchy_inspection.py b/yt/utilities/hierarchy_inspection.py index 85033b509d7..22e2ca67c42 100644 --- a/yt/utilities/hierarchy_inspection.py +++ b/yt/utilities/hierarchy_inspection.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import inspect from collections import Counter -from yt.extern.six.moves import reduce +from functools import reduce def find_lowest_subclasses(candidates): diff --git a/yt/utilities/initial_conditions.py b/yt/utilities/initial_conditions.py index ee0933f0612..517fecdcd30 100644 --- a/yt/utilities/initial_conditions.py +++ b/yt/utilities/initial_conditions.py @@ -1,18 +1,3 @@ -""" -Painting zones in a grid - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. 
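The GDF writer hunks above replace the old particle_type boolean on field-info objects with a comparison against sampling_type; the equivalent check in user code looks like this (ds is a loaded dataset and the field name is only an example):

    fi = ds.field_info["gas", "density"]
    if fi.sampling_type == "particle":   # replaces the old `fi.particle_type`
        print("particle-sampled field")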
-# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np class FluidOperator(object): diff --git a/yt/utilities/io_handler.py b/yt/utilities/io_handler.py index cc1f217e3ad..da30fbab978 100644 --- a/yt/utilities/io_handler.py +++ b/yt/utilities/io_handler.py @@ -1,31 +1,13 @@ -""" -The data-file handling functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from collections import defaultdict from contextlib import contextmanager import os from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np -from yt.extern.six import add_metaclass from yt.utilities.lru_cache import \ local_lru_cache, _make_key from yt.geometry.selection_routines import GridSelector -_axis_ids = {0:2,1:1,2:0} - io_registry = {} use_caching = 0 @@ -44,8 +26,7 @@ def __init__(cls, name, b, d): cls._read_obj_field = local_lru_cache(maxsize=use_caching, typed=True, make_key=_make_io_key)(cls._read_obj_field) -@add_metaclass(RegisteredIOHandler) -class BaseIOHandler(object): +class BaseIOHandler(metaclass = RegisteredIOHandler): _vector_fields = () _dataset_type = None _particle_reader = False @@ -53,6 +34,8 @@ class BaseIOHandler(object): _misses = 0 _hits = 0 + __metaclass__ = RegisteredIOHandler + def __init__(self, ds): self.queue = defaultdict(dict) self.ds = ds @@ -165,19 +148,21 @@ def _read_exception(self): def _read_chunk_data(self, chunk, fields): return {} - def _count_particles_chunks(self, chunks, ptf, selector): - psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK + def _count_particles_chunks(self, psize, chunks, ptf, selector): for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf): + # assume particles have zero radius, we break this assumption + # in the SPH frontend and override this function there psize[ptype] += selector.count_points(x, y, z, 0.0) - return dict(psize.items()) + return psize def _read_particle_selection(self, chunks, selector, fields): rv = {} ind = {} # We first need a set of masks for each particle type - ptf = defaultdict(list) # ON-DISK TO READ - fsize = defaultdict(lambda: 0) # COUNT RV - field_maps = defaultdict(list) # ptypes -> fields + ptf = defaultdict(list) # ptype -> on-disk fields to read + fsize = defaultdict(lambda: 0) # ptype -> size of return value + psize = defaultdict(lambda: 0) # ptype -> particle count on disk + field_maps = defaultdict(list) # ptype -> fields (including unions) chunks = list(chunks) unions = self.ds.particle_unions # What we need is a mapping from particle types to return types @@ -192,14 +177,13 @@ def _read_particle_selection(self, chunks, selector, fields): else: ptf[ftype].append(fname) field_maps[field].append(field) - # We can't hash chunks, but otherwise this is a neat idea. - # Now we have our full listing. - # Here, ptype_map means which particles contribute to a given type. - # And ptf is the actual fields from disk to read. 
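The registration classes touched above (RegisteredFlaggingMethod, RegisteredIOHandler) are ported from six.add_metaclass to the native Python 3 metaclass keyword; note that in Python 3 only the metaclass= argument in the class header is honored, while a __metaclass__ class attribute is silently ignored. A minimal sketch of the registry pattern, with illustrative names rather than yt's actual classes:

    registry = {}

    class Registered(type):
        def __init__(cls, name, bases, namespace):
            super().__init__(name, bases, namespace)
            # Register only subclasses that declare a type name and opt in.
            if hasattr(cls, "_type_name") and not cls._skip_add:
                registry[cls._type_name] = cls

    class Method(metaclass=Registered):
        _skip_add = False

    class OverDensity(Method):
        _type_name = "overdensity"

    assert registry["overdensity"] is OverDensity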
- psize = self._count_particles_chunks(chunks, ptf, selector) + # Now we have our full listing + + # psize maps the names of particle types to the number of + # particles of each type + self._count_particles_chunks(psize, chunks, ptf, selector) + # Now we allocate - # ptf, remember, is our mapping of what we want to read - #for ptype in ptf: for field in fields: if field[0] in unions: for pt in unions[field[0]]: diff --git a/yt/utilities/lib/allocation_container.pxd b/yt/utilities/lib/allocation_container.pxd index 9a20ca088aa..25c6eb6f62b 100644 --- a/yt/utilities/lib/allocation_container.pxd +++ b/yt/utilities/lib/allocation_container.pxd @@ -5,13 +5,6 @@ An allocation container and memory pool """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np from libc.stdlib cimport malloc, free, realloc diff --git a/yt/utilities/lib/allocation_container.pyx b/yt/utilities/lib/allocation_container.pyx index 1f58ccc738e..4bb61517cc6 100644 --- a/yt/utilities/lib/allocation_container.pyx +++ b/yt/utilities/lib/allocation_container.pyx @@ -5,13 +5,6 @@ An allocation container and memory pool """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np import numpy as np diff --git a/yt/utilities/lib/alt_ray_tracers.pyx b/yt/utilities/lib/alt_ray_tracers.pyx index 099790fab07..1678747b51e 100644 --- a/yt/utilities/lib/alt_ray_tracers.pyx +++ b/yt/utilities/lib/alt_ray_tracers.pyx @@ -5,13 +5,6 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/amr_kdtools.pxd b/yt/utilities/lib/amr_kdtools.pxd index 937c2656f4a..97eada36787 100644 --- a/yt/utilities/lib/amr_kdtools.pxd +++ b/yt/utilities/lib/amr_kdtools.pxd @@ -5,13 +5,6 @@ AMR kD-Tree Cython Tools """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np diff --git a/yt/utilities/lib/amr_kdtools.pyx b/yt/utilities/lib/amr_kdtools.pyx index afa34f20eb1..464345cdc87 100644 --- a/yt/utilities/lib/amr_kdtools.pyx +++ b/yt/utilities/lib/amr_kdtools.pyx @@ -5,13 +5,6 @@ AMR kD-Tree Cython Tools """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
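With this change _count_particles_chunks receives the psize defaultdict from its caller and accumulates into it instead of building and returning a fresh dict, so an SPH frontend can override the counting while the allocation logic in _read_particle_selection stays shared. A hedged sketch of that accumulation pattern, using stand-in objects for yt's reader and selector:

    from collections import defaultdict

    def count_particles_chunks(psize, coords_by_type, count_points):
        # Accumulate into the caller-owned dict; the base class assumes
        # point particles, i.e. a selection radius of zero.
        for ptype, (x, y, z) in coords_by_type:
            psize[ptype] += count_points(x, y, z, 0.0)
        return psize

    psize = defaultdict(int)
    coords = [("gas", ([0.1, 0.5], [0.2, 0.6], [0.3, 0.7]))]
    count_particles_chunks(psize, coords, lambda x, y, z, radius: len(x))
    assert psize["gas"] == 2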
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np @@ -48,12 +41,12 @@ cdef class Node: def print_me(self): - print 'Node %i' % self.node_id - print '\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1], - self.left_edge[2]) - print '\t re: %e %e %e' % (self.right_edge[0], self.right_edge[1], - self.right_edge[2]) - print '\t grid: %i' % self.grid + print('Node %i' % self.node_id) + print('\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1], + self.left_edge[2])) + print('\t re: %e %e %e' % (self.right_edge[0], self.right_edge[1], + self.right_edge[2])) + print('\t grid: %i' % self.grid) def get_split_dim(self): if self.split != NULL: @@ -235,8 +228,8 @@ cdef class Node: greater_ids[ngreater] = i ngreater += 1 - #print 'nless: %i' % nless - #print 'ngreater: %i' % ngreater + #print('nless: %i' % nless) + #print('ngreater: %i' % ngreater) if nless > 0: less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t)) @@ -305,7 +298,7 @@ cdef class Node: contained *= gres[0,i] >= self.right_edge[i] if contained == 1: - # print 'Node fully contained, setting to grid: %i' % gids[0] + # print('Node fully contained, setting to grid: %i' % gids[0]) self.grid = gids[0] assert(self.grid != -1) return @@ -356,13 +349,13 @@ cdef class Node: self.divide(split) # Populate Left Node - #print 'Inserting left node', self.left_edge, self.right_edge + #print('Inserting left node', self.left_edge, self.right_edge) if nless == 1: self.left.insert_grid(gle, gre, gid, rank, size) # Populate Right Node - #print 'Inserting right node', self.left_edge, self.right_edge + #print('Inserting right node', self.left_edge, self.right_edge) if ngreater == 1: self.right.insert_grid(gle, gre, gid, rank, size) @@ -403,7 +396,7 @@ cdef class Node: # If best_dim is -1, then we have found a place where there are no choices. # Exit out and set the node to None. if best_dim == -1: - print 'Failed to split grids.' 
+ print('Failed to split grids.') return -1 split = malloc(sizeof(Split)) @@ -440,7 +433,7 @@ cdef class Node: less_gres[i,j] = gres[index,j] # Populate Left Node - #print 'Inserting left node', self.left_edge, self.right_edge + #print('Inserting left node', self.left_edge, self.right_edge) self.left.insert_grids(nless, less_gles, less_gres, l_ids, rank, size) @@ -457,7 +450,7 @@ cdef class Node: greater_gres[i,j] = gres[index,j] # Populate Right Node - #print 'Inserting right node', self.left_edge, self.right_edge + #print('Inserting right node', self.left_edge, self.right_edge) self.right.insert_grids(ngreater, greater_gles, greater_gres, g_ids, rank, size) @@ -501,13 +494,13 @@ cdef class Node: #lnew_gre[big_dim] = new_pos # Populate Left Node - #print 'Inserting left node', self.left_edge, self.right_edge + #print('Inserting left node', self.left_edge, self.right_edge) self.left.insert_grid(lnew_gle, lnew_gre, grid_id, rank, size) #rnew_gle[big_dim] = new_pos # Populate Right Node - #print 'Inserting right node', self.left_edge, self.right_edge + #print('Inserting right node', self.left_edge, self.right_edge) self.right.insert_grid(rnew_gle, rnew_gre, grid_id, rank, size) return @@ -784,17 +777,17 @@ cdef kdtree_get_choices(int n_grids, for i in range(n_grids): # Check for disqualification for j in range(2): - # print "Checking against", i,j,dim,data[i,j,dim] + # print("Checking against", i,j,dim,data[i,j,dim]) if not (l_corner[dim] < data[i][j][dim] and data[i][j][dim] < r_corner[dim]): - # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim] + # print("Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]) continue skipit = 0 # Add our left ... for k in range(n_unique): if uniques[k] == data[i][j][dim]: skipit = 1 - # print "Identified", uniques[k], data[i,j,dim], n_unique + # print("Identified", uniques[k], data[i,j,dim], n_unique) break if skipit == 0: uniques[n_unique] = data[i][j][dim] @@ -808,7 +801,7 @@ cdef kdtree_get_choices(int n_grids, # I recognize how lame this is. cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64') for i in range(my_max): - # print "Setting tarr: ", i, uniquedims[best_dim][i] + # print("Setting tarr: ", i, uniquedims[best_dim][i]) tarr[i] = uniquedims[best_dim][i] tarr.sort() split = tarr[my_split] diff --git a/yt/utilities/lib/api.py b/yt/utilities/lib/api.py index 030ca16a1cc..ea60c087c08 100644 --- a/yt/utilities/lib/api.py +++ b/yt/utilities/lib/api.py @@ -1,19 +1,3 @@ -import os -""" -Compatibility module - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .grid_traversal import * from .particle_mesh_operations import * from .depth_first_octree import * diff --git a/yt/utilities/lib/basic_octree.pyx b/yt/utilities/lib/basic_octree.pyx index 0461dbfae98..c95c47c5538 100644 --- a/yt/utilities/lib/basic_octree.pyx +++ b/yt/utilities/lib/basic_octree.pyx @@ -5,13 +5,6 @@ A refine-by-two AMR-specific octree """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np @@ -435,7 +428,7 @@ cdef class Octree: if this_node is NULL: break if this_node is NULL: break if truncate and potential > kinetic: - print 'Truncating...' + print('Truncating...') break pair_node = this_node.next while pair_node is not NULL: @@ -545,7 +538,7 @@ cdef class Octree: for k in range(2): nline += "%d," % self.node_ID(node.children[i][j][k]) line += nline - print line + print(line) return cdef void iterate_print_nodes(self, OctreeNode *node): @@ -579,7 +572,7 @@ cdef class Octree: for i in range(self.nvals): line += "val%d\t\t" % i line += "weight\t\tchild?\tparent?\tchildren" - print line + print(line) for i in range(self.top_grid_dims[0]): for j in range(self.top_grid_dims[1]): for k in range(self.top_grid_dims[2]): diff --git a/yt/utilities/lib/bitarray.pxd b/yt/utilities/lib/bitarray.pxd index f9be00031f6..61e6dc75f4b 100644 --- a/yt/utilities/lib/bitarray.pxd +++ b/yt/utilities/lib/bitarray.pxd @@ -5,13 +5,6 @@ Bit array functions """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np @@ -19,8 +12,11 @@ cimport cython cdef inline void ba_set_value(np.uint8_t *buf, np.uint64_t ind, np.uint8_t val) nogil: - if val > 0: val = 1 - buf[ind >> 3] |= (val << (ind & 7)) + # This assumes 8 bit buffer + if val > 0: + buf[ind >> 3] |= (1 << (ind & 7)) + else: + buf[ind >> 3] &= ~(1 << (ind & 7)) cdef inline np.uint8_t ba_get_value(np.uint8_t *buf, np.uint64_t ind) nogil: cdef np.uint8_t rv = (buf[ind >> 3] & (1 << (ind & 7))) diff --git a/yt/utilities/lib/bitarray.pyx b/yt/utilities/lib/bitarray.pyx index 793e0a3a5f2..c046b51532c 100644 --- a/yt/utilities/lib/bitarray.pyx +++ b/yt/utilities/lib/bitarray.pyx @@ -5,13 +5,6 @@ Bit array functions """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np @@ -23,7 +16,8 @@ cdef class bitarray: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - def __init__(self, size = -1, arr = None): + def __cinit__(self, np.int64_t size = -1, + np.ndarray[np.uint8_t, ndim=1, cast=True] arr = None): r"""This is a bitarray, which flips individual bits to on/off inside a uint8 container array. @@ -45,7 +39,7 @@ cdef class bitarray: >>> arr_in2 = np.array([False, True, True]) >>> a = ba.bitarray(arr = arr_in1) >>> b = ba.bitarray(arr = arr_in2) - >>> print a & b + >>> print(a & b) >>> print (a & b).as_bool_array() """ @@ -74,7 +68,7 @@ cdef class bitarray: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - def set_from_array(self, np.ndarray[np.uint8_t, cast=True] arr): + def set_from_array(self, np.ndarray[np.uint8_t, cast=True] arr not None): r"""Given an array that is either uint8_t or boolean, set the values of this array to match it. 
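The bitarray fix above matters because the old ba_set_value only OR-ed the (possibly zeroed) value into the buffer, so writing 0 could never clear a bit that was already set; the new version masks the bit off explicitly. A small Python sketch of the same bit arithmetic on a plain bytearray:

    def set_bit(buf, ind, val):
        if val:
            buf[ind >> 3] |= 1 << (ind & 7)
        else:
            buf[ind >> 3] &= ~(1 << (ind & 7)) & 0xFF

    def get_bit(buf, ind):
        return (buf[ind >> 3] >> (ind & 7)) & 1

    buf = bytearray(2)          # room for 16 bits
    set_bit(buf, 10, 1)
    assert get_bit(buf, 10) == 1
    set_bit(buf, 10, 0)         # the old code could not clear a set bit
    assert get_bit(buf, 10) == 0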
@@ -140,7 +134,7 @@ cdef class bitarray: >>> arr_in = np.array([True, True, False]) >>> a = ba.bitarray(arr = arr_in) - >>> print a.set_value(2, 1) + >>> print(a.set_value(2, 1)) """ ba_set_value(self.buf, ind, val) @@ -163,7 +157,7 @@ cdef class bitarray: >>> arr_in = np.array([True, True, False]) >>> a = ba.bitarray(arr = arr_in) - >>> print a.query_value(2) + >>> print(a.query_value(2)) """ return ba_get_value(self.buf, ind) diff --git a/yt/utilities/lib/bounded_priority_queue.pxd b/yt/utilities/lib/bounded_priority_queue.pxd new file mode 100644 index 00000000000..798cf7a0746 --- /dev/null +++ b/yt/utilities/lib/bounded_priority_queue.pxd @@ -0,0 +1,41 @@ +""" +A cython implementation of the bounded priority queue + +This is a priority queue that only keeps track of smallest k values that have +been added to it. + + +""" + +import numpy as np +cimport numpy as np + +cdef class BoundedPriorityQueue: + cdef public np.float64_t[:] heap + cdef np.float64_t* heap_ptr + cdef public np.int64_t[:] pids + cdef np.int64_t* pids_ptr + cdef int use_pids + + cdef np.intp_t size + cdef np.intp_t max_elements + + cdef int max_heapify(self, np.intp_t index) nogil except -1 + cdef int propagate_up(self, np.intp_t index) nogil except -1 + cdef int add(self, np.float64_t val) nogil except -1 + cdef int add_pid(self, np.float64_t val, np.int64_t pid) nogil except -1 + cdef int heap_append(self, np.float64_t val, np.int64_t ind) nogil except -1 + cdef np.float64_t extract_max(self) nogil except -1 + cdef int validate_heap(self) nogil except -1 + +cdef class NeighborList: + cdef public np.float64_t[:] data + cdef np.float64_t* data_ptr + cdef public np.int64_t[:] pids + cdef np.int64_t* pids_ptr + cdef np.intp_t size + cdef np.intp_t _max_size + + cdef int _update_memview(self) except -1 + cdef int _extend(self) nogil except -1 + cdef int add_pid(self, np.float64_t val, np.int64_t ind) nogil except -1 diff --git a/yt/utilities/lib/bounded_priority_queue.pyx b/yt/utilities/lib/bounded_priority_queue.pyx new file mode 100644 index 00000000000..a035a8d3533 --- /dev/null +++ b/yt/utilities/lib/bounded_priority_queue.pyx @@ -0,0 +1,243 @@ +""" +A cython implementation of the bounded priority queue + +This is a priority queue that only keeps track of smallest k values that have +been added to it. + +This priority queue is implemented with the configuration of having the largest +element at the beginning - this exploited to store nearest neighbour lists. 
+ +""" + + +import numpy as np +cimport numpy as np + +cimport cython +from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free + +cdef class BoundedPriorityQueue: + def __cinit__(self, np.intp_t max_elements, np.intp_t pids=0): + self.max_elements = max_elements + # mark invalid recently values with -1 + self.heap = np.zeros(max_elements)-1 + self.heap_ptr = &(self.heap[0]) + # only allocate memory if we intend to store particle ID's + self.use_pids = pids + if pids == 1: + self.pids = np.zeros(max_elements, dtype="int64")-1 + self.pids_ptr = &(self.pids[0]) + + self.size = 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int max_heapify(self, np.intp_t index) nogil except -1: + cdef np.intp_t left = 2 * index + 1 + cdef np.intp_t right = 2 * index + 2 + cdef np.intp_t largest = index + + if left < self.size and self.heap_ptr[left] > self.heap_ptr[largest]: + largest = left + if right < self.size and self.heap_ptr[right] > self.heap_ptr[largest]: + largest = right + + if largest != index: + self.heap_ptr[index], self.heap_ptr[largest] = \ + self.heap_ptr[largest], self.heap_ptr[index] + if self.use_pids: + self.pids_ptr[index], self.pids_ptr[largest] = \ + self.pids_ptr[largest], self.pids_ptr[index] + + self.max_heapify(largest) + + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int propagate_up(self, np.intp_t index) nogil except -1: + while index != 0 and self.heap_ptr[(index - 1) // 2] < self.heap_ptr[index]: + self.heap_ptr[index], self.heap_ptr[(index - 1) // 2] = self.heap_ptr[(index - 1) // 2], self.heap_ptr[index] + if self.use_pids: + self.pids_ptr[index], self.pids_ptr[(index - 1) // 2] = self.pids_ptr[(index - 1) // 2], self.pids_ptr[index] + index = (index - 1) // 2 + + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int add(self, np.float64_t val) nogil except -1: + # if not at max size append, if at max size, only append if smaller than + # the maximum value + if self.size == self.max_elements: + if val < self.heap_ptr[0]: + self.extract_max() + self.heap_append(val, -1) + else: + self.heap_append(val, -1) + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int add_pid(self, np.float64_t val, np.int64_t ind) nogil except -1: + if self.size == self.max_elements: + if val < self.heap_ptr[0]: + self.extract_max() + self.heap_append(val, ind) + else: + self.heap_append(val, ind) + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int heap_append(self, np.float64_t val, np.int64_t ind) nogil except -1: + self.heap_ptr[self.size] = val + if self.use_pids: + self.pids_ptr[self.size] = ind + self.size += 1 + self.propagate_up(self.size - 1) + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef np.float64_t extract_max(self) nogil except -1: + cdef np.float64_t maximum = self.heap_ptr[0] + cdef np.float64_t val + cdef np.int64_t ind + + val = self.heap_ptr[self.size-1] + self.heap_ptr[self.size-1] = -1 + + if self.use_pids: + ind = self.pids_ptr[self.size-1] + self.pids_ptr[self.size-1] = -1 + + self.size -= 1 + if self.size > 0: + self.heap_ptr[0] = val + if self.use_pids: + 
self.pids_ptr[0] = ind + self.max_heapify(0) + return maximum + + cdef int validate_heap(self) nogil except -1: + # this function loops through every element in the heap, if any children + # are greater than their parents then we return zero, which is an error + # as the heap condition is not satisfied + cdef int i, index + for i in range(self.size-1, -1, -1): + index = i + while index != 0: + if self.heap_ptr[index] > self.heap_ptr[(index - 1) // 2]: + return 0 + index = (index - 1) // 2 + return 1 + +cdef class NeighborList: + def __cinit__(self, np.intp_t init_size=32): + self.size = 0 + self._max_size = init_size + self.data_ptr = PyMem_Malloc( + self._max_size * sizeof(np.float64_t) + ) + self.pids_ptr = PyMem_Malloc( + self._max_size * sizeof(np.int64_t) + ) + self._update_memview() + + def __dealloc__(self): + PyMem_Free(self.data_ptr) + PyMem_Free(self.pids_ptr) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int _update_memview(self) except -1: + self.data = self.data_ptr + self.pids = self.pids_ptr + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int _extend(self) nogil except -1: + if self.size == self._max_size: + self._max_size *= 2 + with gil: + self.data_ptr = PyMem_Realloc( + self.data_ptr, + self._max_size * sizeof(np.float64_t) + ) + self.pids_ptr = PyMem_Realloc( + self.pids_ptr, + self._max_size * sizeof(np.int64_t) + ) + self._update_memview() + return 0 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef int add_pid(self, np.float64_t val, np.int64_t ind) nogil except -1: + self._extend() + self.data_ptr[self.size] = val + self.pids_ptr[self.size] = ind + self.size += 1 + return 0 + +# these are test functions which are called from +# yt/utilities/lib/tests/test_nn.py +# they are stored here to allow easy interaction with functions not exposed at +# the python level +def validate_pid(): + m = BoundedPriorityQueue(5, True) + + # Add elements to the queue + elements = [0.1, 0.25, 1.33, 0.5, 3.2, 4.6, 2.0, 0.4, 4.0, .001] + pids = [1,2,3,4,5,6,7,8,9,10] + + for el, pid in zip(elements, pids): + m.add_pid(el, pid) + + m.extract_max() + m.extract_max() + m.extract_max() + + return np.asarray(m.heap), np.asarray(m.pids) + +def validate(): + m = BoundedPriorityQueue(5) + + # Add elements to the queue + for el in [0.1, 0.25, 1.33, 0.5, 3.2, 4.6, 2.0, 0.4, 4.0, .001]: + m.add(el) + + m.extract_max() + m.extract_max() + m.extract_max() + + return np.asarray(m.heap) + +def validate_nblist(): + nblist = NeighborList(init_size=2) + + for i in range(4): + nblist.add_pid(1.0, i) + + # Copy is necessary here. Without it, the allocated memory would be freed. + # Leaving random data array. + return np.asarray(nblist.data).copy(), np.asarray(nblist.pids).copy() diff --git a/yt/utilities/lib/contour_finding.pxd b/yt/utilities/lib/contour_finding.pxd index c30d492b02c..1b34fae2f7c 100644 --- a/yt/utilities/lib/contour_finding.pxd +++ b/yt/utilities/lib/contour_finding.pxd @@ -5,13 +5,6 @@ Contour finding exports """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
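BoundedPriorityQueue keeps only the k smallest values it has seen (optionally with their particle IDs), arranged as a max-heap so the largest retained value, i.e. the current worst neighbour distance, is always at index 0. A pure-Python sketch of the same behaviour using heapq with negated values, fed the same inputs as the validate() helper above:

    import heapq

    class BoundedQueue:
        def __init__(self, k):
            self.k = k
            self._heap = []          # stores (-value, pid)

        def add(self, value, pid=-1):
            if len(self._heap) < self.k:
                heapq.heappush(self._heap, (-value, pid))
            elif value < -self._heap[0][0]:
                # Replace the largest value currently kept.
                heapq.heapreplace(self._heap, (-value, pid))

        @property
        def largest(self):
            return -self._heap[0][0]

    q = BoundedQueue(5)
    for v in [0.1, 0.25, 1.33, 0.5, 3.2, 4.6, 2.0, 0.4, 4.0, 0.001]:
        q.add(v)
    # The five smallest values are 0.001, 0.1, 0.25, 0.4 and 0.5.
    assert q.largest == 0.5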
-#----------------------------------------------------------------------------- cimport numpy as np diff --git a/yt/utilities/lib/contour_finding.pyx b/yt/utilities/lib/contour_finding.pyx index e11453de8d6..bf3ab59e90f 100644 --- a/yt/utilities/lib/contour_finding.pyx +++ b/yt/utilities/lib/contour_finding.pyx @@ -5,21 +5,12 @@ A two-pass contour finding algorithm """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - +from __future__ import print_function import numpy as np cimport numpy as np cimport cython from cython cimport floating from libc.stdlib cimport malloc, free, realloc -from yt.geometry.selection_routines cimport \ - SelectorObject, AlwaysSelector, OctreeSubsetSelector from yt.utilities.lib.fp_utils cimport imax from yt.geometry.oct_container cimport \ OctreeContainer, OctInfo @@ -35,7 +26,7 @@ import sys cdef inline ContourID *contour_create(np.int64_t contour_id, ContourID *prev = NULL): node = malloc(sizeof(ContourID)) - #print "Creating contour with id", contour_id + #print("Creating contour with id", contour_id) node.contour_id = contour_id node.next = node.parent = NULL node.prev = prev @@ -156,7 +147,7 @@ cdef class ContourTree: n = contour_ids.shape[0] cdef ContourID *cur = self.last for i in range(n): - #print i, contour_ids[i] + #print(i, contour_ids[i]) cur = contour_create(contour_ids[i], cur) if self.first == NULL: self.first = cur self.last = cur @@ -237,15 +228,15 @@ cdef class ContourTree: cdef ContourID *c1 cdef ContourID *c2 n = join_tree.shape[0] - #print "Counting" - #print "Checking", self.count() + #print("Counting") + #print("Checking", self.count()) for i in range(n): ins = 0 cid1 = join_tree[i, 0] cid2 = join_tree[i, 1] c1 = c2 = NULL cur = self.first - #print "Looking for ", cid1, cid2 + #print("Looking for ", cid1, cid2) while c1 == NULL or c2 == NULL: if cur.contour_id == cid1: c1 = contour_find(cur) @@ -255,9 +246,9 @@ cdef class ContourTree: cur = cur.next if cur == NULL: break if c1 == NULL or c2 == NULL: - if c1 == NULL: print " Couldn't find ", cid1 - if c2 == NULL: print " Couldn't find ", cid2 - print " Inspected ", ins + if c1 == NULL: print(" Couldn't find ", cid1) + if c2 == NULL: print(" Couldn't find ", cid2) + print(" Inspected ", ins) raise RuntimeError else: c1.count = c2.count = 0 @@ -571,11 +562,11 @@ cdef class ParticleContourTree(ContourTree): cdef np.int64_t *nind = malloc(sizeof(np.int64_t)*nsize) counter = 0 cdef np.int64_t frac = (doff.shape[0] / 20.0) - if verbose == 1: print >> sys.stderr, "Will be outputting every", frac + if verbose == 1: print("Will be outputting every", frac, file=sys.stderr) for i in range(doff.shape[0]): if verbose == 1 and counter >= frac: counter = 0 - print >> sys.stderr, "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size) + print("FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size), file=sys.stderr) counter += 1 # Any particles found for this oct? 
if doff[i] < 0: continue diff --git a/yt/utilities/lib/cykdtree/__init__.py b/yt/utilities/lib/cykdtree/__init__.py new file mode 100644 index 00000000000..675daffbbfb --- /dev/null +++ b/yt/utilities/lib/cykdtree/__init__.py @@ -0,0 +1,23 @@ +from yt.utilities.lib.cykdtree.kdtree import PyKDTree, PyNode # NOQA +from yt.utilities.lib.cykdtree import plot # NOQA + +def make_tree(pts, **kwargs): + r"""Build a KD-tree for a set of points. + + Args: + pts (np.ndarray of float64): (n,m) Array of n mD points. + \*\*kwargs: Additional keyword arguments are passed to the appropriate + class for constructuing the tree. + + Returns: + T (:class:`cykdtree.PyKDTree`): KDTree object. + + Raises: + ValueError: If `pts` is not a 2D array. + + """ + # Check input + if (pts.ndim != 2): + raise ValueError("pts must be a 2D array of ND coordinates") + T = PyKDTree(pts, **kwargs) + return T diff --git a/yt/analysis_modules/halo_finding/fof/__init__.py b/yt/utilities/lib/cykdtree/c_kdtree.cpp similarity index 100% rename from yt/analysis_modules/halo_finding/fof/__init__.py rename to yt/utilities/lib/cykdtree/c_kdtree.cpp diff --git a/yt/utilities/lib/cykdtree/c_kdtree.hpp b/yt/utilities/lib/cykdtree/c_kdtree.hpp new file mode 100644 index 00000000000..e4a9afb7132 --- /dev/null +++ b/yt/utilities/lib/cykdtree/c_kdtree.hpp @@ -0,0 +1,964 @@ +#include +#include +#include +#include +#include +#include +#include +#include "c_utils.hpp" + +#define LEAF_MAX 4294967295 + +template +T deserialize_scalar(std::istream &is) { + T scalar; + is.read((char*)&scalar, sizeof(T)); + return scalar; +} + +template +void serialize_scalar(std::ostream &os, const T &scalar) { + os.write((char*)&scalar, sizeof(scalar)); +} + +template +T* deserialize_pointer_array(std::istream &is, uint64_t len) { + T* arr = (T*)malloc(len*sizeof(T)); + is.read((char*)&arr[0], len*sizeof(T)); + return arr; +} + +template +void serialize_pointer_array(std::ostream &os, const T* array, uint64_t len) { + os.write((char*)array, len*sizeof(T)); +} + +class Node { +public: + bool is_empty; + bool is_leaf; + uint32_t leafid; + uint32_t ndim; + double *left_edge; + double *right_edge; + uint64_t left_idx; + uint64_t children; + bool *periodic_left; + bool *periodic_right; + std::vector > left_neighbors; + std::vector > right_neighbors; + std::vector all_neighbors; + std::vector left_nodes; + // innernode parameters + uint32_t split_dim; + double split; + Node *less; + Node *greater; + // empty node constructor + Node() { + is_empty = true; + is_leaf = false; + leafid = LEAF_MAX; + ndim = 0; + left_edge = NULL; + right_edge = NULL; + periodic_left = NULL; + periodic_right = NULL; + less = NULL; + greater = NULL; + } + // emtpy node with some info + Node(uint32_t ndim0, double *le, double *re, bool *ple, bool *pre) { + is_empty = true; + is_leaf = false; + leafid = 4294967295; + ndim = ndim0; + left_edge = (double*)malloc(ndim*sizeof(double)); + right_edge = (double*)malloc(ndim*sizeof(double)); + periodic_left = (bool*)malloc(ndim*sizeof(bool)); + periodic_right = (bool*)malloc(ndim*sizeof(bool)); + memcpy(left_edge, le, ndim*sizeof(double)); + memcpy(right_edge, re, ndim*sizeof(double)); + memcpy(periodic_left, ple, ndim*sizeof(bool)); + memcpy(periodic_right, pre, ndim*sizeof(bool)); + less = NULL; + greater = NULL; + for (uint32_t i=0; i left_nodes0) { + is_empty = false; + is_leaf = false; + leafid = 4294967295; + ndim = ndim0; + left_idx = Lidx; + + split_dim = sdim0; + split = split0; + less = lnode; + greater = gnode; + children = lnode->children + 
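make_tree in the vendored cykdtree package only validates that the input is a 2D array and forwards keyword arguments to PyKDTree. A hedged usage sketch; the leafsize keyword is an assumption about PyKDTree's signature and is not spelled out in this diff:

    import numpy as np
    from yt.utilities.lib.cykdtree import make_tree

    pts = np.random.random((100, 3))    # 100 points in three dimensions
    tree = make_tree(pts, leafsize=32)  # leafsize keyword is assumed

    # Anything that is not a 2D array is rejected before a tree is built.
    try:
        make_tree(np.random.random(100))
    except ValueError as err:
        print(err)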
gnode->children; + + left_edge = (double*)malloc(ndim*sizeof(double)); + right_edge = (double*)malloc(ndim*sizeof(double)); + periodic_left = (bool*)malloc(ndim*sizeof(bool)); + periodic_right = (bool*)malloc(ndim*sizeof(bool)); + memcpy(left_edge, le, ndim*sizeof(double)); + memcpy(right_edge, re, ndim*sizeof(double)); + memcpy(periodic_left, ple, ndim*sizeof(bool)); + memcpy(periodic_right, pre, ndim*sizeof(bool)); + for (uint32_t d = 0; d < ndim; d++) + left_nodes.push_back(left_nodes0[d]); + + left_neighbors = std::vector >(ndim); + right_neighbors = std::vector >(ndim); + } + // leafnode constructor + Node(uint32_t ndim0, double *le, double *re, bool *ple, bool *pre, + uint64_t Lidx, uint64_t n, int leafid0, + std::vector left_nodes0) { + is_empty = false; + is_leaf = true; + leafid = leafid0; + ndim = ndim0; + split = 0.0; + split_dim = 0; + left_idx = Lidx; + less = NULL; + greater = NULL; + + children = n; + + left_edge = (double*)malloc(ndim*sizeof(double)); + right_edge = (double*)malloc(ndim*sizeof(double)); + periodic_left = (bool*)malloc(ndim*sizeof(bool)); + periodic_right = (bool*)malloc(ndim*sizeof(bool)); + memcpy(left_edge, le, ndim*sizeof(double)); + memcpy(right_edge, re, ndim*sizeof(double)); + memcpy(periodic_left, ple, ndim*sizeof(bool)); + memcpy(periodic_right, pre, ndim*sizeof(bool)); + for (uint32_t d = 0; d < ndim; d++) + left_nodes.push_back(left_nodes0[d]); + + left_neighbors = std::vector >(ndim); + right_neighbors = std::vector >(ndim); + + for (uint32_t d = 0; d < ndim; d++) { + if ((left_nodes[d]) && (!(left_nodes[d]->is_empty))) + add_neighbors(left_nodes[d], d); + } + } + Node(std::istream &is) { + // Note that Node instances intialized via this method do not have + // any neighbor information. We will build neighbor information later + // by walking the tree + bool check_bit = deserialize_scalar(is); + if (!check_bit) { + // something has gone terribly wrong so we crash + abort(); + } + is_empty = deserialize_scalar(is); + is_leaf = deserialize_scalar(is); + leafid = deserialize_scalar(is); + ndim = deserialize_scalar(is); + left_edge = deserialize_pointer_array(is, ndim); + right_edge = deserialize_pointer_array(is, ndim); + left_idx = deserialize_scalar(is); + children = deserialize_scalar(is); + periodic_left = deserialize_pointer_array(is, ndim); + periodic_right = deserialize_pointer_array(is, ndim); + split_dim = deserialize_scalar(is); + split = deserialize_scalar(is); + less = NULL; + greater = NULL; + left_neighbors = std::vector >(ndim); + right_neighbors = std::vector >(ndim); + for (uint32_t i=0; i(os, true); + serialize_scalar(os, is_empty); + serialize_scalar(os, is_leaf); + serialize_scalar(os, leafid); + serialize_scalar(os, ndim); + serialize_pointer_array(os, left_edge, ndim); + serialize_pointer_array(os, right_edge, ndim); + serialize_scalar(os, left_idx); + serialize_scalar(os, children); + serialize_pointer_array(os, periodic_left, ndim); + serialize_pointer_array(os, periodic_right, ndim); + serialize_scalar(os, split_dim); + serialize_scalar(os, split); + } + ~Node() { + if (left_edge) + free(left_edge); + if (right_edge) + free(right_edge); + if (periodic_left) + free(periodic_left); + if (periodic_right) + free(periodic_right); + } + friend std::ostream &operator<<(std::ostream &os, const Node &node) { + // this is available for nicely formatted debugging, use serialize + // to save data to disk + os << "is_empty: " << node.is_empty << std::endl; + os << "is_leaf: " << node.is_leaf << std::endl; + os << "leafid: " << 
node.leafid << std::endl; + os << "ndim: " << node.ndim << std::endl; + os << "left_edge: "; + for (uint32_t i = 0; i < node.ndim; i++) { + os << node.left_edge[i] << " "; + } + os << std::endl; + os << "right_edge: "; + for (uint32_t i = 0; i < node.ndim; i++) { + os << node.right_edge[i] << " "; + } + os << std::endl; + os << "left_idx: " << node.left_idx << std::endl; + os << "children: " << node.children << std::endl; + os << "periodic_left: "; + for (uint32_t i = 0; i < node.ndim; i++) { + os << node.periodic_left[i] << " "; + } + os << std::endl; + os << "periodic_right: "; + for (uint32_t i = 0; i < node.ndim; i++) { + os << node.periodic_right[i] << " "; + } + os << std::endl; + os << "split_dim: " << node.split_dim << std::endl; + os << "split: " << node.split << std::endl; + for (uint32_t i=0; i < node.left_nodes.size(); i++) { + os << node.left_nodes[i] << std::endl; + if (node.left_nodes[i]) { + os << node.left_nodes[i]->left_idx << std::endl; + os << node.left_nodes[i]->children << std::endl; + } + } + + return os; + } + + Node* copy() { + Node *out; + if (is_empty) { + if (left_edge) { + out = new Node(ndim, left_edge, right_edge, + periodic_left, periodic_right); + } else { + out = new Node(); + } + } else if (is_leaf) { + std::vector left_nodes_copy; + for (uint32_t d = 0; d < ndim; d++) + left_nodes_copy.push_back(NULL); + out = new Node(ndim, left_edge, right_edge, + periodic_left, periodic_right, + left_idx, children, leafid, + left_nodes_copy); + } else { + Node *lnode = less->copy(); + Node *gnode = greater->copy(); + std::vector left_nodes_copy; + for (uint32_t d = 0; d < ndim; d++) + left_nodes_copy.push_back(NULL); + out = new Node(ndim, left_edge, right_edge, + periodic_left, periodic_right, + left_idx, split_dim, split, lnode, gnode, + left_nodes_copy); + std::vector::iterator it; + for (uint32_t d = 0; d < ndim; d++) { + for (it = left_neighbors[d].begin(); + it != left_neighbors[d].end(); it++) { + out->left_neighbors[d].push_back(*it); + } + for (it = right_neighbors[d].begin(); + it != right_neighbors[d].end(); it++) { + out->right_neighbors[d].push_back(*it); + } + } + } + + return out; + } + + void update_ids(uint32_t add_to) { + leafid += add_to; + uint32_t i; + for (uint32_t d = 0; d < ndim; d++) { + for (i = 0; i < left_neighbors[d].size(); i++) + left_neighbors[d][i] += add_to; + for (i = 0; i < right_neighbors[d].size(); i++) + right_neighbors[d][i] += add_to; + } + for (i = 0; i < all_neighbors.size(); i++) + all_neighbors[i] += add_to; + } + + void print_neighbors() { + uint32_t i, j; + // Left + printf("left: ["); + for (i = 0; i < ndim; i++) { + printf("["); + for (j = 0; j < left_neighbors[i].size(); j++) + printf("%u ", left_neighbors[i][j]); + printf("] "); + } + printf("]\n"); + // Right + printf("right: ["); + for (i = 0; i < ndim; i++) { + printf("["); + for (j = 0; j < right_neighbors[i].size(); j++) + printf("%u ", right_neighbors[i][j]); + printf("] "); + } + printf("]\n"); + } + + void add_neighbors(Node* curr, uint32_t dim) { + if (curr->is_leaf) { + left_neighbors[dim].push_back(curr->leafid); + curr->right_neighbors[dim].push_back(leafid); + } else { + if (curr->split_dim == dim) { + add_neighbors(curr->greater, dim); + } else { + if (curr->split > this->right_edge[curr->split_dim]) + add_neighbors(curr->less, dim); + else if (curr->split < this->left_edge[curr->split_dim]) + add_neighbors(curr->greater, dim); + else { + add_neighbors(curr->less, dim); + add_neighbors(curr->greater, dim); + } + } + } + } + + void clear_neighbors() { + 
uint32_t d; + for (d = 0; d < ndim; d++) { + left_neighbors[d].clear(); + right_neighbors[d].clear(); + } + } + + bool is_left_node(Node *lnode, uint32_t ldim) { + uint32_t d; + for (d = 0; d < ndim; d++) { + if (d == ldim) + continue; + if (right_edge[d] < lnode->left_edge[d]) + return false; + if (left_edge[d] > lnode->right_edge[d]) + return false; + } + return true; + } + + void select_unique_neighbors() { + if (!is_leaf) + return; + + uint32_t d; + std::vector::iterator last; + for (d = 0; d < ndim; d++) { + // left + std::sort(left_neighbors[d].begin(), left_neighbors[d].end()); + last = std::unique(left_neighbors[d].begin(), left_neighbors[d].end()); + left_neighbors[d].erase(last, left_neighbors[d].end()); + // right + std::sort(right_neighbors[d].begin(), right_neighbors[d].end()); + last = std::unique(right_neighbors[d].begin(), right_neighbors[d].end()); + right_neighbors[d].erase(last, right_neighbors[d].end()); + } + } + + void join_neighbors() { + if (!is_leaf) + return; + + uint32_t d; + std::vector::iterator last; + // Create concatenated vector and remove duplicates + all_neighbors = left_neighbors[0]; + for (d = 1; d < ndim; d++) + all_neighbors.insert(all_neighbors.end(), left_neighbors[d].begin(), left_neighbors[d].end()); + for (d = 0; d < ndim; d++) + all_neighbors.insert(all_neighbors.end(), right_neighbors[d].begin(), right_neighbors[d].end()); + + // Get unique + std::sort(all_neighbors.begin(), all_neighbors.end()); + last = std::unique(all_neighbors.begin(), all_neighbors.end()); + all_neighbors.erase(last, all_neighbors.end()); + + } + + bool check_overlap(Node other, uint32_t dim) { + if (other.right_edge[dim] < left_edge[dim]) + return false; + else if (other.left_edge[dim] > right_edge[dim]) + return false; + else + return true; + } + +}; + +void write_tree_nodes(std::ostream &os, Node* node) { + if (node) { + // depth first search of tree below node, writing each node to os + // as we go + node->serialize(os); + write_tree_nodes(os, node->less); + write_tree_nodes(os, node->greater); + } + else { + // write null character to indicate empty node + serialize_scalar(os, false); + } +} + +Node* read_tree_nodes(std::istream &is, + std::vector &leaves, + std::vector &left_nodes) { + Node* node = new Node(is); + node->left_nodes = left_nodes; + bool is_leaf = true; + + if (is.peek()) { + // read left subtree + node->less = read_tree_nodes(is, leaves, left_nodes); + is_leaf = false; + } + else { + // no left children + is.get(); + node->less = NULL; + } + + if (is.peek()) { + // read right subtree + std::vector greater_left_nodes = left_nodes; + greater_left_nodes[node->split_dim] = node->less; + node->greater = read_tree_nodes(is, leaves, greater_left_nodes); + is_leaf = false; + } + else { + // no right children + is.get(); + node->greater = NULL; + } + + if (is_leaf) { + leaves.push_back(node); + for (uint32_t d = 0; d < node->ndim; d++) { + if ((node->left_nodes[d]) && (!(node->left_nodes[d]->is_empty))) { + node->add_neighbors(node->left_nodes[d], d); + } + } + } + + return node; +} + +void free_tree_nodes(Node* node) { + if (node) + { + free_tree_nodes(node->less); + free_tree_nodes(node->greater); + delete node; + } +} + +class KDTree +{ +public: + bool is_partial; + bool skip_dealloc_root; + bool use_sliding_midpoint; + uint64_t* all_idx; + uint64_t npts; + uint32_t ndim; + uint64_t left_idx; + int64_t data_version; + bool *periodic_left; + bool *periodic_right; + uint32_t leafsize; + double* domain_left_edge; + double* domain_right_edge; + double* 
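write_tree_nodes and read_tree_nodes serialize the tree in preorder, prefixing every real node with a truthy check bit and emitting a single falsy byte wherever a child is missing, so the reader can rebuild the topology without storing child pointers. A simplified Python sketch of that sentinel scheme, with None standing in for the falsy byte:

    class Node:
        def __init__(self, value, less=None, greater=None):
            self.value, self.less, self.greater = value, less, greater

    def write(node, out):
        if node is None:
            out.append(None)          # sentinel: no child here
            return
        out.append(node.value)        # preorder: node, then children
        write(node.less, out)
        write(node.greater, out)

    def read(stream):
        value = next(stream)
        if value is None:
            return None
        node = Node(value)
        node.less = read(stream)
        node.greater = read(stream)
        return node

    tree = Node(1, Node(2), Node(3, None, Node(4)))
    buf = []
    write(tree, buf)
    rebuilt = read(iter(buf))
    assert rebuilt.greater.greater.value == 4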
domain_width; + bool* periodic; + bool any_periodic; + double* domain_mins; + double* domain_maxs; + uint32_t num_leaves; + std::vector leaves; + Node* root; + + // KDTree() {} + KDTree(double *pts, uint64_t *idx, uint64_t n, uint32_t m, + uint32_t leafsize0, double *left_edge, double *right_edge, + bool *periodic_left0, bool *periodic_right0, + double *domain_mins0, double *domain_maxs0, int64_t dversion, + bool use_sliding_midpoint0 = false, bool dont_build = false) + { + is_partial = true; + skip_dealloc_root = false; + use_sliding_midpoint = use_sliding_midpoint0; + + all_idx = idx; + npts = n; + ndim = m; + leafsize = leafsize0; + domain_left_edge = (double*)malloc(ndim*sizeof(double)); + domain_right_edge = (double*)malloc(ndim*sizeof(double)); + periodic_left = (bool*)malloc(ndim*sizeof(bool)); + periodic_right = (bool*)malloc(ndim*sizeof(bool)); + data_version = dversion; + periodic = (bool*)malloc(ndim*sizeof(bool)); + domain_mins = NULL; + domain_maxs = NULL; + domain_width = (double*)malloc(ndim*sizeof(double)); + num_leaves = 0; + + memcpy(domain_left_edge, left_edge, ndim*sizeof(double)); + memcpy(domain_right_edge, right_edge, ndim*sizeof(double)); + memcpy(periodic_left, periodic_left0, ndim*sizeof(bool)); + memcpy(periodic_right, periodic_right0, ndim*sizeof(bool)); + + if (domain_mins0) { + domain_mins = (double*)malloc(ndim*sizeof(double)); + memcpy(domain_mins, domain_mins0, ndim*sizeof(double)); + } else if (pts) { + domain_mins = min_pts(pts, n, m); + } + if (domain_maxs0) { + domain_maxs = (double*)malloc(ndim*sizeof(double)); + memcpy(domain_maxs, domain_maxs0, ndim*sizeof(double)); + } else if (pts) { + domain_maxs = max_pts(pts, n, m); + } + + any_periodic = false; + for (uint32_t d = 0; d < ndim; d++) { + if ((periodic_left[d]) && (periodic_right[d])) { + periodic[d] = true; + any_periodic = true; + } else { + periodic[d] = false; + } + } + + for (uint32_t d = 0; d < ndim; d++) + domain_width[d] = domain_right_edge[d] - domain_left_edge[d]; + + if ((pts) && (!(dont_build))) + build_tree(pts); + + } + KDTree(double *pts, uint64_t *idx, uint64_t n, uint32_t m, uint32_t leafsize0, + double *left_edge, double *right_edge, bool *periodic0, int64_t dversion, + bool use_sliding_midpoint0 = false, bool dont_build = false) + { + is_partial = false; + skip_dealloc_root = false; + use_sliding_midpoint = use_sliding_midpoint0; + left_idx = 0; + + all_idx = idx; + npts = n; + ndim = m; + leafsize = leafsize0; + domain_left_edge = (double*)malloc(ndim*sizeof(double)); + domain_right_edge = (double*)malloc(ndim*sizeof(double)); + data_version = dversion; + periodic_left = (bool*)malloc(ndim*sizeof(bool)); + periodic_right = (bool*)malloc(ndim*sizeof(bool)); + periodic = (bool*)malloc(ndim*sizeof(bool)); + domain_mins = NULL; + domain_maxs = NULL; + domain_width = (double*)malloc(ndim*sizeof(double)); + num_leaves = 0; + + memcpy(domain_left_edge, left_edge, ndim*sizeof(double)); + memcpy(domain_right_edge, right_edge, ndim*sizeof(double)); + memcpy(periodic, periodic0, ndim*sizeof(bool)); + + if (pts) { + domain_mins = min_pts(pts, n, m); + domain_maxs = max_pts(pts, n, m); + } + + any_periodic = false; + for (uint32_t d = 0; d < ndim; d++) { + if (periodic[d]) { + periodic_left[d] = true; + periodic_right[d] = true; + any_periodic = true; + } else { + periodic_left[d] = false; + periodic_right[d] = false; + } + } + + for (uint32_t d = 0; d < ndim; d++) + domain_width[d] = domain_right_edge[d] - domain_left_edge[d]; + + if ((pts) && (!(dont_build))) + build_tree(pts); + + } + 
KDTree(std::istream &is) + { + data_version = deserialize_scalar(is); + is_partial = deserialize_scalar(is); + use_sliding_midpoint = deserialize_scalar(is); + npts = deserialize_scalar(is); + all_idx = deserialize_pointer_array(is, npts); + ndim = deserialize_scalar(is); + left_idx = deserialize_scalar(is); + periodic = deserialize_pointer_array(is, ndim); + periodic_left = deserialize_pointer_array(is, ndim); + periodic_right = deserialize_pointer_array(is, ndim); + any_periodic = deserialize_scalar(is); + leafsize = deserialize_scalar(is); + domain_left_edge = deserialize_pointer_array(is, ndim); + domain_right_edge = deserialize_pointer_array(is, ndim); + domain_width = deserialize_pointer_array(is, ndim); + domain_mins = deserialize_pointer_array(is, ndim); + domain_maxs = deserialize_pointer_array(is, ndim); + num_leaves = deserialize_scalar(is); + std::vector left_nodes; + for (uint32_t i=0; i < ndim; i++) { + left_nodes.push_back(NULL); + } + root = read_tree_nodes(is, leaves, left_nodes); + finalize_neighbors(); + } + void serialize(std::ostream &os) + { + serialize_scalar(os, data_version); + serialize_scalar(os, is_partial); + serialize_scalar(os, use_sliding_midpoint); + serialize_scalar(os, npts); + serialize_pointer_array(os, all_idx, npts); + serialize_scalar(os, ndim); + serialize_scalar(os, left_idx); + serialize_pointer_array(os, periodic, ndim); + serialize_pointer_array(os, periodic_left, ndim); + serialize_pointer_array(os, periodic_right, ndim); + serialize_scalar(os, any_periodic); + serialize_scalar(os, leafsize); + serialize_pointer_array(os, domain_left_edge, ndim); + serialize_pointer_array(os, domain_right_edge, ndim); + serialize_pointer_array(os, domain_width, ndim); + serialize_pointer_array(os, domain_mins, ndim); + serialize_pointer_array(os, domain_maxs, ndim); + serialize_scalar(os, num_leaves); + write_tree_nodes(os, root); + } + ~KDTree() + { + if (!(skip_dealloc_root)) + free_tree_nodes(root); + free(domain_left_edge); + free(domain_right_edge); + free(domain_width); + if (domain_mins) + free(domain_mins); + if (domain_maxs) + free(domain_maxs); + free(periodic); + free(periodic_left); + free(periodic_right); + } + + void consolidate_edges(double *leaves_le, double *leaves_re) { + for (uint32_t k = 0; k < num_leaves; k++) { + memcpy(leaves_le+ndim*leaves[k]->leafid, + leaves[k]->left_edge, + ndim*sizeof(double)); + memcpy(leaves_re+ndim*leaves[k]->leafid, + leaves[k]->right_edge, + ndim*sizeof(double)); + } + } + + void build_tree(double* all_pts) { + uint32_t d; + double *LE = (double*)malloc(ndim*sizeof(double)); + double *RE = (double*)malloc(ndim*sizeof(double)); + bool *PLE = (bool*)malloc(ndim*sizeof(bool)); + bool *PRE = (bool*)malloc(ndim*sizeof(bool)); + double *mins = (double*)malloc(ndim*sizeof(double)); + double *maxs = (double*)malloc(ndim*sizeof(double)); + std::vector left_nodes; + + if (!(domain_mins)) + domain_mins = min_pts(all_pts, npts, ndim); + if (!(domain_maxs)) + domain_maxs = max_pts(all_pts, npts, ndim); + + for (d = 0; d < ndim; d++) { + LE[d] = domain_left_edge[d]; + RE[d] = domain_right_edge[d]; + PLE[d] = periodic_left[d]; + PRE[d] = periodic_right[d]; + mins[d] = domain_mins[d]; + maxs[d] = domain_maxs[d]; + left_nodes.push_back(NULL); + } + + root = build(0, npts, LE, RE, PLE, PRE, all_pts, + mins, maxs, left_nodes); + + free(LE); + free(RE); + free(PLE); + free(PRE); + free(mins); + free(maxs); + + // Finalize neighbors + finalize_neighbors(); + + } + + void finalize_neighbors() { + uint32_t d; + + // Add periodic 
neighbors + if (any_periodic) + set_neighbors_periodic(); + + // Remove duplicate neighbors + for (d = 0; d < num_leaves; d++) { + leaves[d]->select_unique_neighbors(); + leaves[d]->join_neighbors(); + } + } + + void clear_neighbors() { + std::vector::iterator it; + for (it = leaves.begin(); it != leaves.end(); it++) + (*it)->clear_neighbors(); + } + + void set_neighbors_periodic() + { + uint32_t d0; + Node* leaf; + Node *prev; + uint64_t i, j; + + // Periodic neighbors + for (i = 0; i < num_leaves; i++) { + leaf = leaves[i]; + for (d0 = 0; d0 < ndim; d0++) { + if (!leaf->periodic_left[d0]) + continue; + for (j = i; j < num_leaves; j++) { + prev = leaves[j]; + if (!prev->periodic_right[d0]) + continue; + add_neighbors_periodic(leaf, prev, d0); + } + } + } + } + + void add_neighbors_periodic(Node *leaf, Node *prev, uint32_t d0) { + uint32_t d, ndim_escape; + bool match; + if (!leaf->periodic_left[d0]) + return; + if (!prev->periodic_right[d0]) + return; + match = true; + ndim_escape = 0; + for (d = 0; d < ndim; d++) { + if (d == d0) + continue; + if (leaf->left_edge[d] >= prev->right_edge[d]) { + if (!(leaf->periodic_right[d] && prev->periodic_left[d])) { + match = false; + break; + } else { + ndim_escape++; + } + } + if (leaf->right_edge[d] <= prev->left_edge[d]) { + if (!(prev->periodic_right[d] && leaf->periodic_left[d])) { + match = false; + break; + } else { + ndim_escape++; + } + } + } + if ((match) && (ndim_escape < (ndim-1))) { + // printf("%d: %d, %d (%d)\n", d0, leaf->leafid, prev->leafid, ndim_escape); + leaf->left_neighbors[d0].push_back(prev->leafid); + prev->right_neighbors[d0].push_back(leaf->leafid); + } + } + + Node* build(uint64_t Lidx, uint64_t n, + double *LE, double *RE, + bool *PLE, bool *PRE, + double* all_pts, + double *mins, double *maxes, + std::vector left_nodes) + { + // Create leaf + if (n < leafsize) { + Node* out = new Node(ndim, LE, RE, PLE, PRE, Lidx, n, num_leaves, + left_nodes); + num_leaves++; + leaves.push_back(out); + return out; + } else { + // Split + uint32_t dmax, d; + int64_t split_idx = 0; + double split_val = 0.0; + dmax = split(all_pts, all_idx, Lidx, n, ndim, mins, maxes, + split_idx, split_val, use_sliding_midpoint); + if (maxes[dmax] == mins[dmax]) { + // all points singular + Node* out = new Node(ndim, LE, RE, PLE, PRE, Lidx, n, num_leaves, + left_nodes); + num_leaves++; + leaves.push_back(out); + return out; + } + + // Get new boundaries + uint64_t Nless = split_idx-Lidx+1; + uint64_t Ngreater = n - Nless; + double *lessmaxes = (double*)malloc(ndim*sizeof(double)); + double *lessright = (double*)malloc(ndim*sizeof(double)); + bool *lessPRE = (bool*)malloc(ndim*sizeof(bool)); + double *greatermins = (double*)malloc(ndim*sizeof(double)); + double *greaterleft = (double*)malloc(ndim*sizeof(double)); + bool *greaterPLE = (bool*)malloc(ndim*sizeof(bool)); + std::vector greater_left_nodes; + for (d = 0; d < ndim; d++) { + lessmaxes[d] = maxes[d]; + lessright[d] = RE[d]; + lessPRE[d] = PRE[d]; + greatermins[d] = mins[d]; + greaterleft[d] = LE[d]; + greaterPLE[d] = PLE[d]; + greater_left_nodes.push_back(left_nodes[d]); + } + lessmaxes[dmax] = split_val; + lessright[dmax] = split_val; + lessPRE[dmax] = false; + greatermins[dmax] = split_val; + greaterleft[dmax] = split_val; + greaterPLE[dmax] = false; + + // Build less and greater nodes + Node* less = build(Lidx, Nless, LE, lessright, PLE, lessPRE, + all_pts, mins, lessmaxes, left_nodes); + greater_left_nodes[dmax] = less; + Node* greater = build(Lidx+Nless, Ngreater, greaterleft, RE, + greaterPLE, 
PRE, all_pts, + greatermins, maxes, greater_left_nodes); + + // Create innernode referencing child nodes + Node* out = new Node(ndim, LE, RE, PLE, PRE, Lidx, dmax, split_val, + less, greater, left_nodes); + + free(lessright); + free(greaterleft); + free(lessPRE); + free(greaterPLE); + free(lessmaxes); + free(greatermins); + return out; + } + } + + double* wrap_pos(double* pos) { + uint32_t d; + double* wrapped_pos = (double*)malloc(ndim*sizeof(double)); + for (d = 0; d < ndim; d++) { + if (periodic[d]) { + if (pos[d] < domain_left_edge[d]) { + wrapped_pos[d] = domain_right_edge[d] - fmod((domain_right_edge[d] - pos[d]),domain_width[d]); + } else { + wrapped_pos[d] = domain_left_edge[d] + fmod((pos[d] - domain_left_edge[d]),domain_width[d]); + } + } else { + wrapped_pos[d] = pos[d]; + } + } + return wrapped_pos; + } + + Node* search(double* pos0, bool dont_wrap = false) + { + uint32_t i; + Node* out = NULL; + bool valid; + // Wrap positions + double* pos; + if ((!dont_wrap) && (any_periodic)) + pos = wrap_pos(pos0); // allocates new array + else + pos = pos0; + // Ensure that pos is in root, early return NULL if it's not + valid = true; + for (i = 0; i < ndim; i++) { + if (pos[i] < root->left_edge[i]) { + valid = false; + break; + } + if (pos[i] >= root->right_edge[i]) { + valid = false; + break; + } + } + // Traverse tree looking for leaf containing pos + if (valid) { + out = root; + while (!(out->is_leaf)) { + if (pos[out->split_dim] < out->split) + out = out->less; + else + out = out->greater; + } + } + + if ((!dont_wrap) && (any_periodic)) + free(pos); + return out; + } + + std::vector get_neighbor_ids(double* pos) + { + Node* leaf; + std::vector neighbors; + leaf = search(pos); + if (leaf) + neighbors = leaf->all_neighbors; + return neighbors; + } + +}; diff --git a/yt/utilities/lib/cykdtree/c_utils.cpp b/yt/utilities/lib/cykdtree/c_utils.cpp new file mode 100644 index 00000000000..8a0323e011b --- /dev/null +++ b/yt/utilities/lib/cykdtree/c_utils.cpp @@ -0,0 +1,257 @@ +#include +#include +#include +#include +#include +#include +#include "c_utils.hpp" +#include + +bool isEqual(double f1, double f2) { + return (fabs(f1 - f2) <= FLT_EPSILON); +} + +double* max_pts(double *pts, uint64_t n, uint32_t m) +{ + double* max = (double*)std::malloc(m*sizeof(double)); + uint32_t d; + for (d = 0; d < m; d++) max[d] = -DBL_MAX; // pts[d]; + for (uint64_t i = 0; i < n; i++) { + for (d = 0; d < m; d++) { + if (pts[m*i + d] > max[d]) + max[d] = pts[m*i + d]; + } + } + return max; +} + +double* min_pts(double *pts, uint64_t n, uint32_t m) +{ + double* min = (double*)std::malloc(m*sizeof(double)); + uint32_t d; + for (d = 0; d < m; d++) min[d] = DBL_MAX; // pts[d]; + for (uint64_t i = 0; i < n; i++) { + for (d = 0; d < m; d++) { + if (pts[m*i + d] < min[d]) + min[d] = pts[m*i + d]; + } + } + return min; +} + +uint64_t argmax_pts_dim(double *pts, uint64_t *idx, + uint32_t m, uint32_t d, + uint64_t Lidx, uint64_t Ridx) +{ + double max = -DBL_MAX; + uint64_t idx_max = Lidx; + for (uint64_t i = Lidx; i <= Ridx; i++) { + if (pts[m*idx[i] + d] > max) { + max = pts[m*idx[i] + d]; + idx_max = i; + } + } + return idx_max; +} + +uint64_t argmin_pts_dim(double *pts, uint64_t *idx, + uint32_t m, uint32_t d, + uint64_t Lidx, uint64_t Ridx) +{ + double min = DBL_MAX; + uint64_t idx_min = Lidx; + for (uint64_t i = Lidx; i <= Ridx; i++) { + if (pts[m*idx[i] + d] < min) { + min = pts[m*idx[i] + d]; + idx_min = i; + } + } + return idx_min; +} + +// http://www.comp.dit.ie/rlawlor/Alg_DS/sorting/quickSort.c +void 
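Before searching, wrap_pos folds a query position back into the domain along each periodic dimension using fmod, and search then walks from the root comparing pos[split_dim] against each node's split value until it reaches a leaf. A sketch of just the wrapping arithmetic, mirroring the branch structure of the C++ code:

    import math

    def wrap_pos(pos, left_edge, right_edge, periodic):
        width = [r - l for l, r in zip(left_edge, right_edge)]
        out = []
        for d, p in enumerate(pos):
            if not periodic[d]:
                out.append(p)
            elif p < left_edge[d]:
                out.append(right_edge[d] - math.fmod(right_edge[d] - p, width[d]))
            else:
                out.append(left_edge[d] + math.fmod(p - left_edge[d], width[d]))
        return out

    # A point one and a half domain widths to the right folds back to 0.5.
    assert wrap_pos([1.5], [0.0], [1.0], [True]) == [0.5]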
quickSort(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r) +{ + int64_t j; + if( l < r ) + { + // divide and conquer + j = partition(pts, idx, ndim, d, l, r, (l+r)/2); + quickSort(pts, idx, ndim, d, l, j-1); + quickSort(pts, idx, ndim, d, j+1, r); + } +} + +void insertSort(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r) +{ + int64_t i, j; + uint64_t t; + + if (r <= l) return; + for (i = l+1; i <= r; i++) { + t = idx[i]; + j = i - 1; + while ((j >= l) && (pts[ndim*idx[j]+d] > pts[ndim*t+d])) { + idx[j+1] = idx[j]; + j--; + } + idx[j+1] = t; + } +} + +int64_t pivot(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r) +{ + if (r < l) { + return -1; + } else if (r == l) { + return l; + } else if ((r - l) < 5) { + insertSort(pts, idx, ndim, d, l, r); + return (l+r)/2; + } + + int64_t i, subr, m5; + uint64_t t; + int64_t nsub = 0; + for (i = l; i <= r; i+=5) { + subr = i + 4; + if (subr > r) subr = r; + + insertSort(pts, idx, ndim, d, i, subr); + m5 = (i+subr)/2; + t = idx[m5]; idx[m5] = idx[l + nsub]; idx[l + nsub] = t; + + nsub++; + } + return pivot(pts, idx, ndim, d, l, l+nsub-1); + // return select(pts, idx, ndim, d, l, l+nsub-1, (nsub/2)+(nsub%2)); +} + +int64_t partition_given_pivot(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, double pivot) { + // If all less than pivot, j will remain r + // If all greater than pivot, j will be l-1 + if (r < l) + return -1; + int64_t i, j, tp = -1; + uint64_t t; + for (i = l, j = r; i <= j; ) { + if ((pts[ndim*idx[i]+d] > pivot) && (pts[ndim*idx[j]+d] <= pivot)) { + t = idx[i]; idx[i] = idx[j]; idx[j] = t; + } + if (isEqual(pts[ndim*idx[i]+d], pivot)) tp = i; + // if (pts[ndim*idx[i]+d] == pivot) tp = i; + if (pts[ndim*idx[i]+d] <= pivot) i++; + if (pts[ndim*idx[j]+d] > pivot) j--; + } + if ((tp >= 0) && (tp != j)) { + t = idx[tp]; idx[tp] = idx[j]; idx[j] = t; + } + + return j; +} + +int64_t partition(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, int64_t p) +{ + double pivot; + int64_t j; + uint64_t t; + if (r < l) + return -1; + pivot = pts[ndim*idx[p]+d]; + t = idx[p]; idx[p] = idx[l]; idx[l] = t; + + j = partition_given_pivot(pts, idx, ndim, d, l+1, r, pivot); + + t = idx[l]; idx[l] = idx[j]; idx[j] = t; + + return j; +} + +// https://en.wikipedia.org/wiki/Median_of_medians +int64_t select(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l0, int64_t r0, int64_t n) +{ + int64_t p; + int64_t l = l0, r = r0; + + while ( 1 ) { + if (l == r) return l; + + p = pivot(pts, idx, ndim, d, l, r); + p = partition(pts, idx, ndim, d, l, r, p); + if (p < 0) + return -1; + else if (n == (p-l0+1)) { + return p; + } else if (n < (p-l0+1)) { + r = p - 1; + } else { + l = p + 1; + } + } +} + +uint32_t split(double *all_pts, uint64_t *all_idx, + uint64_t Lidx, uint64_t n, uint32_t ndim, + double *mins, double *maxes, + int64_t &split_idx, double &split_val, + bool use_sliding_midpoint) { + // Return immediately if variables empty + if ((n == 0) || (ndim == 0)) { + split_idx = -1; + split_val = 0.0; + return 0; + } + + // Find dimension to split along + uint32_t dmax, d; + dmax = 0; + for (d = 1; d < ndim; d++) + if ((maxes[d]-mins[d]) > (maxes[dmax]-mins[dmax])) + dmax = d; + if (maxes[dmax] == mins[dmax]) { + // all points singular + return ndim; + } + + if (use_sliding_midpoint) { + // Split at middle, then slide midpoint as necessary + split_val = (mins[dmax] + maxes[dmax])/2.0; + split_idx 
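pivot(), partition() and select() above implement the classic median-of-medians selection: points are grouped in fives, each group is insertion-sorted, the group medians are gathered at the front of the range, and the procedure recurses until a pivot near the true median is found. A compact, list-based Python sketch of the same idea (illustrative only, and much slower than the in-place C++ version):

    def select_nth(values, n):
        # Return the n-th smallest element (1-based), expected linear time.
        if len(values) <= 5:
            return sorted(values)[n - 1]
        # The median of each group of five gives a pivot guaranteed to be
        # "close enough" to the true median to bound the recursion.
        medians = [sorted(values[i:i + 5])[len(values[i:i + 5]) // 2]
                   for i in range(0, len(values), 5)]
        pivot = select_nth(medians, (len(medians) + 1) // 2)
        less = [v for v in values if v < pivot]
        greater = [v for v in values if v > pivot]
        if n <= len(less):
            return select_nth(less, n)
        if n > len(values) - len(greater):
            return select_nth(greater, n - (len(values) - len(greater)))
        return pivot  # the n-th element equals the pivot

The C++ select() returns a position in the idx array rather than a value and partitions idx in place, but the rank bookkeeping is the same.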
= partition_given_pivot(all_pts, all_idx, ndim, dmax, + Lidx, Lidx+n-1, split_val); + if (split_idx == (int64_t)(Lidx-1)) { + uint64_t t; + split_idx = argmin_pts_dim(all_pts, all_idx, ndim, dmax, Lidx, Lidx+n-1); + t = all_idx[split_idx]; all_idx[split_idx] = all_idx[Lidx]; all_idx[Lidx] = t; + split_idx = Lidx; + split_val = all_pts[ndim*all_idx[split_idx] + dmax]; + } else if (split_idx == (int64_t)(Lidx+n-1)) { + uint64_t t; + split_idx = argmax_pts_dim(all_pts, all_idx, ndim, dmax, Lidx, Lidx+n-1); + t = all_idx[split_idx]; all_idx[split_idx] = all_idx[Lidx+n-1]; all_idx[Lidx+n-1] = t; + split_idx = Lidx+n-2; + split_val = all_pts[ndim*all_idx[split_idx] + dmax]; + } + } else { + // Find median along dimension + int64_t nsel = (n/2) + (n%2); + split_idx = select(all_pts, all_idx, ndim, dmax, Lidx, Lidx+n-1, nsel); + split_val = all_pts[ndim*all_idx[split_idx] + dmax]; + } + + return dmax; +} + + + diff --git a/yt/utilities/lib/cykdtree/c_utils.hpp b/yt/utilities/lib/cykdtree/c_utils.hpp new file mode 100644 index 00000000000..2c6360f8e06 --- /dev/null +++ b/yt/utilities/lib/cykdtree/c_utils.hpp @@ -0,0 +1,41 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +bool isEqual(double f1, double f2); +double* max_pts(double *pts, uint64_t n, uint32_t m); +double* min_pts(double *pts, uint64_t n, uint32_t m); +uint64_t argmin_pts_dim(double *pts, uint64_t *idx, + uint32_t m, uint32_t d, + uint64_t Lidx, uint64_t Ridx); +uint64_t argmax_pts_dim(double *pts, uint64_t *idx, + uint32_t m, uint32_t d, + uint64_t Lidx, uint64_t Ridx); +void quickSort(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r); +void insertSort(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r); +int64_t pivot(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r); +int64_t partition_given_pivot(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, double pivot); +int64_t partition(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, int64_t p); +int64_t select(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, int64_t n); +uint32_t split(double *all_pts, uint64_t *all_idx, + uint64_t Lidx, uint64_t n, uint32_t ndim, + double *mins, double *maxes, + int64_t &split_idx, double &split_val, + bool use_sliding_midpoint = false); diff --git a/yt/utilities/lib/cykdtree/kdtree.pxd b/yt/utilities/lib/cykdtree/kdtree.pxd new file mode 100644 index 00000000000..f8a9415e5c3 --- /dev/null +++ b/yt/utilities/lib/cykdtree/kdtree.pxd @@ -0,0 +1,117 @@ +cimport numpy as np +from libcpp.vector cimport vector +from libcpp.pair cimport pair +from libcpp cimport bool +from libc.stdint cimport uint32_t, uint64_t, int64_t, int32_t + +cdef extern from "" namespace "std": + cdef cppclass istream: + pass + + cdef cppclass ostream: + pass + +# the following extern definitions adapted from +# http://stackoverflow.com/a/31009461/1382869 + +# obviously std::ios_base isn't a namespace, but this lets +# Cython generate the correct C++ code +cdef extern from "" namespace "std::ios_base": + cdef cppclass open_mode: + pass + cdef open_mode binary + # you can define other constants as needed + +cdef extern from "" namespace "std": + cdef cppclass ofstream(ostream): + # constructors + ofstream(const char*) except + + ofstream(const char*, open_mode) except+ + + cdef cppclass ifstream(istream): + # constructors + ifstream(const char*) except + + 
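split() picks the widest dimension of the current bounding box and either bisects it (the sliding-midpoint rule) or splits at the median. A NumPy sketch of that decision, assuming pts is the (n, ndim) array of points in the node being split and mins/maxes are its bounds; note the real code additionally moves one extreme point across the split index when the plane slides, which a value-only sketch cannot show:

    import numpy as np

    def choose_split(pts, mins, maxes, use_sliding_midpoint=False):
        # Split along the widest dimension of the bounding box.
        d = int(np.argmax(maxes - mins))
        if maxes[d] == mins[d]:
            return None  # all points coincide: the caller makes a leaf instead
        if use_sliding_midpoint:
            val = 0.5 * (mins[d] + maxes[d])
            if not np.any(pts[:, d] <= val):
                val = pts[:, d].min()   # "less" side would be empty: slide down
            elif np.all(pts[:, d] <= val):
                val = pts[:, d].max()   # "greater" side would be empty: slide up
            return d, float(val)
        # Median split: roughly half the points go to each child.
        return d, float(np.median(pts[:, d]))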
ifstream(const char*, open_mode) except+ + + +cdef extern from "c_kdtree.hpp": + cdef cppclass Node: + bool is_leaf + uint32_t leafid + uint32_t ndim + double *left_edge + double *right_edge + uint64_t left_idx + uint64_t children + uint32_t split_dim + double split + Node* less + Node* greater + bool *periodic_left + bool *periodic_right + vector[vector[uint32_t]] left_neighbors + vector[vector[uint32_t]] right_neighbors + vector[uint32_t] all_neighbors + cdef cppclass KDTree: + uint64_t* all_idx + uint64_t npts + uint32_t ndim + int64_t data_version + uint32_t leafsize + double* domain_left_edge + double* domain_right_edge + double* domain_width + double* domain_mins + double* domain_maxs + bool* periodic + bool any_periodic + uint32_t num_leaves + vector[Node*] leaves + Node* root + KDTree(double *pts, uint64_t *idx, uint64_t n, uint32_t m, + uint32_t leafsize0, double *left_edge, double *right_edge, + bool *periodic, int64_t data_version) + KDTree(double *pts, uint64_t *idx, uint64_t n, uint32_t m, + uint32_t leafsize0, double *left_edge, double *right_edge, + bool *periodic, int64_t data_version, + bool use_sliding_midpoint) + KDTree(istream &ist) + void serialize(ostream &os) + double* wrap_pos(double* pos) nogil + vector[uint32_t] get_neighbor_ids(double* pos) nogil + Node* search(double* pos) nogil + Node* search(double* pos, bool dont_wrap) nogil + void consolidate_edges(double *leaves_le, double *leaves_re) + + +cdef class PyNode: + cdef Node *_node + cdef readonly np.uint32_t id + cdef readonly np.uint64_t npts + cdef readonly np.uint32_t ndim + cdef readonly np.uint32_t num_leaves + cdef readonly np.uint64_t start_idx + cdef readonly np.uint64_t stop_idx + cdef double *_domain_width + cdef readonly object left_neighbors, right_neighbors + cdef void _init_node(self, Node* node, uint32_t num_leaves, + double *domain_width) + +cdef class PyKDTree: + cdef KDTree *_tree + cdef readonly uint64_t npts + cdef readonly uint32_t ndim + cdef readonly uint32_t num_leaves + cdef readonly uint32_t leafsize + cdef readonly int64_t data_version + cdef double *_left_edge + cdef double *_right_edge + cdef bool *_periodic + cdef readonly object leaves + cdef readonly object _idx + cdef void _init_tree(self, KDTree* tree) + cdef void _make_tree(self, double *pts, bool use_sliding_midpoint) + cdef void _make_leaves(self) + cdef np.ndarray[np.uint32_t, ndim=1] _get_neighbor_ids(self, np.ndarray[double, ndim=1] pos) + cdef np.ndarray[np.uint32_t, ndim=1] _get_neighbor_ids_3(self, np.float64_t pos[3]) + cdef PyNode _get(self, np.ndarray[double, ndim=1] pos) diff --git a/yt/utilities/lib/cykdtree/kdtree.pyx b/yt/utilities/lib/cykdtree/kdtree.pyx new file mode 100644 index 00000000000..928cca6d95c --- /dev/null +++ b/yt/utilities/lib/cykdtree/kdtree.pyx @@ -0,0 +1,480 @@ +import cython +import numpy as np +cimport numpy as np + +from libc.stdlib cimport malloc, free +from libcpp cimport bool as cbool +from cpython cimport bool as pybool +from cython.operator cimport dereference + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +cdef class PyNode: + r"""A container for leaf info. + + Attributes: + npts (np.uint64_t): Number of points in this node. + ndim (np.uint32_t): Number of dimensions in domain. + num_leaves (np.uint32_t): Number of leaves in the tree containing this + node. + start_idx (np.uint64_t): Index where indices for this node begin. + stop_idx (np.uint64_t): One passed the end of indices for this node. 
+ left_edge (np.ndarray of float64): Minimum bounds of this node in each + dimension. + right_edge (np.ndarray of float64): Maximum bounds of this node in each + dimension. + periodic_left (np.ndarray of bool): Periodicity of minimum bounds. + periodic_right (np.ndarray of bool): Periodicity of maximum bounds. + domain_width (np.ndarray of float64): Width of the total domain in each + dimension. + left_neighbors (list of lists): Indices of neighbor leaves at the + minimum bounds in each dimension. + right_neighbors (list of lists): Indices of neighbor leaves at the + maximum bounds in each dimension. + + """ + + cdef void _init_node(self, Node* node, uint32_t num_leaves, + double *domain_width): + cdef np.uint32_t i, j + self._node = node + self.id = node.leafid + self.npts = node.children + self.ndim = node.ndim + self.num_leaves = num_leaves + self.start_idx = node.left_idx + self.stop_idx = (node.left_idx + node.children) + self._domain_width = domain_width + self.left_neighbors = [None for i in range(self.ndim)] + self.right_neighbors = [None for i in range(self.ndim)] + for i in range(self.ndim): + self.left_neighbors[i] = [node.left_neighbors[i][j] for j in + range(node.left_neighbors[i].size())] + self.right_neighbors[i] = [node.right_neighbors[i][j] for j in + range(node.right_neighbors[i].size())] + + def __cinit__(self): + # Initialize everthing to NULL/0/None to prevent seg fault + self._node = NULL + self.id = 0 + self.npts = 0 + self.ndim = 0 + self.num_leaves = 0 + self.start_idx = 0 + self.stop_idx = 0 + self._domain_width = NULL + self.left_neighbors = None + self.right_neighbors = None + + def __init__(self): + pass + + def __repr__(self): + nchars = 1 + len(str(self.__class__.__name__)) + return ('%s(id=%i, npts=%i, start_idx=%i, stop_idx=%i,\n' + + ' ' * nchars + 'left_edge=%s,\n' + + ' ' * nchars + 'right_edge=%s)') % ( + self.__class__.__name__, + self.id, + self.npts, + self.start_idx, + self.stop_idx, + self.left_edge, + self.right_edge, + ) + + @property + def periodic_left(self): + cdef cbool[:] view = self._node.periodic_left + return np.asarray(view) + @property + def periodic_right(self): + cdef cbool[:] view = self._node.periodic_right + return np.asarray(view) + @property + def left_edge(self): + cdef np.float64_t[:] view = self._node.left_edge + return np.asarray(view) + @property + def right_edge(self): + cdef np.float64_t[:] view = self._node.right_edge + return np.asarray(view) + @property + def domain_width(self): + cdef np.float64_t[:] view = self._domain_width + return np.asarray(view) + + @property + def slice(self): + """slice: Slice of kdtree indices contained by this node.""" + return slice(self.start_idx, self.stop_idx) + + @property + def neighbors(self): + """list of int: Indices of all neighboring leaves including this + leaf.""" + cdef np.uint32_t i + cdef object out + cdef vector[uint32_t] vout = self._node.all_neighbors + out = [vout[i] for i in range(vout.size())] + return out + + def assert_equal(self, PyNode solf): + """Assert that node properties are equal.""" + np.testing.assert_equal(self.npts, solf.npts) + np.testing.assert_equal(self.ndim, solf.ndim) + np.testing.assert_equal(self.num_leaves, solf.num_leaves) + np.testing.assert_equal(self.id, solf.id) + np.testing.assert_equal(self.start_idx, solf.start_idx) + np.testing.assert_equal(self.stop_idx, solf.stop_idx) + np.testing.assert_array_equal(self.left_edge, solf.left_edge) + np.testing.assert_array_equal(self.right_edge, solf.right_edge) + 
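The per-leaf bookkeeping documented above can be inspected directly once a tree exists; a hypothetical sketch (the point counts and leafsize are arbitrary, construction follows the tests further down):

    import numpy as np
    import yt.utilities.lib.cykdtree as cykdtree

    pts = np.random.rand(200, 2)
    tree = cykdtree.PyKDTree(pts, np.zeros(2), np.ones(2), leafsize=20)

    leaf = tree.leaves[0]
    print(leaf.left_edge, leaf.right_edge)   # bounding box of this leaf
    for d in range(tree.ndim):
        # Neighboring leaf ids across the lower/upper face in dimension d.
        print(d, leaf.left_neighbors[d], leaf.right_neighbors[d])
    print(leaf.neighbors)                    # all neighbors, including this leaf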
np.testing.assert_array_equal(self.periodic_left, solf.periodic_left) + np.testing.assert_array_equal(self.periodic_right, solf.periodic_right) + for i in range(self.ndim): + np.testing.assert_equal(self.left_neighbors[i], solf.left_neighbors[i]) + np.testing.assert_equal(self.right_neighbors[i], solf.right_neighbors[i]) + np.testing.assert_equal(self.neighbors, solf.neighbors) + + +cdef class PyKDTree: + r"""Construct a KDTree for a set of points. + + Args: + pts (np.ndarray of double): (n,m) array of n coordinates in a + m-dimensional domain. + left_edge (np.ndarray of double): (m,) domain minimum in each dimension. + right_edge (np.ndarray of double): (m,) domain maximum in each dimension. + periodic (bool or np.ndarray of bool, optional): Truth of the domain + periodicity overall (if bool), or in each dimension (if np.ndarray). + Defaults to `False`. + leafsize (int, optional): The maximum number of points that should be in + a leaf. Defaults to 10000. + nleaves (int, optional): The number of leaves that should be in the + resulting tree. If greater than 0, leafsize is adjusted to produce a + tree with 2**(ceil(log2(nleaves))) leaves. The leafsize keyword + argument is ignored if nleaves is greater zero. Defaults to 0. + data_version (int, optional): An optional user-provided integer that + can be used to uniquely identify the data used to generate the + KDTree. This is useful if you save the kdtree to disk and restore + it later and need to verify that the underlying data is the same. + use_sliding_midpoint (bool, optional): If True, the sliding midpoint + rule is used to perform splits. Otherwise, the median is used. + Defaults to False. + + Raises: + ValueError: If `leafsize < 2`. This currectly segfaults. + + Attributes: + npts (uint64): Number of points in the tree. + ndim (uint32): Number of dimensions points occupy. + data_version (int64): User-provided version number (defaults to 0) + num_leaves (uint32): Number of leaves in the tree. + leafsize (uint32): Maximum number of points a leaf can have. + leaves (list of `cykdtree.PyNode`): Tree leaves. + idx (np.ndarray of uint64): Indices sorting the points by leaf. + left_edge (np.ndarray of double): (m,) domain minimum in each dimension. + right_edge (np.ndarray of double): (m,) domain maximum in each dimension. + domain_width (np.ndarray of double): (m,) domain width in each dimension. + periodic (np.ndarray of bool): Truth of domain periodicity in each + dimension. 
+ + """ + + cdef void _init_tree(self, KDTree* tree): + self._tree = tree + self.ndim = tree.ndim + self.data_version = tree.data_version + self.npts = tree.npts + self.num_leaves = tree.num_leaves + self.leafsize = tree.leafsize + self._make_leaves() + self._idx = np.empty(self.npts, 'uint64') + cdef uint64_t i + for i in range(self.npts): + self._idx[i] = tree.all_idx[i] + + def __cinit__(self): + # Initialize everthing to NULL/0/None to prevent seg fault + self._tree = NULL + self.npts = 0 + self.ndim = 0 + self.num_leaves = 0 + self.leafsize = 0 + self._left_edge = NULL + self._right_edge = NULL + self._periodic = NULL + self.leaves = None + self._idx = None + + def __init__(self, np.ndarray[double, ndim=2] pts = None, + left_edge = None, + right_edge = None, + periodic = False, + int leafsize = 10000, + int nleaves = 0, + data_version = None, + use_sliding_midpoint = False): + # Return with nothing set if points not provided + if pts is None: + return + # Set leafsize of number of leaves provided + if nleaves > 0: + nleaves = (2**np.ceil(np.log2(nleaves))) + leafsize = pts.shape[0]/nleaves + 1 + if (leafsize < 2): + # This is here to prevent segfault. The cpp code needs modified to + # support leafsize = 1 + raise ValueError("'leafsize' cannot be smaller than 2.") + if left_edge is None: + left_edge = np.min(pts, axis=0) + else: + left_edge = np.array(left_edge) + if right_edge is None: + right_edge = np.max(pts, axis=0) + else: + right_edge = np.array(right_edge) + if data_version is None: + data_version = 0 + self.data_version = data_version + cdef uint32_t k,i,j + self.npts = pts.shape[0] + self.ndim = pts.shape[1] + assert(left_edge.size == self.ndim) + assert(right_edge.size == self.ndim) + self.leafsize = leafsize + self._left_edge = malloc(self.ndim*sizeof(double)) + self._right_edge = malloc(self.ndim*sizeof(double)) + self._periodic = malloc(self.ndim*sizeof(cbool)); + for i in range(self.ndim): + self._left_edge[i] = left_edge[i] + self._right_edge[i] = right_edge[i] + if isinstance(periodic, pybool): + for i in range(self.ndim): + self._periodic[i] = periodic + else: + for i in range(self.ndim): + self._periodic[i] = periodic[i] + # Create tree and leaves + self._make_tree(&pts[0,0], use_sliding_midpoint) + self._make_leaves() + + def __dealloc__(self): + if self._tree != NULL: + del self._tree + if self._left_edge != NULL: + free(self._left_edge) + if self._right_edge != NULL: + free(self._right_edge) + if self._periodic != NULL: + free(self._periodic) + + def assert_equal(self, PyKDTree solf, pybool strict_idx = True): + r"""Compare this tree to another tree. + + Args: + solf (PyKDTree): Another KDTree to compare with this one. + strict_idx (bool, optional): If True, the index vectors are + compared for equality element by element. If False, + corresponding leaves must contain the same indices, but they + can be in any order. Defaults to True. + + Raises: + AssertionError: If there are missmatches between any of the two + trees' parameters. 
+ + """ + np.testing.assert_equal(self.npts, solf.npts) + np.testing.assert_equal(self.ndim, solf.ndim) + np.testing.assert_equal(self.data_version, solf.data_version) + np.testing.assert_equal(self.leafsize, solf.leafsize) + np.testing.assert_equal(self.num_leaves, solf.num_leaves) + np.testing.assert_array_equal(self.left_edge, solf.left_edge) + np.testing.assert_array_equal(self.right_edge, solf.right_edge) + np.testing.assert_array_equal(self.periodic, solf.periodic) + # Compare index at the leaf level since we only care that the leaves + # contain the same points + if strict_idx: + np.testing.assert_array_equal(self._idx, solf._idx) + for i in range(self.num_leaves): + self.leaves[i].assert_equal(solf.leaves[i]) + if not strict_idx: + np.testing.assert_array_equal( + np.sort(self._idx[self.leaves[i].slice]), + np.sort(solf._idx[solf.leaves[i].slice])) + + cdef void _make_tree(self, double *pts, bool use_sliding_midpoint): + r"""Carry out creation of KDTree at C++ level.""" + cdef uint64_t[:] idx = np.arange(self.npts).astype('uint64') + self._tree = new KDTree(pts, &idx[0], self.npts, self.ndim, self.leafsize, + self._left_edge, self._right_edge, self._periodic, + self.data_version, use_sliding_midpoint) + self._idx = idx + + cdef void _make_leaves(self): + r"""Create a list of Python leaf objects from C++ leaves.""" + self.num_leaves = self._tree.leaves.size() + self.leaves = [None for _ in xrange(self.num_leaves)] + cdef Node* leafnode + cdef PyNode leafnode_py + cdef object leaf_neighbors = None + for k in xrange(self.num_leaves): + leafnode = self._tree.leaves[k] + leafnode_py = PyNode() + leafnode_py._init_node(leafnode, self.num_leaves, + self._tree.domain_width) + self.leaves[leafnode.leafid] = leafnode_py + + @property + def left_edge(self): + cdef np.float64_t[:] view = self._tree.domain_left_edge + return np.asarray(view) + @property + def right_edge(self): + cdef np.float64_t[:] view = self._tree.domain_right_edge + return np.asarray(view) + @property + def domain_width(self): + cdef np.float64_t[:] view = self._tree.domain_width + return np.asarray(view) + @property + def periodic(self): + cdef cbool[:] view = self._tree.periodic + # return np.asarray(view) + cdef object out = np.empty(self.ndim, 'bool') + cdef np.uint32_t i + for i in range(self.ndim): + out[i] = view[i] + return out + + def leaf_idx(self, np.uint32_t leafid): + r"""Get array of indices for points in a leaf. + + Args: + leafid (np.uint32_t): Unique index of the leaf in question. + + Returns: + np.ndarray of np.uint64_t: Indices of points belonging to leaf. + + """ + cdef np.ndarray[np.uint64_t] out = self._idx[self.leaves[leafid].slice] + return out + + cdef np.ndarray[np.uint32_t, ndim=1] _get_neighbor_ids(self, np.ndarray[double, ndim=1] pos): + cdef np.uint32_t i + cdef vector[uint32_t] vout = self._tree.get_neighbor_ids(&pos[0]); + cdef np.ndarray[np.uint32_t, ndim=1] out = np.empty(vout.size(), 'uint32') + for i in xrange(vout.size()): + out[i] = vout[i] + return out + + @property + def idx(self): + return np.asarray(self._idx) + + def get_neighbor_ids(self, np.ndarray[double, ndim=1] pos): + r"""Return the IDs of leaves containing & neighboring a given position. + + Args: + pos (np.ndarray of double): Coordinates. + + Returns: + np.ndarray of uint32: Leaves containing/neighboring `pos`. + + Raises: + ValueError: If pos is not contained withing the KDTree. 
+ + """ + return self._get_neighbor_ids(pos) + + cdef np.ndarray[np.uint32_t, ndim=1] _get_neighbor_ids_3(self, np.float64_t pos[3]): + cdef np.uint32_t i + cdef vector[uint32_t] vout = self._tree.get_neighbor_ids(&pos[0]); + cdef np.ndarray[np.uint32_t, ndim=1] out = np.empty(vout.size(), 'uint32') + for i in xrange(vout.size()): + out[i] = vout[i] + return out + + cdef PyNode _get(self, np.ndarray[double, ndim=1] pos): + assert(len(pos) == self.ndim) + cdef Node* leafnode = self._tree.search(&pos[0]) + if leafnode == NULL: + raise ValueError("Position is not within the kdtree root node.") + cdef PyNode out = self.leaves[leafnode.leafid] + return out + + def get(self, np.ndarray[double, ndim=1] pos): + r"""Return the leaf containing a given position. + + Args: + pos (np.ndarray of double): Coordinates. + + Returns: + :class:`cykdtree.PyNode`: Leaf containing `pos`. + + Raises: + ValueError: If pos is not contained withing the KDTree. + + """ + return self._get(pos) + + def consolidate_edges(self): + r"""Return arrays of the left and right edges for all leaves in the + tree on each process. + + Returns: + tuple(np.ndarray of double, np.ndarray of double): The left (first + array) and right (second array) edges of each leaf (1st array + dimension), in each dimension (2nd array dimension). + + """ + cdef np.ndarray[np.float64_t, ndim=2] leaves_le + cdef np.ndarray[np.float64_t, ndim=2] leaves_re + leaves_le = np.empty((self.num_leaves, self.ndim), 'float64') + leaves_re = np.empty((self.num_leaves, self.ndim), 'float64') + self._tree.consolidate_edges(&leaves_le[0,0], &leaves_re[0,0]) + return (leaves_le, leaves_re) + + def save(self, str filename): + r"""Saves the PyKDTree to disk as raw binary data. + + Note that this file may not necessarily be portable. + + Args: + filename (string): Name of the file to serialize the kdtree to + + """ + cdef KDTree* my_tree = self._tree + cdef ofstream* outputter = new ofstream(filename.encode('utf8'), binary) + try: + my_tree.serialize(dereference(outputter)) + finally: + del outputter + + @classmethod + def from_file(cls, str filename, data_version=None): + r"""Create a PyKDTree from a binary file created by ``PyKDTree.save()`` + + Note that loading a file created on another machine may create + a corrupted PyKDTree instance. + + Args: + filename (string): Name of the file to load the kdtree from + data_version (int): A unique integer corresponding to the data + being loaded. If the loaded data_version does + not match the data_version supplied here then + an OSError is raised. Optional. + + Returns: + :class:`cykdtree.PyKDTree`: A KDTree restored from the file + + """ + cdef ifstream* inputter = new ifstream(filename.encode(), binary) + cdef PyKDTree ret = cls() + if data_version is None: + data_version = 0 + try: + ret._init_tree(new KDTree(dereference(inputter))) + finally: + del inputter + return ret diff --git a/yt/utilities/lib/cykdtree/plot.py b/yt/utilities/lib/cykdtree/plot.py new file mode 100644 index 00000000000..069b8b31b14 --- /dev/null +++ b/yt/utilities/lib/cykdtree/plot.py @@ -0,0 +1,132 @@ +import numpy as np + +def _plot2D_root(seg, pts=None, txt=None, plotfile=None, point_kw={}, box_kw={}, + axs=None, subplot_kw={}, gridspec_kw={}, fig_kw={}, + save_kw={}, title=None, xlabel='x', ylabel='y', label_kw={}): + r"""Plot a 2D kd-tree. + + Args: + seg (list of np.ndarray): Line segments to plot defining box edges. + pts (np.ndarray, optional): Points contained by the kdtree. Defaults to + None if not provided and points are not plotted. 
+ txt (list of tuples, optional): Each tuple contains the (x, y, string) + information for text labels to be added to the boxes. Defaults to + None and text is not added. + plotfile (:obj:`str`, optional): Full path to file where the plot + should be saved. If None, the plot is displayed. Defaults to None + point_kw (:obj:`dict`, optional): Keywords passed directly to + :func:`matplotlib.pyplot.scatter` for drawing the points. Defaults + to empty dict. + box_kw (:obj:`dict`, optional): Keywords passed directly to + :class:`matplotlib.collections.LineCollection` for drawing the + leaf boxes. Defaults to empty dict. + + axs (:obj:`matplotlib.pyplot.Axes`, optional): Axes that should be used + for plotting. Defaults to None and new axes are created. + subplot_kw (:obj:`dict`, optional): Keywords passed directly to + :meth:`matplotlib.figure.Figure.add_subplot`. Defaults to {}. + gridspec_kw (:obj:`dict`, optional): Keywords passed directly to + :class:`matplotlib.gridspec.GridSpec`. Defaults to empty dict. + fig_kw (:obj:`dict`, optional): Keywords passed directly to + :func:`matplotlib.pyplot.figure`. Defaults to empty dict. + save_kw (:obj:`dict`, optional): Keywords passed directly to + :func:`matplotlib.pyplot.savefig`. Defaults to empty dict. + + title (:obj:`str`, optional): Title that the plot should be given. + Defaults to None and no title is displayed. + xlabel (:obj:`str`, optional): Label for the x-axis. Defaults to 'x'. + ylabel (:obj:`str`, optional): Label for the y-axis. Defaults to 'y'. + label_kw (:obj:`dict`, optional): Keywords passed directly to + :class:`matplotlib.text.Text` when creating box labels. Defaults + to empty dict. + + Returns: + :obj:`matplotlib.pyplot.Axes`: Axes containing the plot. + + """ + import matplotlib.pyplot as plt + from matplotlib.collections import LineCollection + + # Axes creation + if axs is None: + plt.close('all') + fig, axs = plt.subplots(subplot_kw=subplot_kw, gridspec_kw=gridspec_kw, + **fig_kw) + + # Labels + if title is not None: + axs.set_title(title) + axs.set_xlabel(xlabel, **label_kw) + axs.set_ylabel(ylabel, **label_kw) + + # Plot points + if isinstance(pts, list): + for p in pts: + if p is not None: + axs.scatter(p[:, 0], p[:, 1], **point_kw) + elif pts is not None: + axs.scatter(pts[:, 0], pts[:, 1], **point_kw) + + # Plot boxes + lc = LineCollection(seg, **box_kw) + axs.add_collection(lc) + + # Labels + if txt is not None: + # label_kw.setdefault('axes', axs) + label_kw.setdefault('verticalalignment', 'bottom') + label_kw.setdefault('horizontalalignment', 'left') + for t in txt: + plt.text(*t, **label_kw) + + axs.autoscale() + axs.margins(0.1) + + # Save + if plotfile is not None: + plt.savefig(plotfile, **save_kw) + else: + plt.show() + + # Return axes + return axs + + +def plot2D_serial(tree, pts=None, label_boxes=False, **kwargs): + r"""Plot a 2D kd-tree constructed in serial. + + Args: + tree (:class:`cykdtree.kdtree.PyKDTree`): kd-tree class. + pts (np.ndarray, optional): Points contained by the kdtree. Defaults to + None if not provided and points are not plotted. + label_boxes (bool, optional): If True, leaves in the tree are labeled + with their index. Defaults to False. + Additional keywords are passed to :func:`cykdtree.plot._plot2D_root`. + + Returns: + :obj:`matplotlib.pyplot.Axes`: Axes containing the plot. 
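A hypothetical end-to-end plotting sketch (mirroring test_plot2D_serial further below), assuming matplotlib is available; the output file name is illustrative:

    import numpy as np
    from yt.utilities.lib.cykdtree.kdtree import PyKDTree
    from yt.utilities.lib.cykdtree.plot import plot2D_serial

    pts = np.random.rand(100, 2)
    tree = PyKDTree(pts, np.zeros(2), np.ones(2), leafsize=10)
    # Draw every leaf box, overlay the points, label each box with its id,
    # and write the figure to disk instead of showing it interactively.
    plot2D_serial(tree, pts, title="Serial kd-tree", label_boxes=True,
                  plotfile="kdtree_leaves.png")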
+ + """ + # Box edges + seg = [] + for leaf in tree.leaves: + le = leaf.left_edge + re = leaf.right_edge + # Top + seg.append(np.array([[le[0], re[1]], [re[0], re[1]]], 'float')) + # Bottom + seg.append(np.array([[le[0], le[1]], [re[0], le[1]]], 'float')) + # Left + seg.append(np.array([[le[0], le[1]], [le[0], re[1]]], 'float')) + # Right + seg.append(np.array([[re[0], le[1]], [re[0], re[1]]], 'float')) + + # Labels + txt = None + if label_boxes: + txt = [] + for leaf in tree.leaves: + txt.append((leaf.left_edge[0], leaf.left_edge[1], '%d' % leaf.id)) + + # Return axes + return _plot2D_root(seg, pts=pts, txt=txt, **kwargs) diff --git a/yt/utilities/lib/cykdtree/tests/__init__.py b/yt/utilities/lib/cykdtree/tests/__init__.py new file mode 100644 index 00000000000..450a1912a45 --- /dev/null +++ b/yt/utilities/lib/cykdtree/tests/__init__.py @@ -0,0 +1,250 @@ +from datetime import datetime +import cProfile +import pstats +import time +from subprocess import Popen, PIPE +from nose.tools import nottest +import numpy as np +import itertools +import sys + + +def assert_less_equal(x, y): + size_match = True + try: + xshape = (1,) + yshape = (1,) + if (isinstance(x, np.ndarray) or isinstance(y, np.ndarray)): + if isinstance(x, np.ndarray): + xshape = x.shape + if isinstance(y, np.ndarray): + yshape = y.shape + size_match = (xshape == yshape) + assert((x <= y).all()) + else: + assert(x <= y) + except: + if not size_match: + raise AssertionError("Shape mismatch\n\n"+ + "x.shape: %s\ny.shape: %s\n" % + (str(x.shape), str(y.shape))) + raise AssertionError("Variables are not less-equal ordered\n\n" + + "x: %s\ny: %s\n" % (str(x), str(y))) + + +def call_subprocess(np, func, args, kwargs): + # Create string with arguments & kwargs + args_str = "" + for a in args: + args_str += str(a)+"," + for k, v in kwargs.items(): + args_str += k+"="+str(v)+"," + if args_str.endswith(","): + args_str = args_str[:-1] + cmd = ["mpirun", "-n", str(np), sys.executable, "-c", + "'from %s import %s; %s(%s)'" % (func.__module__, func.__name__, + func.__name__, args_str)] + cmd = ' '.join(cmd) + print('Running the following command:\n%s' % cmd) + p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True) + output, err = p.communicate() + exit_code = p.returncode + print(output.decode('utf-8')) + if exit_code != 0: + print(err.decode('utf-8')) + raise Exception("Error on spawned process. 
See output.") + return None + return output.decode('utf-8') + + +def iter_dict(dicts): + try: + return (dict(itertools.izip(dicts, x)) for x in + itertools.product(*dicts.itervalues())) + except AttributeError: + # python 3 + return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values())) + + +def parametrize(**pargs): + for k in pargs.keys(): + if not isinstance(pargs[k], (tuple, list)): + pargs[k] = (pargs[k],) + + def dec(func): + + def pfunc(kwargs0): + # Wrapper so that name encodes parameters + def wrapped(*args, **kwargs): + kwargs.update(**kwargs0) + return func(*args, **kwargs) + wrapped.__name__ = func.__name__ + for k,v in kwargs0.items(): + wrapped.__name__ += "_{}{}".format(k,v) + return wrapped + + def func_param(*args, **kwargs): + out = [] + for ipargs in iter_dict(pargs): + out.append(pfunc(ipargs)(*args, **kwargs)) + return out + + func_param.__name__ = func.__name__ + + return func_param + + return dec + + +np.random.seed(100) +pts2 = np.random.rand(100, 2).astype('float64') +pts3 = np.random.rand(100, 3).astype('float64') +rand_state = np.random.get_state() +left_neighbors_x = [[], # None + [0], + [1], + [2], + [], # None + [], # None + [4, 5], + [5]] +left_neighbors_y = [[], # None + [], # None + [], # None + [], # None + [0, 1], + [4], + [1, 2, 3], + [6]] +left_neighbors_x_periodic = [[3], + [0], + [1], + [2], + [6], + [6, 7], + [4, 5], + [5]] +left_neighbors_y_periodic = [[5], + [5, 7], + [7], + [7], + [0, 1], + [4], + [1, 2, 3], + [6]] + + +@nottest +def make_points_neighbors(periodic=False): + ndim = 2 + npts = 50 + leafsize = 10 + np.random.set_state(rand_state) + pts = np.random.rand(npts, ndim).astype('float64') + left_edge = np.zeros(ndim, 'float64') + right_edge = np.ones(ndim, 'float64') + if periodic: + lx = left_neighbors_x_periodic + ly = left_neighbors_y_periodic + else: + lx = left_neighbors_x + ly = left_neighbors_y + num_leaves = len(lx) + ln = [lx, ly] + rn = [[[] for i in range(num_leaves)] for _ + in range(ndim)] + for d in range(ndim): + for i in range(num_leaves): + for j in ln[d][i]: + rn[d][j].append(i) + for i in range(num_leaves): + rn[d][i] = list(set(rn[d][i])) + return pts, left_edge, right_edge, leafsize, ln, rn + + +@nottest +def make_points(npts, ndim, leafsize=10, distrib='rand', seed=100): + ndim = int(ndim) + npts = int(npts) + leafsize = int(leafsize) + np.random.seed(seed) + LE = 0.0 + RE = 1.0 + left_edge = LE*np.ones(ndim, 'float64') + right_edge = RE*np.ones(ndim, 'float64') + if npts <= 0: + npts = 100 + leafsize = 10 + if ndim == 2: + pts = pts2 + elif ndim == 3: + pts = pts3 + else: + pts = np.random.rand(npts, ndim).astype('float64') + else: + if distrib == 'rand': + pts = np.random.rand(npts, ndim).astype('float64') + elif distrib == 'uniform': + pts = np.random.uniform(low=LE, high=RE, size=(npts, ndim)) + elif distrib in ('gaussian', 'normal'): + pts = np.random.normal(loc=(LE+RE)/2.0, scale=(RE-LE)/4.0, + size=(npts, ndim)) + np.clip(pts, LE, RE) + else: + raise ValueError("Invalid 'distrib': {}".format(distrib)) + return pts, left_edge, right_edge, leafsize + + +@nottest +def run_test(npts, ndim, nproc=0, distrib='rand', periodic=False, leafsize=10, + profile=False, suppress_final_output=False, **kwargs): + r"""Run a rountine with a designated number of points & dimensions on a + selected number of processors. + + Args: + npart (int): Number of particles. + nproc (int): Number of processors. + ndim (int): Number of dimensions. + distrib (str, optional): Distribution that should be used when + generating points. 
Defaults to 'rand'. + periodic (bool, optional): If True, the domain is assumed to be + periodic. Defaults to False. + leafsize (int, optional): Maximum number of points that should be in + an leaf. Defaults to 10. + profile (bool, optional): If True cProfile is used. Defaults to False. + suppress_final_output (bool, optional): If True, the final output + from spawned MPI processes is suppressed. This is mainly for + timing purposes. Defaults to False. + + """ + from yt.utilities.lib.cykdtree import make_tree + unique_str = datetime.today().strftime("%Y%j%H%M%S") + pts, left_edge, right_edge, leafsize = make_points(npts, ndim, + leafsize=leafsize, + distrib=distrib) + # Set keywords for multiprocessing version + if nproc > 1: + kwargs['suppress_final_output'] = suppress_final_output + if profile: + kwargs['profile'] = '{}_mpi_profile.dat'.format(unique_str) + # Run + if profile: + pr = cProfile.Profile() + t0 = time.time() + pr.enable() + make_tree(pts, nproc=nproc, left_edge=left_edge, right_edge=right_edge, + periodic=periodic, leafsize=leafsize, **kwargs) + if profile: + pr.disable() + t1 = time.time() + ps = pstats.Stats(pr) + ps.add(kwargs['profile']) + if isinstance(profile, str): + ps.dump_stats(profile) + print("Stats saved to {}".format(profile)) + else: + sort_key = 'tottime' + ps.sort_stats(sort_key).print_stats(25) + # ps.sort_stats(sort_key).print_callers(5) + print("{} s according to 'time'".format(t1-t0)) + return ps diff --git a/yt/utilities/lib/cykdtree/tests/scaling.py b/yt/utilities/lib/cykdtree/tests/scaling.py new file mode 100644 index 00000000000..0213c0cf74e --- /dev/null +++ b/yt/utilities/lib/cykdtree/tests/scaling.py @@ -0,0 +1,186 @@ +r"""Routines for tracking the scaling of the triangulation routines.""" +import numpy as np +import time +import os +import cProfile +import pstats +from yt.utilities.lib.cykdtree.tests import run_test + + +def stats_run(npart, nproc, ndim, periodic=False, overwrite=False, + display=False, suppress_final_output=False): + r"""Get timing stats using :package:`cProfile`. + + Args: + npart (int): Number of particles. + nproc (int): Number of processors. + ndim (int): Number of dimensions. + periodic (bool, optional): If True, the domain is assumed to be + periodic. Defaults to False. + overwrite (bool, optional): If True, the existing file for this + set of input parameters if overwritten. Defaults to False. + suppress_final_output (bool, optional): If True, the final output + from spawned MPI processes is suppressed. This is mainly for + timing purposes. Defaults to False. + display (bool, optional): If True, display the profile results. + Defaults to False. + + """ + perstr = "" + outstr = "" + if periodic: + perstr = "_periodic" + if suppress_final_output: + outstr = "_noout" + fname_stat = 'stat_{}part_{}proc_{}dim{}{}.txt'.format( + npart, nproc, ndim, perstr, outstr) + if overwrite or not os.path.isfile(fname_stat): + cProfile.run( + "from yt.utilities.lib.cykdtree.tests import run_test; "+ + "run_test({}, {}, nproc={}, ".format(npart, ndim, nproc) + + "periodic={}, ".format(periodic) + + "suppress_final_output={})".format(suppress_final_output), + fname_stat) + if display: + p = pstats.Stats(fname_stat) + p.sort_stats('time').print_stats(10) + return p + return fname_stat + + +def time_run(npart, nproc, ndim, nrep=1, periodic=False, leafsize=10, + suppress_final_output=False): + r"""Get runing times using :package:`time`. + + Args: + npart (int): Number of particles. + nproc (int): Number of processors. 
+ ndim (int): Number of dimensions. + nrep (int, optional): Number of times the run should be performed to + get an average. Defaults to 1. + periodic (bool, optional): If True, the domain is assumed to be + periodic. Defaults to False. + leafsize (int, optional): The maximum number of points that should be + in any leaf in the tree. Defaults to 10. + suppress_final_output (bool, optional): If True, the final output + from spawned MPI processes is suppressed. This is mainly for + timing purposes. Defaults to False. + + """ + times = np.empty(nrep, 'float') + for i in range(nrep): + t1 = time.time() + run_test(npart, ndim, nproc=nproc, + periodic=periodic, leafsize=leafsize, + suppress_final_output=suppress_final_output) + t2 = time.time() + times[i] = t2 - t1 + return np.mean(times), np.std(times) + + +def strong_scaling(npart=1e6, nrep=1, periodic=False, + leafsize=10, overwrite=True, + suppress_final_output=False): + r"""Plot the scaling with number of processors for a particular function. + + Args: + npart (int, optional): Number of particles. Defaults to 1e6. + nrep (int, optional): Number of times the run should be performed to + get an average. Defaults to 1. + periodic (bool, optional): If True, the domain is assumed to be + periodic. Defaults to False. + leafsize (int, optional): The maximum number of points that should be + in any leaf in the tree. Defaults to 10. + overwrite (bool, optional): If True, the existing file for this + set of input parameters if overwritten. Defaults to False. + suppress_final_output (bool, optional): If True, the final output + from spawned MPI processes is suppressed. This is mainly for + timing purposes. Defaults to False. + + """ + import matplotlib.pyplot as plt + npart = int(npart) + perstr = "" + outstr = "" + if periodic: + perstr = "_periodic" + if suppress_final_output: + outstr = "_noout" + fname_plot = 'plot_strong_scaling_nproc_{}part{}_{}leafsize{}.png'.format( + npart, perstr, leafsize, outstr) + nproc_list = [1, 2, 4, 8]#, 16] + ndim_list = [2, 3, 4] + clr_list = ['b', 'r', 'g', 'm'] + times = np.empty((len(nproc_list), len(ndim_list), 2), 'float') + for j, nproc in enumerate(nproc_list): + for i, ndim in enumerate(ndim_list): + times[j, i, 0], times[j, i, 1] = time_run( + npart, nproc, ndim, nrep=nrep, + periodic=periodic, leafsize=leafsize, + suppress_final_output=suppress_final_output) + print("Finished {}D on {}.".format(ndim, nproc)) + fig, axs = plt.subplots(1, 1) + for i in range(len(ndim_list)): + ndim = ndim_list[i] + clr = clr_list[i] + axs.errorbar(nproc_list, times[:, i, 0], yerr=times[:, i, 1], + fmt=clr, label='ndim = {}'.format(ndim)) + axs.set_xlabel("# of Processors") + axs.set_ylabel("Time (s)") + axs.legend() + fig.savefig(fname_plot) + print(' '+fname_plot) + + +def weak_scaling(npart=1e4, nrep=1, periodic=False, leafsize=10, + overwrite=True, suppress_final_output=False): + r"""Plot the scaling with number of processors with a constant number of + particles per processor for a particular function. + + Args: + npart (int, optional): Number of particles per processor. Defaults to + 1e4. + nrep (int, optional): Number of times the run should be performed to + get an average. Defaults to 1. + periodic (bool, optional): If True, the domain is assumed to be + periodic. Defaults to False. + leafsize (int, optional): The maximum number of points that should be + in any leaf in the tree. Defaults to 10. + overwrite (bool, optional): If True, the existing file for this + set of input parameters if overwritten. 
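time_run() above is the building block for both scaling plots; a hypothetical stand-alone timing sketch (the point count and repetition count are arbitrary):

    from yt.utilities.lib.cykdtree.tests.scaling import time_run

    # Average three serial (nproc=1) builds of a 3D tree over 1e5 points.
    mean_t, std_t = time_run(int(1e5), 1, 3, nrep=3, leafsize=10)
    print("tree construction: %.3f +/- %.3f s" % (mean_t, std_t))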
Defaults to False. + suppress_final_output (bool, optional): If True, the final output + from spawned MPI processes is suppressed. This is mainly for + timing purposes. Defaults to False. + + """ + import matplotlib.pyplot as plt + npart = int(npart) + perstr = "" + outstr = "" + if periodic: + perstr = "_periodic" + if suppress_final_output: + outstr = "_noout" + fname_plot = 'plot_weak_scaling_nproc_{}part{}_{}leafsize{}.png'.format( + npart, perstr, leafsize, outstr) + nproc_list = [1, 2, 4, 8, 16] + ndim_list = [2, 3] + clr_list = ['b', 'r', 'g', 'm'] + times = np.empty((len(nproc_list), len(ndim_list), 2), 'float') + for j, nproc in enumerate(nproc_list): + for i, ndim in enumerate(ndim_list): + times[j, i, 0], times[j, i, 1] = time_run( + npart*nproc, nproc, ndim, nrep=nrep, + periodic=periodic, leafsize=leafsize, + suppress_final_output=suppress_final_output) + fig, axs = plt.subplots(1, 1) + for i in range(len(ndim_list)): + ndim = ndim_list[i] + clr = clr_list[i] + axs.errorbar(nproc_list, times[:, i, 0], yerr=times[:, i, 1], + fmt=clr, label='ndim = {}'.format(ndim)) + axs.set_xlabel("# of Processors") + axs.set_ylabel("Time (s)") + axs.legend() + fig.savefig(fname_plot) + print(' '+fname_plot) diff --git a/yt/utilities/lib/cykdtree/tests/test_kdtree.py b/yt/utilities/lib/cykdtree/tests/test_kdtree.py new file mode 100644 index 00000000000..9a0445578f2 --- /dev/null +++ b/yt/utilities/lib/cykdtree/tests/test_kdtree.py @@ -0,0 +1,112 @@ +import numpy as np +import time +import tempfile +from nose.tools import assert_raises +import yt.utilities.lib.cykdtree as cykdtree +from yt.utilities.lib.cykdtree.tests import ( + parametrize, + make_points, + make_points_neighbors, +) + + +@parametrize(npts=100, ndim=(2, 3), periodic=(False, True), + use_sliding_midpoint=(False, True)) +def test_PyKDTree(npts=100, ndim=2, periodic=False, use_sliding_midpoint=False): + pts, le, re, ls = make_points(npts, ndim) + cykdtree.PyKDTree(pts, le, re, leafsize=ls, periodic=periodic, + use_sliding_midpoint=use_sliding_midpoint) + + +def test_PyKDTree_errors(): + pts, le, re, ls = make_points(100, 2) + assert_raises(ValueError, cykdtree.PyKDTree, pts, le, re, + leafsize=1) + + +@parametrize(npts=100, ndim=(2, 3), periodic=(False, True)) +def test_search(npts=100, ndim=2, periodic=False): + pts, le, re, ls = make_points(npts, ndim) + tree = cykdtree.PyKDTree(pts, le, re, leafsize=ls, periodic=periodic) + pos_list = [le, (le+re)/2.] 
+ if periodic: + pos_list.append(re) + for pos in pos_list: + leaf = tree.get(pos) + leaf.neighbors + + +@parametrize(npts=100, ndim=(2, 3)) +def test_search_errors(npts=100, ndim=2): + pts, le, re, ls = make_points(npts, ndim) + tree = cykdtree.PyKDTree(pts, le, re, leafsize=ls) + assert_raises(ValueError, tree.get, re) + + +@parametrize(periodic=(False, True)) +def test_neighbors(periodic=False): + pts, le, re, ls, left_neighbors, right_neighbors = make_points_neighbors( + periodic=periodic) + tree = cykdtree.PyKDTree(pts, le, re, leafsize=ls, periodic=periodic) + for leaf in tree.leaves: + out_str = str(leaf.id) + try: + for d in range(tree.ndim): + out_str += '\nleft: {} {} {}'.format(d, leaf.left_neighbors[d], + left_neighbors[d][leaf.id]) + assert(len(left_neighbors[d][leaf.id]) == + len(leaf.left_neighbors[d])) + for i in range(len(leaf.left_neighbors[d])): + assert(left_neighbors[d][leaf.id][i] == + leaf.left_neighbors[d][i]) + out_str += '\nright: {} {} {}'.format(d, leaf.right_neighbors[d], + right_neighbors[d][leaf.id]) + assert(len(right_neighbors[d][leaf.id]) == + len(leaf.right_neighbors[d])) + for i in range(len(leaf.right_neighbors[d])): + assert(right_neighbors[d][leaf.id][i] == + leaf.right_neighbors[d][i]) + except: + for leaf in tree.leaves: + print(leaf.id, leaf.left_edge, leaf.right_edge) + print(out_str) + raise + + +@parametrize(npts=100, ndim=(2,3), periodic=(False, True)) +def test_get_neighbor_ids(npts=100, ndim=2, periodic=False): + pts, le, re, ls = make_points(npts, ndim) + tree = cykdtree.PyKDTree(pts, le, re, leafsize=ls, periodic=periodic) + pos_list = [le, (le+re)/2.] + if periodic: + pos_list.append(re) + for pos in pos_list: + tree.get_neighbor_ids(pos) + + +def time_tree_construction(Ntime, LStime, ndim=2): + pts, le, re, ls = make_points(Ntime, ndim, leafsize=LStime) + t0 = time.time() + cykdtree.PyKDTree(pts, le, re, leafsize=LStime) + t1 = time.time() + print("{} {}D points, leafsize {}: took {} s".format(Ntime, ndim, LStime, t1-t0)) + + +def time_neighbor_search(Ntime, LStime, ndim=2): + pts, le, re, ls = make_points(Ntime, ndim, leafsize=LStime) + tree = cykdtree.PyKDTree(pts, le, re, leafsize=LStime) + t0 = time.time() + tree.get_neighbor_ids(0.5*np.ones(tree.ndim, 'double')) + t1 = time.time() + print("{} {}D points, leafsize {}: took {} s".format(Ntime, ndim, LStime, t1-t0)) + +def test_save_load(): + for periodic in (True, False): + for ndim in range(1, 5): + pts, le, re, ls = make_points(100, ndim) + tree = cykdtree.PyKDTree(pts, le, re, leafsize=ls, + periodic=periodic, data_version=ndim+12) + with tempfile.NamedTemporaryFile(delete=False) as tf: + tree.save(tf.name) + restore_tree = cykdtree.PyKDTree.from_file(tf.name) + tree.assert_equal(restore_tree) diff --git a/yt/utilities/lib/cykdtree/tests/test_plot.py b/yt/utilities/lib/cykdtree/tests/test_plot.py new file mode 100644 index 00000000000..29e7f6b8e39 --- /dev/null +++ b/yt/utilities/lib/cykdtree/tests/test_plot.py @@ -0,0 +1,15 @@ +import os +from yt.utilities.lib.cykdtree.plot import plot2D_serial +from yt.utilities.lib.cykdtree.kdtree import PyKDTree +from yt.utilities.lib.cykdtree.tests import make_points + + +def test_plot2D_serial(): + fname_test = "test_plot2D_serial.png" + pts, le, re, ls = make_points(100, 2) + tree = PyKDTree(pts, le, re, leafsize=ls) + axs = plot2D_serial(tree, pts, title="Serial Test", plotfile=fname_test, + label_boxes=True) + os.remove(fname_test) + # plot2D_serial(tree, pts, axs=axs) + del axs diff --git a/yt/utilities/lib/cykdtree/tests/test_utils.py 
b/yt/utilities/lib/cykdtree/tests/test_utils.py new file mode 100644 index 00000000000..9d42148b19a --- /dev/null +++ b/yt/utilities/lib/cykdtree/tests/test_utils.py @@ -0,0 +1,175 @@ +from __future__ import division + +import numpy as np +from nose.tools import assert_equal +from yt.utilities.lib.cykdtree.tests import parametrize, assert_less_equal +from yt.utilities.lib.cykdtree import utils + +def test_max_pts(): + pts = np.arange(5*3).reshape((5, 3)).astype('float64') + out = utils.py_max_pts(pts) + np.testing.assert_allclose(out, np.max(pts, axis=0)) + + +def test_min_pts(): + pts = np.arange(5*3).reshape((5, 3)).astype('float64') + out = utils.py_min_pts(pts) + np.testing.assert_allclose(out, np.min(pts, axis=0)) + + +@parametrize(N=(10), ndim=(2, 3), Lidx=(0,5), Ridx=(5,9)) +def test_argmax_pts_dim(N=10, ndim=2, Lidx=0, Ridx=9): + d = ndim-1 + pts = np.random.rand(N, ndim).astype('float64') + idx = np.argsort(pts[:, d]).astype('uint64') + out = utils.py_argmax_pts_dim(pts, idx, d, Lidx, Ridx) + assert_equal(out, np.argmax(pts[idx[Lidx:(Ridx+1)], d]) + Lidx) + + +@parametrize(N=(10), ndim=(2, 3), Lidx=(0,5), Ridx=(5,9)) +def test_argmin_pts_dim(N=10, ndim=2, Lidx=0, Ridx=9): + d = ndim-1 + pts = np.random.rand(N, ndim).astype('float64') + idx = np.argsort(pts[:, d]).astype('uint64') + out = utils.py_argmin_pts_dim(pts, idx, d, Lidx, Ridx) + assert_equal(out, np.argmin(pts[idx[Lidx:(Ridx+1)], d]) + Lidx) + + +@parametrize(N=(0, 10, 11), ndim=(2, 3)) +def test_quickSort(N=10, ndim=2): + d = ndim-1 + np.random.seed(10) + pts = np.random.rand(N, ndim).astype('float64') + idx = utils.py_quickSort(pts, d) + assert_equal(idx.size, N) + if (N != 0): + np.testing.assert_allclose(idx, np.argsort(pts[:, d])) + + +@parametrize(N=(0, 10, 11), ndim=(2, 3)) +def test_insertSort(N=10, ndim=2): + d = ndim-1 + np.random.seed(10) + pts = np.random.rand(N, ndim).astype('float64') + idx = utils.py_insertSort(pts, d) + assert_equal(idx.size, N) + if (N != 0): + np.testing.assert_allclose(idx, np.argsort(pts[:, d])) + + +@parametrize(N=(0, 10, 11), ndim=(2, 3)) +def test_pivot(N=10, ndim=2): + d = ndim-1 + np.random.seed(10) + pts = np.random.rand(N, ndim).astype('float64') + q, idx = utils.py_pivot(pts, d) + if (N == 0): + np.testing.assert_equal(q, -1) + else: + piv = pts[idx[q], d] + nmax = (7*N/10 + 6) + assert_less_equal(np.sum(pts[:, d] < piv), nmax) + assert_less_equal(np.sum(pts[:, d] > piv), nmax) + + + +@parametrize(N=(0, 10, 11), ndim=(2, 3)) +def test_partition(N=10, ndim=2): + d = ndim-1 + p = 0 + np.random.seed(10) + pts = np.random.rand(N, ndim).astype('float64') + q, idx = utils.py_partition(pts, d, p) + if (N == 0): + assert_equal(q, -1) + else: + piv = pts[p, d] + np.testing.assert_approx_equal(pts[idx[q], d], piv) + np.testing.assert_array_less(pts[idx[:q], d], piv) + np.testing.assert_array_less(piv, pts[idx[(q+1):], d]) + + +@parametrize(N=(0, 10, 11), ndim=(2, 3)) +def test_partition_given_pivot(N=10, ndim=2): + d = ndim-1 + np.random.seed(10) + pts = np.random.rand(N, ndim).astype('float64') + if N == 0: + piv_list = [0.5] + else: + piv_list = [0.5, np.median(pts[:, d])] + for piv in piv_list: + q, idx = utils.py_partition_given_pivot(pts, d, piv) + if (N == 0): + assert_equal(q, -1) + else: + assert_less_equal(pts[idx[q], d], piv) + np.testing.assert_array_less(pts[idx[:q], d], piv) + np.testing.assert_array_less(piv, pts[idx[(q+1):], d]) + + +@parametrize(N=(0, 10, 11), ndim=(2, 3)) +def test_select(N=10, ndim=2): + d = ndim-1 + np.random.seed(10) + pts = np.random.rand(N, 
ndim).astype('float64') + p = int(N)//2 + int(N)%2 + q, idx = utils.py_select(pts, d, p) + assert_equal(idx.size, N) + if (N == 0): + assert_equal(q, -1) + else: + assert_equal(q, p-1) + med = np.median(pts[:, d]) + np.testing.assert_array_less(pts[idx[:q], d], med) + np.testing.assert_array_less(med, pts[idx[(q+1):], d]) + if (N%2): + np.testing.assert_approx_equal(pts[idx[q], d], med) + else: + np.testing.assert_array_less(pts[idx[q], d], med) + + +@parametrize(N=(0, 10, 11), ndim=(2, 3), use_sliding_midpoint=(False, True)) +def test_split(N=10, ndim=2, use_sliding_midpoint=False): + np.random.seed(10) + pts = np.random.rand(N, ndim).astype('float64') + p = int(N)//2 + int(N)%2 + q, d, idx = utils.py_split(pts, use_sliding_midpoint=use_sliding_midpoint) + assert_equal(idx.size, N) + if (N == 0): + assert_equal(q, -1) + else: + if use_sliding_midpoint: + # Midpoint + med = 0.5*(np.min(pts[:,d]) + np.max(pts[:,d])) + np.testing.assert_array_less(pts[idx[:q], d], med) + np.testing.assert_array_less(med, pts[idx[(q+1):], d]) + np.testing.assert_array_less(pts[idx[q], d], med) + # Sliding midpoint (slide to minimum) + q, d, idx = utils.py_split(pts, + mins=-1*np.ones(ndim), + maxs=np.ones(ndim), + use_sliding_midpoint=True) + med = np.min(pts[:,d]) + assert_equal(q, 0) + np.testing.assert_array_less(pts[idx[:q], d], med) + np.testing.assert_array_less(med, pts[idx[(q+1):], d]) + np.testing.assert_approx_equal(pts[idx[q], d], med) + # Sliding midpoint (slide to maximum) + q, d, idx = utils.py_split(pts, + mins=np.zeros(ndim), + maxs=2*np.ones(ndim), + use_sliding_midpoint=True) + med = np.max(pts[:,d]) + assert_equal(q, N-2) + np.testing.assert_array_less(pts[idx[:(q+1)], d], med) + np.testing.assert_approx_equal(pts[idx[q+1], d], med) + else: + assert_equal(q, p-1) + med = np.median(pts[:, d]) + np.testing.assert_array_less(pts[idx[:q], d], med) + np.testing.assert_array_less(med, pts[idx[(q+1):], d]) + if (N%2): + np.testing.assert_approx_equal(pts[idx[q], d], med) + else: + np.testing.assert_array_less(pts[idx[q], d], med) diff --git a/yt/utilities/lib/cykdtree/utils.pxd b/yt/utilities/lib/cykdtree/utils.pxd new file mode 100644 index 00000000000..80b148354d4 --- /dev/null +++ b/yt/utilities/lib/cykdtree/utils.pxd @@ -0,0 +1,43 @@ +cimport numpy as np +from libcpp.vector cimport vector +from libcpp.pair cimport pair +from libcpp cimport bool +from libc.stdint cimport uint32_t, uint64_t, int64_t, int32_t + + +cdef extern from "c_utils.hpp": + double* max_pts(double *pts, uint64_t n, uint64_t m) + double* min_pts(double *pts, uint64_t n, uint64_t m) + uint64_t argmax_pts_dim(double *pts, uint64_t *idx, + uint32_t m, uint32_t d, + uint64_t Lidx, uint64_t Ridx) + uint64_t argmin_pts_dim(double *pts, uint64_t *idx, + uint32_t m, uint32_t d, + uint64_t Lidx, uint64_t Ridx) + void quickSort(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r) + int64_t partition(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, int64_t p) + int64_t partition_given_pivot(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, double pivot) + int64_t select(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r, int64_t n) + int64_t pivot(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r) + void insertSort(double *pts, uint64_t *idx, + uint32_t ndim, uint32_t d, + int64_t l, int64_t r) + uint32_t split(double *all_pts, uint64_t *all_idx, + uint64_t Lidx, uint64_t n, uint32_t 
ndim, + double *mins, double *maxes, + int64_t &split_idx, double &split_val) + uint32_t split(double *all_pts, uint64_t *all_idx, + uint64_t Lidx, uint64_t n, uint32_t ndim, + double *mins, double *maxes, + int64_t &split_idx, double &split_val, + bool use_sliding_midpoint) diff --git a/yt/utilities/lib/cykdtree/utils.pyx b/yt/utilities/lib/cykdtree/utils.pyx new file mode 100644 index 00000000000..4d3696e0115 --- /dev/null +++ b/yt/utilities/lib/cykdtree/utils.pyx @@ -0,0 +1,341 @@ +import numpy as np +cimport numpy as np +cimport cython +from libcpp.vector cimport vector +from libcpp.pair cimport pair +from libcpp cimport bool as cbool +from libc.stdint cimport uint32_t, uint64_t, int64_t, int32_t + +import copy + +def py_max_pts(np.ndarray[np.float64_t, ndim=2] pos): + r"""Get the maximum of points along each coordinate. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + + Returns: + np.ndarray of float64: Maximum of pos along each coordinate. + + """ + cdef uint64_t n = pos.shape[0] + cdef uint32_t m = pos.shape[1] + cdef np.float64_t* cout = max_pts(&pos[0,0], n, m) + cdef uint32_t i = 0 + cdef np.ndarray[np.float64_t] out = np.zeros(m, 'float64') + for i in range(m): + out[i] = cout[i] + #free(cout) + return out + +def py_min_pts(np.ndarray[np.float64_t, ndim=2] pos): + r"""Get the minimum of points along each coordinate. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + + Returns: + np.ndarray of float64: Minimum of pos along each coordinate. + + """ + cdef uint64_t n = pos.shape[0] + cdef uint32_t m = pos.shape[1] + cdef np.float64_t* cout = min_pts(&pos[0,0], n, m) + cdef uint32_t i = 0 + cdef np.ndarray[np.float64_t] out = np.zeros(m, 'float64') + for i in range(m): + out[i] = cout[i] + #free(cout) + return out + +def py_argmax_pts_dim(np.ndarray[np.float64_t, ndim=2] pos, + uint64_t[:] idx, + np.uint32_t d, int Lidx0, int Ridx0): + r"""Get the maximum of points along one dimension for a subset of the + point indices. This is essentially max(pos[idx[Lidx:(Ridx+1)], d]). + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + idx (np.ndarray of uint64_t): (n,) array of indices for positions. + d (uint32_t): Dimension to compute maximum along. + Lidx (int): Index in idx that search should begin at. + Ridx (int): Index in idx that search should end at. + + Returns: + uint64_t: Index in idx that provides maximum position in the subset + indices along dimension d. + + """ + cdef np.intp_t n = pos.shape[0] + cdef uint32_t m = pos.shape[1] + cdef uint64_t Lidx = 0 + cdef uint64_t Ridx = (n-1) + if (Lidx0 < 0): + Lidx = (n + Lidx0) + elif Lidx0 >= n: + raise Exception("Left index (%d) exceeds size of positions array (%d)." + % (Lidx0, n)) + else: + Lidx = Lidx0 + if (Ridx0 < 0): + Ridx = (n + Ridx0) + elif Ridx0 >= n: + raise Exception("Right index (%d) exceeds size of positions array (%d)." + % (Ridx0, n)) + else: + Ridx = Ridx0 + cdef np.uint64_t cout = Lidx + if (Ridx > Lidx): + cout = argmax_pts_dim(&pos[0,0], &idx[0], m, d, Lidx, Ridx) + return cout + +def py_argmin_pts_dim(np.ndarray[np.float64_t, ndim=2] pos, + uint64_t[:] idx, + np.uint32_t d, int Lidx0, int Ridx0): + r"""Get the minimum of points along one dimension for a subset of the + point indices. This is essentially min(pos[idx[Lidx:(Ridx+1)], d]). + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + idx (np.ndarray of uint64_t): (n,) array of indices for positions. + d (uint32_t): Dimension to compute minimum along. 
+ Lidx (int): Index in idx that search should begin at. + Ridx (int): Index in idx that search should end at. + + Returns: + uint64_t: Index in idx that provides minimum position in the subset + indices along dimension d. + + """ + cdef uint64_t n = pos.shape[0] + cdef uint32_t m = pos.shape[1] + cdef uint64_t Lidx = 0 + cdef uint64_t Ridx = n + if (Lidx0 < 0): + Lidx = (n + Lidx0) + else: + Lidx = Lidx0 + if (Ridx0 < 0): + Ridx = (n + Ridx0) + else: + Ridx = Ridx0 + cdef np.uint64_t cout = Lidx + if (Ridx > Lidx): + cout = argmin_pts_dim(&pos[0,0], &idx[0], m, d, Lidx, Ridx) + return cout + +def py_quickSort(np.ndarray[np.float64_t, ndim=2] pos, np.uint32_t d): + r"""Get the indices required to sort coordinates along one dimension. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + d (np.uint32_t): Dimension that pos should be sorted along. + + Returns: + np.ndarray of uint64: Indices that sort pos along dimension d. + + """ + cdef np.intp_t ndim = pos.shape[1] + cdef int64_t l = 0 + cdef int64_t r = pos.shape[0]-1 + cdef uint64_t[:] idx + idx = np.arange(pos.shape[0]).astype('uint64') + cdef double *ptr_pos = NULL + cdef uint64_t *ptr_idx = NULL + if pos.shape[0] != 0: + ptr_pos = &pos[0,0] + ptr_idx = &idx[0] + quickSort(ptr_pos, ptr_idx, ndim, d, l, r) + return idx + +def py_insertSort(np.ndarray[np.float64_t, ndim=2] pos, np.uint32_t d): + r"""Get the indices required to sort coordinates along one dimension. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + d (np.uint32_t): Dimension that pos should be sorted along. + + Returns: + np.ndarray of uint64: Indices that sort pos along dimension d. + + """ + cdef np.intp_t ndim = pos.shape[1] + cdef int64_t l = 0 + cdef int64_t r = pos.shape[0]-1 + cdef uint64_t[:] idx + idx = np.arange(pos.shape[0]).astype('uint64') + cdef double *ptr_pos = NULL + cdef uint64_t *ptr_idx = NULL + if pos.shape[0] != 0: + ptr_pos = &pos[0,0] + ptr_idx = &idx[0] + insertSort(ptr_pos, ptr_idx, ndim, d, l, r) + return idx + +def py_pivot(np.ndarray[np.float64_t, ndim=2] pos, np.uint32_t d): + r"""Get the index of the median of medians along one dimension and indices + that partition pos according to the median of medians. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + d (np.uint32_t): Dimension that pos should be partitioned along. + + Returns: + tuple of int64 and np.ndarray of uint64: Index q of idx that is the + pivot. All elements of idx before the pivot will be less than + the pivot. If there is an odd number of points, the pivot will + be the median. + + """ + cdef np.intp_t ndim = pos.shape[1] + cdef int64_t l = 0 + cdef int64_t r = pos.shape[0]-1 + cdef uint64_t[:] idx + idx = np.arange(pos.shape[0]).astype('uint64') + cdef double *ptr_pos = NULL + cdef uint64_t *ptr_idx = NULL + if pos.shape[0] != 0: + ptr_pos = &pos[0,0] + ptr_idx = &idx[0] + cdef int64_t q = pivot(ptr_pos, ptr_idx, ndim, d, l, r) + return q, idx + +def py_partition(np.ndarray[np.float64_t, ndim=2] pos, np.uint32_t d, + np.int64_t p): + r"""Get the indices required to partition coordinates along one dimension. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + d (np.uint32_t): Dimension that pos should be partitioned along. + p (np.int64_t): Element of pos[:,d] that should be used as the pivot + to partition pos. 
+ + Returns: + tuple of int64 and np.ndarray of uint64: Location of the pivot in the + partitioned array and the indices required to partition the array + such that elements before the pivot are smaller and elements after + the pivot are larger. + + """ + cdef np.intp_t ndim = pos.shape[1] + cdef int64_t l = 0 + cdef int64_t r = pos.shape[0]-1 + cdef uint64_t[:] idx + idx = np.arange(pos.shape[0]).astype('uint64') + cdef double *ptr_pos = NULL + cdef uint64_t *ptr_idx = NULL + if pos.shape[0] != 0: + ptr_pos = &pos[0,0] + ptr_idx = &idx[0] + cdef int64_t q = partition(ptr_pos, ptr_idx, ndim, d, l, r, p) + return q, idx + +def py_partition_given_pivot(np.ndarray[np.float64_t, ndim=2] pos, + np.uint32_t d, np.float64_t pval): + r"""Get the indices required to partition coordinates along one dimension. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + d (np.uint32_t): Dimension that pos should be partitioned along. + pval (np.float64_t): Value that should be used to partition pos. + + Returns: + tuple of int64 and np.ndarray of uint64: Location of the largest value + that is smaller than pval in partitioned array and the indices + required to partition the array such that elements before the pivot + are smaller and elements after the pivot are larger. + + """ + cdef np.intp_t ndim = pos.shape[1] + cdef int64_t l = 0 + cdef int64_t r = pos.shape[0]-1 + cdef uint64_t[:] idx + idx = np.arange(pos.shape[0]).astype('uint64') + cdef double *ptr_pos = NULL + cdef uint64_t *ptr_idx = NULL + if pos.shape[0] != 0: + ptr_pos = &pos[0,0] + ptr_idx = &idx[0] + cdef int64_t q = partition_given_pivot(ptr_pos, ptr_idx, ndim, d, l, r, + pval) + return q, idx + +def py_select(np.ndarray[np.float64_t, ndim=2] pos, np.uint32_t d, + np.int64_t t): + r"""Get the indices required to partition coordiantes such that the first + t elements in pos[:,d] are the smallest t elements in pos[:,d]. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + d (np.uint32_t): Dimension that pos should be partitioned along. + t (np.int64_t): Number of smallest elements in pos[:,d] that should be + partitioned. + + Returns: + tuple of int64 and np.ndarray of uint64: Location of element t in the + partitioned array and the indices required to partition the array + such that elements before element t are smaller and elements after + the pivot are larger. + + """ + cdef np.intp_t ndim = pos.shape[1] + cdef int64_t l = 0 + cdef int64_t r = pos.shape[0]-1 + cdef uint64_t[:] idx + idx = np.arange(pos.shape[0]).astype('uint64') + cdef double *ptr_pos = NULL + cdef uint64_t *ptr_idx = NULL + if pos.shape[0] != 0: + ptr_pos = &pos[0,0] + ptr_idx = &idx[0] + cdef int64_t q = select(ptr_pos, ptr_idx, ndim, d, l, r, t) + return q, idx + + +def py_split(np.ndarray[np.float64_t, ndim=2] pos, + np.ndarray[np.float64_t, ndim=1] mins = None, + np.ndarray[np.float64_t, ndim=1] maxs = None, + bool use_sliding_midpoint = False): + r"""Get the indices required to split the positions equally along the + largest dimension. + + Args: + pos (np.ndarray of float64): (n,m) array of n m-D coordinates. + mins (np.ndarray of float64, optional): (m,) array of mins. Defaults + to None and is set to mins of pos along each dimension. + maxs (np.ndarray of float64, optional): (m,) array of maxs. Defaults + to None and is set to maxs of pos along each dimension. + use_sliding_midpoint (bool, optional): If True, the sliding midpoint + rule is used to split the positions. Defaults to False. 
+ + Returns: + tuple(int64, uint32, np.ndarray of uint64): The index of the split in + the partitioned array, the dimension of the split, and the indices + required to partition the array. + + """ + cdef np.intp_t npts = pos.shape[0] + cdef np.intp_t ndim = pos.shape[1] + cdef uint64_t Lidx = 0 + cdef uint64_t[:] idx + idx = np.arange(pos.shape[0]).astype('uint64') + cdef double *ptr_pos = NULL + cdef uint64_t *ptr_idx = NULL + cdef double *ptr_mins = NULL + cdef double *ptr_maxs = NULL + if (npts != 0) and (ndim != 0): + if mins is None: + mins = np.min(pos, axis=0) + if maxs is None: + maxs = np.max(pos, axis=0) + ptr_pos = &pos[0,0] + ptr_idx = &idx[0] + ptr_mins = &mins[0] + ptr_maxs = &maxs[0] + cdef int64_t q = 0 + cdef double split_val = 0.0 + cdef cbool c_midpoint_flag = use_sliding_midpoint + cdef uint32_t dsplit = split(ptr_pos, ptr_idx, Lidx, npts, ndim, + ptr_mins, ptr_maxs, q, split_val, + c_midpoint_flag) + return q, dsplit, idx diff --git a/yt/utilities/lib/cykdtree/windows/stdint.h b/yt/utilities/lib/cykdtree/windows/stdint.h new file mode 100644 index 00000000000..4fe0ef9a9b2 --- /dev/null +++ b/yt/utilities/lib/cykdtree/windows/stdint.h @@ -0,0 +1,259 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2013 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the product nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" 
+#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#if _MSC_VER >= 1600 // [ +#include +#else // ] _MSC_VER >= 1600 [ + +#include + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we should wrap include with 'extern "C++" {}' +// or compiler give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#ifdef __cplusplus +extern "C" { +#endif +# include +#ifdef __cplusplus +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. +#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX 
+#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with . +// Check out Issue 9 for the details. +#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#endif // _MSC_VER >= 1600 ] + +#endif // _MSC_STDINT_H_ ] diff --git a/yt/utilities/lib/cyoctree.pyx b/yt/utilities/lib/cyoctree.pyx new file mode 100644 index 00000000000..b2bf5b14d20 --- /dev/null +++ b/yt/utilities/lib/cyoctree.pyx @@ -0,0 +1,672 @@ +""" +CyOctree building, loading and refining routines + + + +""" + + +cimport numpy as np +import numpy as np +cimport cython +import struct + +from libcpp.vector cimport vector +from libcpp cimport bool +cimport libc.math as math +from libc.stdlib cimport malloc, free + +from yt.geometry.particle_deposit cimport \ + kernel_func, get_kernel_func + +################################################################################ +# OCTREE IMPLEMENTATION DETAILS # +################################################################################ +# The tree is made of of nodes, which are C structs, containing the left edge, +# the right end and other details to traverse the tree (i.e child and parent +# indexes in the nodes container). 
+# +# The nodes are stored in an STL vector - as such - it makes sense that the +# parent and child addresses are long's which just describe the index in the STL +# vector. i.e to access a parent, +# &nodes[node.parent] will return the address of the parent node +# The children are contiguous in memory, so the first child can be accessed with +# &nodes[node.children] and the second with, +# &nodes[node.children + 1] etc +# In general we avoid memory addresses in favour of indexes so the reallocation +# of the STL doesn't invalidate those. +# +# The tree is built with a non-recursive algorithm. We start by building the +# root node. We enter the root node, and use the split_helper function to split +# into 2^(3*density_factor) children. We recursively work out which particles +# are in each child by sorting the particle positions array so values less than +# the split value are on one side, and values greater on the other. We then only +# need to store the first and last particle in a node and from that we know +# every particle within the node, in the octree sorted positions array. +# NOTE: Despite being called an octree, the tree is actually of splitting into +# different numbers of children +# +# Once the first lot of children have been generated, they will be stored in the +# STL vector. The STL container will now contain the following, +# nodes = [root, child 1, child 2, child 3, ...] +# where only the root has been processed. +# We then loop through this vector, calling the process_node method and storing +# new children as we generate them and then eventually processing those until no +# new children are generated. +# +# A node will split into children if the node contains more particles than +# n_ref. Below this value, the node will not split, this is called a leaf. +# +# To maintain backwards compatibility we also have the cell structure. This +# means that each leaf is split into 2^(3*over_refine_factor) cells. When an SPH +# field is interpolated onto the octree, we interpolate the value at the centre +# of each cell. The cell locations, and particles they contain, are *NOT* +# stored in memory instead these are calculated on the fly when an interpolation +# or cell position request is made. This has appeared to be a good trade between +# memory and performance. Storing the cells, and all the necessary information +# in memory would increase the memory usage by num_leaves * +# 2^(3*over_refine_factor). 
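As a companion to the comment block above, here is a minimal pure-Python sketch of the index-based traversal it describes. This is only an illustration, not part of the patch: `nodes` stands for any flat sequence of objects exposing the same `leaf` and `children` fields described above (root at index 0, children stored contiguously), and `num_children` corresponds to 2**(3*density_factor).

    def walk_leaves(nodes, num_children):
        # Iteratively traverse the flat node container: the root is nodes[0]
        # and the i-th child of a node sits at index node.children + i in the
        # same container, so no pointers are needed.
        stack = [0]
        while stack:
            node = nodes[stack.pop()]
            if node.leaf:
                yield node
            else:
                stack.extend(node.children + i for i in range(num_children))

The same walk applies on the C++ side via `&nodes[node.children + i]`, which is why the tree stores indices rather than addresses: a reallocation of the STL vector moves the memory but leaves the indices valid.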
+ +#TODO: Add invalidation and setters +#TODO: Add more deposition functions +#TODO: Add parallel features + +cdef struct Node: + double left_edge[3] + double right_edge[3] + + np.int64_t start # First particle we store in pos array + np.int64_t end # Last particle we store + + np.int64_t parent # Index of parent in nodes container + np.int64_t children # Index of 1st child, children are + # contiguous + bool leaf + np.int64_t node_id # Not sure if this is even useful + np.int64_t leaf_id # This is used in depositions (maybe) + unsigned char depth + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef class CyOctree: + cdef vector[Node] nodes # This is an STL container to store the octs + cdef double _left_edge[3] # Boundary conditions for the octree + cdef double _right_edge[3] + cdef np.int64_t[:] _idx # Ordering of particles used by the tree + + cdef np.int64_t _data_version # Used to decide when to re-build a tree + + cdef int _n_ref # Max number of particles per leaf + cdef np.int64_t _num_particles + + # Cell structure + cdef int _max_depth + cdef int _over_refine_factor # this allows the tree to be built with more + # than 8 cells per leaf + cdef int _num_cells # 2**(3*_over_refine_factor) + cdef int _num_cells_per_dim # 2**(_over_refine_factor) + + # Children structure + cdef int _density_factor # this allows the tree to be built with more + # than 8 children per node + cdef int _num_children # 2**(3*_density_factor) + cdef int _num_children_per_dim # 2**(_density_factor) + + # This is use for interpolation and is global for the Octree smoothing + # operations + cdef kernel_func kernel + + def __init__(self, double[:, ::1] &input_pos = None, left_edge = None, + right_edge = None, int n_ref=32, int over_refine_factor=1, + int density_factor=1, np.int64_t data_version=0, + int max_depth=20): + + # If this is the case, we are very likely just initialising an instance + # and then going to load an existing Octree from disk, so we don't + # really need to do anything + if input_pos is None: + return + + # These don't have setters as these would invalidate the tree which is + # a feature we don't have + # TODO: Add invalidation feature + self._data_version = data_version + self._n_ref = n_ref + self._num_particles = input_pos.shape[0] + self._max_depth = max_depth + + # Setting the properties which determines how children divide + self.over_refine_factor = over_refine_factor + self.density_factor = density_factor + + # Set up the initial idx of the particles, this keeps track of which + # particle is which in the tree ordered array + self._idx = np.arange(0, input_pos.shape[0], dtype=np.int64) + + # Set up the bounds and root node + self.setup_bounds(input_pos, left_edge, right_edge) + self.setup_root(input_pos) + + # Reserve some space for the nodes to be stored, this is a conversative + # amount. If we exceed this the STL container will reallocate. 
This + # will *not* invalidate any pointers + # This decreases the conversative amount and keeps retrying, unless we + # stil fail even with a small reserve, then we error + cdef int exp_num_nodes = ((2**(3 * self.density_factor) * + self._num_particles) // n_ref + 8) + cdef int failed = 1 + while exp_num_nodes > 8 and failed == 1: + try: + reserve(&self.nodes, exp_num_nodes) + failed = 0 + except MemoryError: + exp_num_nodes = exp_num_nodes // 2 + failed = 1 + + if failed == 1: + raise MemoryError("Failed to allocate memory for octree!") + + # Now build the tree + self.build_tree(&input_pos[0, 0]) + + # Give up any excess reserved space + # NOTE: this doubles the memory usage + cdef vector[Node] temp + cdef int i + temp.reserve(self.nodes.size()) + for i in range(self.nodes.size()): + temp.push_back(self.nodes[i]) + self.nodes.swap(temp) + temp.clear() + + cdef int setup_bounds(self, double[:, ::1] &input_pos, left_edge=None, + right_edge=None): + if left_edge is not None: + for i in range(3): + self._left_edge[i] = left_edge[i] + else: + for i in range(3): + self._left_edge[i] = np.amin(input_pos[:,i]) + + if right_edge is not None: + for i in range(3): + self._right_edge[i] = right_edge[i] + else: + for i in range(3): + self._right_edge[i] = np.amax(input_pos[:,i]) + return 0 + + cdef int setup_root(self, double[:, ::1] &input_pos): + cdef Node root + root.left_edge = self._left_edge + root.right_edge = self._right_edge + # Not strictly true and could lead to an issue later + root.parent = 0 + + # Always true in yt context + root.start = 0 + root.end = input_pos.shape[0]*3 + + root.children = 0 + root.leaf = 1 + root.depth = 0 + root.leaf_id = 0 + root.node_id = 0 + + # Store the root in the nodes array + self.nodes.push_back(root) + return 0 + + cdef int reset(self): + # Clear the big memory users before we load an octree from disk + self.nodes.clear() + self._idx = np.zeros(1, dtype=np.int64)-1 + return 0 + + cdef int build_tree(self, double * input_pos): + # Generate an array to store the which particles are in each child oct + cdef np.int64_t * split_arr + split_arr = malloc((self._num_children + 1) * + sizeof(np.int64_t)) + + # Loop through the nodes in serial and process them, i.e, sort the + # particles and create the children, which will increase the node.size + # then we iterate through those children + cdef int num_nodes_processed = 0 + while num_nodes_processed < self.nodes.size(): + self.process_node(&self.nodes[num_nodes_processed], input_pos, + split_arr) + num_nodes_processed += 1 + + free(split_arr) + + return 0 + + cdef int process_node(self, Node * node, double * input_pos, + np.int64_t * split_arr) nogil: + if(node.end - node.start <= 3*self._n_ref or + node.depth > self._max_depth): + return 0 + + # Node is no longer a leaf + node.leaf = 0 + + # This sorts the children in the node and then stores the position of + # the first and last particle in each node + split_helper(node.start, node.end, self._num_children, + self._density_factor, split_arr, input_pos, &self._idx[0], + &node.left_edge[0], &node.right_edge[0]) + + # Generate the node structures for the children, and store the position + # of the first and last particles they contain + self.generate_children(node, split_arr) + + return 0 + + cdef inline void generate_children(self, Node * node, + np.int64_t * split_arr) nogil: + cdef int i, j, z, k, split_id + cdef double dx, dy, dz + cdef Node temp + + node.children = self.nodes.size() + + # Set the properties which are the same for all children + temp.parent = 
node.node_id + temp.leaf = 1 + temp.depth = node.depth + 1 + temp.leaf_id = 0 + temp.children = 0 + + # Set up the values to be used to set the child boundaries + dx = (node.right_edge[0] - node.left_edge[0]) / self._num_children_per_dim + dy = (node.right_edge[1] - node.left_edge[1]) / self._num_children_per_dim + dz = (node.right_edge[2] - node.left_edge[2]) / self._num_children_per_dim + + # Loop through and append the children setting the node dependent values + z = node.children + split_id = 0 + for i in range(self._num_children_per_dim): + for j in range(self._num_children_per_dim): + for k in range(self._num_children_per_dim): + temp.left_edge[0] = node.left_edge[0] + i*dx + temp.left_edge[1] = node.left_edge[1] + j*dy + temp.left_edge[2] = node.left_edge[2] + k*dz + temp.right_edge[0] = node.left_edge[0] + (i+1)*dx + temp.right_edge[1] = node.left_edge[1] + (j+1)*dy + temp.right_edge[2] = node.left_edge[2] + (k+1)*dz + temp.node_id = z + + temp.start = split_arr[split_id] + temp.end = split_arr[split_id + 1] + + self.nodes.push_back(temp) + z+=1 + split_id+=1 + + @property + def size_bytes(self): + return sizeof(Node) * self.nodes.size() + + @property + def max_depth(self): + return self._max_depth + + @property + def data_version(self): + return self._data_version + + @property + def num_particles(self): + return self._num_particles + + @property + def n_ref(self): + return self._n_ref + + @property + def num_octs(self): + return self.nodes.size() + + @property + def over_refine_factor(self): + return self._over_refine_factor + + @property + def idx(self): + return np.asarray(self._idx) + + @over_refine_factor.setter + def over_refine_factor(self, value): + self._over_refine_factor = value + self._num_cells = 2**(3 * value) + self._num_cells_per_dim = 2**value + + @property + def density_factor(self): + return self._density_factor + + @density_factor.setter + def density_factor(self, value): + self._density_factor = value + self._num_children = 2**(3 * value) + self._num_children_per_dim = 2**value + + @property + def cell_positions(self): + cdef int i, j, z, k, l, num_leaves + + # Find all the leaves + num_leaves = 0 + for i in range(self.nodes.size()): + if self.nodes[i].leaf == 1: + num_leaves += 1 + + cdef np.float64_t[:, :] pos = np.zeros((num_leaves*self._num_cells, 3), + dtype='float64') + cdef double leftx, lefty, leftz, rightx, righty, rightz + z = 0 + for i in range(self.nodes.size()): + if self.nodes[i].leaf == 0: + continue + + leftx = self.nodes[i].left_edge[0] + lefty = self.nodes[i].left_edge[1] + leftz = self.nodes[i].left_edge[2] + rightx = self.nodes[i].right_edge[0] + righty = self.nodes[i].right_edge[1] + rightz = self.nodes[i].right_edge[2] + + self.nodes[i].leaf_id = z + + # we have to generate cell locations + dx = (rightx - leftx) / self._num_cells_per_dim + dy = (righty - lefty) / self._num_cells_per_dim + dz = (rightz - leftz) / self._num_cells_per_dim + + for j in range(self._num_cells_per_dim): + for k in range(self._num_cells_per_dim): + for l in range(self._num_cells_per_dim): + pos[z, 0] = leftx + (j + 0.5) * dx + pos[z, 1] = lefty + (k + 0.5) * dy + pos[z, 2] = leftz + (l + 0.5) * dz + z+=1 + + return np.asarray(pos) + + # TODO: move these to the location of the rest of the deposition operations + cdef void smooth_onto_cells(self, np.float64_t[:] buff, + np.float64_t[:] buff_den, np.float64_t posx, + np.float64_t posy, np.float64_t posz, + np.float64_t hsml, np.float64_t prefactor, + np.float64_t prefactor_norm, Node * node, + int 
use_normalization=0): + + cdef Node * child + cdef int i, j, k, l + cdef double q_ij, diff_x, diff_y, diff_z, dx, dy, dz, voxel_hsml2 + cdef double leftx, lefty, leftz, rightx, righty, rightz + + # If not a leaf, then check if particle is in the children and go check + # through those. This is recursive - currently + if node.leaf == 0: + child = &self.nodes[node.children] + for i in range(self._num_children): + leftx = child[i].left_edge[0] + lefty = child[i].left_edge[1] + leftz = child[i].left_edge[2] + rightx = child[i].right_edge[0] + righty = child[i].right_edge[1] + rightz = child[i].right_edge[2] + + if leftx - posx < hsml and posx - rightx < hsml: + if lefty - posy < hsml and posy - righty < hsml: + if leftz - posz < hsml and posz - rightz < hsml: + self.smooth_onto_cells(buff, buff_den, posx, posy, + posz, hsml, prefactor, + prefactor_norm, &child[i], + use_normalization) + else: + leftx = node.left_edge[0] + lefty = node.left_edge[1] + leftz = node.left_edge[2] + rightx = node.right_edge[0] + righty = node.right_edge[1] + rightz = node.right_edge[2] + + # We have to generate cell locations + dx = (rightx - leftx) / self._num_cells_per_dim + dy = (righty - lefty) / self._num_cells_per_dim + dz = (rightz - leftz) / self._num_cells_per_dim + + # Loop through each cell and calculate the contribution, l is the + # number of the cell we are in + l = 0 + for i in range(self._num_cells_per_dim): + for j in range(self._num_cells_per_dim): + for k in range(self._num_cells_per_dim): + diff_x = (leftx + (i + 0.5) * dx - posx) + diff_x *= diff_x + diff_y = (lefty + (j + 0.5) * dy - posy) + diff_y *= diff_y + diff_z = (leftz + (k + 0.5) * dz - posz) + diff_z *= diff_z + + voxel_hsml2 = hsml*hsml + q_ij = math.sqrt((diff_x + diff_y + diff_z) / + voxel_hsml2) + + if use_normalization: + buff_den[node.leaf_id + l] += (prefactor_norm * + self.kernel(q_ij)) + buff[node.leaf_id + l] += prefactor * self.kernel(q_ij) + l += 1 + + def interpolate_sph_cells(self, np.float64_t[:] buff, + np.float64_t[:] buff_den, np.float64_t[:] posx, + np.float64_t[:] posy, np.float64_t[:] posz, + np.float64_t[:] pmass, np.float64_t[:] pdens, + np.float64_t[:] hsml, np.float64_t[:] field, + kernel_name="cubic", int use_normalization=0): + + self.kernel = get_kernel_func(kernel_name) + + cdef int i + cdef double prefactor, prefactor_norm + + for i in range(posx.shape[0]): + prefactor = pmass[i] / pdens[i] / hsml[i]**3 + prefactor_norm = prefactor + prefactor *= field[i] + + self.smooth_onto_cells(buff, buff_den, posx[i], posy[i], posz[i], + hsml[i], prefactor, prefactor_norm, + &self.nodes[0], + use_normalization=use_normalization) + + def __richcmp__(self, CyOctree other, op): + if op == 2: + return self._is_equal(other) + elif op ==3: + return not self._is_equal(other) + else: + raise NotImplementedError(("Use == or !=, other comparisons have " + + "not been implemented.")) + + def _is_equal(self, CyOctree other): + cdef bool same = True + + for i in range(3): + if self._left_edge[i] != other._left_edge[i]: + same = False + if self._right_edge[i] != other._right_edge[i]: + same = False + + if self._n_ref != other._n_ref: + same = False + + if self._over_refine_factor != other._over_refine_factor: + same = False + + if self._density_factor != other._density_factor: + same = False + + if self._data_version != other._data_version: + same = False + + return same + + # TODO: this code is much slower than I would like, this is likely due to + # the use of struct -> plan to replace this. 
A c++ approach is probably + # faster and more intuitive + def save(self, fname = None): + if fname is None: + raise ValueError("A filename must be specified to save the octree!") + + with open(fname,'wb') as f: + f.write(struct.pack('2Q3iq', self._num_particles, self.num_octs, + self._n_ref, self.over_refine_factor, + self.density_factor, self._data_version)) + f.write(struct.pack('{}Q'.format(self.num_particles), + *self.idx)) + + for i in range(self.num_octs): + f.write(struct.pack('6d4Q?2QB', + self.nodes[i].left_edge[0], + self.nodes[i].left_edge[1], + self.nodes[i].left_edge[2], + self.nodes[i].right_edge[0], + self.nodes[i].right_edge[1], + self.nodes[i].right_edge[2], + self.nodes[i].start, + self.nodes[i].end, + self.nodes[i].parent, + self.nodes[i].children, + self.nodes[i].leaf, + self.nodes[i].node_id, + self.nodes[i].leaf_id, + self.nodes[i].depth)) + + def load(self, fname = None): + if fname is None: + raise ValueError("A filename must be specified to load the octtree!") + # clear any current tree we have loaded + self.reset() + + cdef Node temp + cdef np.int64_t num_octs + with open(fname,'rb') as f: + (self._num_particles, num_octs, self._n_ref, + self.over_refine_factor, self.density_factor, + self._data_version) = \ + struct.unpack('2Q3iq', f.read(40)) + self._idx = \ + np.asarray(struct.unpack('{}Q'.format(self.num_particles), + f.read(8*self.num_particles)), dtype=np.int64) + + reserve(&self.nodes, num_octs+1) + for i in range(num_octs): + (temp.left_edge[0], temp.left_edge[1], temp.left_edge[2], + temp.right_edge[0], temp.right_edge[1], temp.right_edge[2], + temp.start, temp.end, temp.parent, temp.children, temp.leaf, + temp.node_id, temp.leaf_id, temp.depth) = \ + struct.unpack('6d4Q?2QB', f.read(105)) + self.nodes.push_back(temp) + + for i in range(3): + self._left_edge[i] = self.nodes[0].left_edge[i] + self._right_edge[i] = self.nodes[0].right_edge[i] + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef int split_helper(int start, int end, int num_children, int max_splits, + np.int64_t * split_arr, np.float64_t * pos, + np.int64_t * idx, np.float64_t * left, + np.float64_t * right) nogil except -1: + ''' + This takes in the split array and sets up the first and last particle, it + then calls the spit function. + ''' + cdef double lower = left[0] + cdef double upper = right[0] + + split_arr[0] = start + split_arr[num_children] = end + + split(0, num_children, max_splits, 0, 0, split_arr, idx, pos, left, right, + lower, upper) + + return 0 + +cdef int reserve(vector[Node] * vec, int amount) except +MemoryError: + ''' + This attempts to reserve memory and propagates any errors back. + ''' + vec.reserve(amount) + return 0 + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef int seperate(np.float64_t * array, np.int64_t * idx, int offset, double value, + np.int64_t start, np.int64_t end) nogil except -1: + ''' + This takes in an axis, a value and the particles position array. + + It splits the array so all values in the positions array with positions in + the axis dimension less than value are on the left, and those with a value + greater are on the right. 
+ ''' + cdef np.int64_t index + cdef np.int64_t idx_index = start // 3, idx_split = idx_index + cdef np.int64_t split = start + + for index in range(start, end, 3): + idx_index += 1 + if array[index + offset] < value: + idx[idx_split], idx[idx_index] = idx[idx_index], idx[idx_split] + array[split], array[index] = array[index], array[split] + array[split+1], array[index+1] = array[index+1], array[split+1] + array[split+2], array[index+2] = array[index+2], array[split+2] + split+=3 + idx_split+=1 + + return split + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +cdef int split(int start, int end, int max_splits, int splits, int axis, + np.int64_t * split_arr, np.int64_t * idx, np.float64_t * pos, + np.float64_t * left_edge, np.float64_t * right_edge, + np.float64_t lower, np.float64_t upper) nogil except -1: + ''' + This splits the arrays recursively such that we split the particles in the + x, y and z axis such that we get 2**(density_factor) children. We store + which particles are in each children in the split_arr + ''' + cdef int mid + cdef double temp_value + + if splits == max_splits and axis < 2: + splits = 0 + axis += 1 + lower = left_edge[axis] + upper = right_edge[axis] + + if splits < max_splits: + splits += 1 + mid = (start + end) // 2 + temp_value = (upper + lower) / 2 + + split_arr[mid] = seperate(pos, idx, axis, temp_value, split_arr[start], + split_arr[end]) + + split(start, mid, max_splits, splits, axis, split_arr, idx, pos, + left_edge, right_edge, lower, temp_value) + split(mid, end, max_splits, splits, axis, split_arr, idx, pos, + left_edge, right_edge, temp_value, upper) + + return 0 diff --git a/yt/utilities/lib/depth_first_octree.pyx b/yt/utilities/lib/depth_first_octree.pyx index 6af3ed6955c..45280ae4924 100644 --- a/yt/utilities/lib/depth_first_octree.pyx +++ b/yt/utilities/lib/depth_first_octree.pyx @@ -5,13 +5,6 @@ This is a recursive function to return a depth-first octree """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/distance_queue.pxd b/yt/utilities/lib/distance_queue.pxd index 67ea4190fb1..3eb7b9afe67 100644 --- a/yt/utilities/lib/distance_queue.pxd +++ b/yt/utilities/lib/distance_queue.pxd @@ -6,13 +6,6 @@ A queue for evaluating distances to discrete points """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport cython cimport numpy as np diff --git a/yt/utilities/lib/distance_queue.pyx b/yt/utilities/lib/distance_queue.pyx index d501a2ba0a1..15deb631252 100644 --- a/yt/utilities/lib/distance_queue.pyx +++ b/yt/utilities/lib/distance_queue.pyx @@ -6,13 +6,6 @@ Distance queue implementation """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- cimport numpy as np import numpy as np cimport cython diff --git a/yt/utilities/lib/element_mappings.pyx b/yt/utilities/lib/element_mappings.pyx index 8c01956fc47..d88c83c2a57 100644 --- a/yt/utilities/lib/element_mappings.pyx +++ b/yt/utilities/lib/element_mappings.pyx @@ -6,13 +6,6 @@ interpolation on finite element data. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np from numpy cimport ndarray diff --git a/yt/utilities/lib/ewah_bool_array.pxd b/yt/utilities/lib/ewah_bool_array.pxd new file mode 100644 index 00000000000..b8507b71f1f --- /dev/null +++ b/yt/utilities/lib/ewah_bool_array.pxd @@ -0,0 +1,102 @@ +""" +Wrapper for EWAH Bool Array: https://github.com/lemire/EWAHBoolArray + + + +""" + + +from libcpp.vector cimport vector +from libcpp.map cimport map +from libcpp.string cimport string +from libcpp cimport bool +from libc.stdint cimport uint64_t, uint32_t + +# Streams req for c++ IO +cdef extern from "" namespace "std": + cdef cppclass ostream[T]: + pass +cdef extern from "" namespace "std": + cdef cppclass istream[T]: + pass + +cdef extern from "" namespace "std": + cdef cppclass stringstream: + stringstream() except + + string str() + ostream write(char *, size_t) + istream read(char *, size_t) + bint eof() + +cdef extern from "ewah.h": + cppclass EWAHBoolArraySetBitForwardIterator[uword]: + # EWAHBoolArraySetBitForwardIterator() + EWAHBoolArraySetBitForwardIterator(const EWAHBoolArraySetBitForwardIterator &o) + size_t operator*() + EWAHBoolArraySetBitForwardIterator &operator++() + bint operator==(EWAHBoolArraySetBitForwardIterator &x) + bint operator!=(EWAHBoolArraySetBitForwardIterator &x) + # ctypedef EWAHBoolArraySetBitForwardIterator[unsigned long long] const_iterator + cdef cppclass EWAHBoolArray[uword]: + # We are going to skip the varargs here; it is too tricky to assemble. 
+ bint get(const size_t pos) + bint set(size_t i) + void makeSameSize(EWAHBoolArray &a) + vector[size_t] toArray() + void logicaland(EWAHBoolArray &a, EWAHBoolArray &container) + void logicalor(EWAHBoolArray &a, EWAHBoolArray &container) + void logicalxor(EWAHBoolArray &a, EWAHBoolArray &container) + bint intersects(EWAHBoolArray &a) + void reset() + size_t sizeInBits() + size_t sizeInBytes() + bint operator==(EWAHBoolArray &x) + bint operator!=(EWAHBoolArray &x) + void append(EWAHBoolArray &x) + # Recommended container is "vector[size_t]" + void appendRowIDs[container](container &out, const size_t offset) + void appendSetBits[container](container &out, const size_t offset) + size_t numberOfOnes() + void logicalnot(EWAHBoolArray &x) + void inplace_logicalnot() + void swap(EWAHBoolArray &x) + void read(stringstream &incoming, bint savesizeinbits) + void readBuffer(stringstream &incoming, const size_t buffersize) + void write(stringstream &out, bint savesizeinbits) + void writeBuffer(stringstream &out) + size_t addWord(uword newdata) + vector[uword] &getBuffer() + # const_iterator begin() + # const_iterator end() + EWAHBoolArraySetBitForwardIterator begin() + EWAHBoolArraySetBitForwardIterator end() + +cdef extern from "boolarray.h": + cppclass BoolArray[uword]: + void setSizeInBits(size_t sizeib) + void set(size_t pos) + void unset(size_t pos) + bool get(size_t pos) + void reset() + size_t sizeInBits() + size_t sizeInBytes() + size_t numberOfOnes() + void inplace_logicalxor(BoolArray &other) + void inplace_logicalnot() + size_t padWithZeroes(size_t totalbits) + uword getWord(size_t pos) + size_t wordinbits + +cimport numpy as np +cimport cython + +IF UNAME_SYSNAME == "Windows": + ctypedef uint32_t ewah_word_type +ELSE: + ctypedef np.uint32_t ewah_word_type +ctypedef EWAHBoolArray[ewah_word_type] ewah_bool_array +ctypedef EWAHBoolArraySetBitForwardIterator[ewah_word_type] ewah_bool_iterator +ctypedef vector[size_t] bitset_array +ctypedef map[np.uint64_t, ewah_bool_array] ewah_map +ctypedef stringstream sstream +ctypedef BoolArray[ewah_word_type] bool_array diff --git a/yt/utilities/lib/ewah_bool_wrap.pxd b/yt/utilities/lib/ewah_bool_wrap.pxd new file mode 100644 index 00000000000..229b6536b4c --- /dev/null +++ b/yt/utilities/lib/ewah_bool_wrap.pxd @@ -0,0 +1,159 @@ +from libcpp.vector cimport vector +from libcpp.set cimport set as cset +from libcpp.pair cimport pair +from yt.utilities.lib.ewah_bool_array cimport \ + sstream, ewah_map, ewah_bool_array, ewah_bool_iterator + +cimport numpy as np +ctypedef bint bitarrtype +ctypedef pair[np.uint64_t, np.uint64_t] ind_pair + +cdef class FileBitmasks: + cdef np.uint32_t nfiles + cdef ewah_map** ewah_coll + cdef ewah_bool_array** ewah_keys + cdef ewah_bool_array** ewah_refn + + cdef void _reset(self) + cdef bint _iseq(self, FileBitmasks solf) + cdef BoolArrayCollection _get_bitmask(self, np.uint32_t ifile) + cdef tuple _find_collisions(self, BoolArrayCollection coll, bint verbose=*) + cdef tuple _find_collisions_coarse(self, BoolArrayCollection coll, bint + verbose=*, file_list=*) + cdef tuple _find_collisions_refined(self, BoolArrayCollection coll, bint verbose=*) + cdef void _set(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2=*) + cdef void _set_coarse(self, np.uint32_t ifile, np.uint64_t i1) + cdef void _set_refined(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2) + cdef void _set_coarse_array(self, np.uint32_t ifile, np.uint8_t[:] arr) + cdef void _set_refined_array(self, np.uint32_t ifile, np.uint64_t mi1, np.uint8_t[:] arr) + 
cdef void _set_refined_index_array(self, np.uint32_t ifile, np.int64_t nsub_mi, + np.ndarray[np.uint64_t, ndim=1] sub_mi1, + np.ndarray[np.uint64_t, ndim=1] sub_mi2) + cdef void _set_map(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2) + cdef void _set_refn(self, np.uint32_t ifile, np.uint64_t i1) + cdef bint _get(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2=*) + cdef bint _get_coarse(self, np.uint32_t ifile, np.uint64_t i1) + cdef void _get_coarse_array(self, np.uint32_t ifile, np.uint64_t imax, np.uint8_t[:] arr) except * + cdef bint _isref(self, np.uint32_t ifile, np.uint64_t i) + cdef np.uint64_t _count_total(self, np.uint32_t ifile) + cdef np.uint64_t _count_refined(self, np.uint32_t ifile) + cdef np.uint64_t _count_coarse(self, np.uint32_t ifile) + cdef void _append(self, np.uint32_t ifile, BoolArrayCollection solf) + cdef bint _intersects(self, np.uint32_t ifile, BoolArrayCollection solf) + cdef void _logicalxor(self, np.uint32_t ifile, BoolArrayCollection solf, BoolArrayCollection out) + cdef void _logicaland(self, np.uint32_t ifile, BoolArrayCollection solf, BoolArrayCollection out) + cdef void _select_contaminated(self, np.uint32_t ifile, BoolArrayCollection mask, np.uint8_t[:] out, + np.uint8_t[:] secondary_files, BoolArrayCollection mask2=*) + cdef void _select_uncontaminated(self, np.uint32_t ifile, BoolArrayCollection mask, np.uint8_t[:] out, + BoolArrayCollection mask2=*) + cdef bytes _dumps(self, np.uint32_t ifile) + cdef bint _loads(self, np.uint32_t ifile, bytes s) + cdef bint _check(self) + +cdef class BoolArrayCollection: + cdef ewah_map* ewah_coll + cdef ewah_bool_array* ewah_keys + cdef ewah_bool_array* ewah_refn + cdef ewah_bool_array* ewah_coar + + cdef void _reset(self) + cdef int _richcmp(self, BoolArrayCollection solf, int op) except -1 + cdef void _set(self, np.uint64_t i1, np.uint64_t i2=*) + cdef void _set_coarse(self, np.uint64_t i1) + cdef void _set_refined(self, np.uint64_t i1, np.uint64_t i2) + cdef void _set_coarse_array(self, np.uint8_t[:] arr) + cdef void _set_refined_array(self, np.uint64_t mi1, np.uint8_t[:] arr) + cdef void _set_map(self, np.uint64_t i1, np.uint64_t i2) + cdef void _set_refn(self, np.uint64_t i1) + cdef bint _get(self, np.uint64_t i1, np.uint64_t i2=*) + cdef bint _get_coarse(self, np.uint64_t i1) + cdef void _get_coarse_array(self, np.uint64_t imax, np.uint8_t[:] arr) except * + cdef bint _contains(self, np.uint64_t i) + cdef bint _isref(self, np.uint64_t i) + cdef void _ewah_coarse(self) + cdef np.uint64_t _count_total(self) + cdef np.uint64_t _count_refined(self) + cdef np.uint64_t _count_coarse(self) + cdef void _append(self, BoolArrayCollection solf) + cdef void _logicalor(self, BoolArrayCollection solf, BoolArrayCollection out) + cdef bint _intersects(self, BoolArrayCollection solf) + cdef void _logicalxor(self, BoolArrayCollection solf, BoolArrayCollection out) + cdef void _logicaland(self, BoolArrayCollection solf, BoolArrayCollection out) + cdef void _select_contaminated(self, BoolArrayCollection mask, np.uint8_t[:] out, + BoolArrayCollection mask2=*) + cdef void _select_uncontaminated(self, BoolArrayCollection mask, np.uint8_t[:] out, + BoolArrayCollection mask2=*) + cdef void _get_ghost_zones(self, int ngz, int order1, int order2, + bint periodicity[3], BoolArrayCollection out_ewah, + bint coarse_ghosts=*) + cdef bytes _dumps(self) + cdef bint _loads(self, bytes s) + cdef bint _check(self) + +cdef class BoolArrayCollectionUncompressed: + cdef int nele1 + cdef int nele2 + cdef ewah_map* ewah_coll + cdef 
bitarrtype* ewah_keys + cdef bitarrtype* ewah_refn + + cdef void _set(self, np.uint64_t i1, np.uint64_t i2=*) + cdef void _set_coarse(self, np.uint64_t i1) + cdef void _set_refined(self, np.uint64_t i1, np.uint64_t i2) + cdef void _set_coarse_array(self, np.uint8_t[:] arr) + cdef void _set_coarse_array_ptr(self, np.uint8_t *arr) + cdef void _set_refined_array(self, np.uint64_t mi1, np.uint8_t[:] arr) + cdef void _set_refined_array_ptr(self, np.uint64_t mi1, np.uint8_t *arr) + cdef void _set_map(self, np.uint64_t i1, np.uint64_t i2) + cdef void _set_refn(self, np.uint64_t i1) + cdef bint _get(self, np.uint64_t i1, np.uint64_t i2=*) + cdef bint _get_coarse(self, np.uint64_t i1) + cdef bint _isref(self, np.uint64_t i) + cdef np.uint64_t _count_total(self) + cdef np.uint64_t _count_refined(self) + cdef void _append(self, BoolArrayCollectionUncompressed solf) + cdef bint _intersects(self, BoolArrayCollectionUncompressed solf) + cdef void _compress(self, BoolArrayCollection solf) + +cdef class SparseUnorderedBitmaskSet: + cdef cset[np.uint64_t] entries + cdef void _set(self, np.uint64_t ind) + cdef void _fill(self, np.uint8_t[:] mask) + cdef void _fill_ewah(self, BoolArrayCollection mm) + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm) + cdef void _reset(self) + cdef to_array(self) + +cdef class SparseUnorderedBitmaskVector: + cdef int total + cdef vector[np.uint64_t] entries + cdef void _set(self, np.uint64_t ind) + cdef void _fill(self, np.uint8_t[:] mask) + cdef void _fill_ewah(self, BoolArrayCollection mm) + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm) + cdef void _reset(self) + cdef to_array(self) + cdef void _remove_duplicates(self) + cdef void _prune(self) + +cdef class SparseUnorderedRefinedBitmaskSet: + cdef cset[ind_pair] entries + cdef void _set(self, np.uint64_t ind1, np.uint64_t ind2) + cdef void _fill(self, np.uint8_t[:] mask1, np.uint8_t[:]) + cdef void _fill_ewah(self, BoolArrayCollection mm) + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm) + cdef void _reset(self) + cdef to_array(self) + +cdef class SparseUnorderedRefinedBitmaskVector: + cdef int total + cdef vector[ind_pair] entries + cdef void _set(self, np.uint64_t ind1, np.uint64_t ind2) + cdef void _fill(self, np.uint8_t[:] mask1, np.uint8_t[:]) + cdef void _fill_ewah(self, BoolArrayCollection mm) + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm) + cdef void _reset(self) + cdef to_array(self) + cdef void _remove_duplicates(self) + cdef void _prune(self) + diff --git a/yt/utilities/lib/ewah_bool_wrap.pyx b/yt/utilities/lib/ewah_bool_wrap.pyx new file mode 100644 index 00000000000..05cbe86ce4b --- /dev/null +++ b/yt/utilities/lib/ewah_bool_wrap.pyx @@ -0,0 +1,1740 @@ +""" +Wrapper for EWAH Bool Array: https://github.com/lemire/EWAHBoolArray + + + +""" + + +import struct +from libcpp.map cimport map as cmap +from libcpp.map cimport map +from libcpp.algorithm cimport sort +from libc.stdlib cimport malloc, free, qsort +from cython.operator cimport dereference, preincrement +import numpy as np +cimport numpy as np +cimport cython +from yt.utilities.lib.geometry_utils cimport \ + morton_neighbors_coarse, morton_neighbors_refined + +cdef extern from "" namespace "std" nogil: + Iter unique[Iter](Iter first, Iter last) + +cdef np.uint64_t FLAG = ~(0) +cdef np.uint64_t MAX_VECTOR_SIZE = 1e7 + +ctypedef cmap[np.uint64_t, ewah_bool_array] ewahmap +ctypedef cmap[np.uint64_t, ewah_bool_array].iterator ewahmap_it +ctypedef pair[np.uint64_t, ewah_bool_array] ewahmap_p + +cdef 
class FileBitmasks: + + def __cinit__(self, np.uint32_t nfiles): + cdef int i + self.nfiles = nfiles + self.ewah_keys = malloc(nfiles*sizeof(ewah_bool_array*)) + self.ewah_refn = malloc(nfiles*sizeof(ewah_bool_array*)) + self.ewah_coll = malloc(nfiles*sizeof(ewah_map*)) + for i in range(nfiles): + self.ewah_keys[i] = new ewah_bool_array() + self.ewah_refn[i] = new ewah_bool_array() + self.ewah_coll[i] = new ewah_map() + + cdef void _reset(self): + cdef np.int32_t ifile + for ifile in range(self.nfiles): + self.ewah_keys[ifile].reset() + self.ewah_refn[ifile].reset() + self.ewah_coll[ifile].clear() + + cdef bint _iseq(self, FileBitmasks solf): + cdef np.int32_t ifile + cdef ewah_bool_array* arr1 + cdef ewah_bool_array* arr2 + cdef ewahmap *map1 + cdef ewahmap *map2 + cdef ewahmap_p pair1, pair2 + cdef ewahmap_it it_map1, it_map2 + if self.nfiles != solf.nfiles: + return 0 + for ifile in range(self.nfiles): + # Keys + arr1 = ( self.ewah_keys)[ifile] + arr2 = ( solf.ewah_keys)[ifile] + if arr1[0] != arr2[0]: + return 0 + # Refn + arr1 = ( self.ewah_refn)[ifile] + arr2 = ( solf.ewah_refn)[ifile] + if arr1[0] != arr2[0]: + return 0 + # Map + map1 = ( self.ewah_coll)[ifile] + map2 = ( solf.ewah_coll)[ifile] + for pair1 in map1[0]: + it_map2 = map2[0].find(pair1.first) + if it_map2 == map2[0].end(): + return 0 + if pair1.second != dereference(it_map2).second: + return 0 + for pair2 in map2[0]: + it_map1 = map1[0].find(pair2.first) + if it_map1 == map1[0].end(): + return 0 + if pair2.second != dereference(it_map1).second: + return 0 + # Match + return 1 + + def iseq(self, solf): + return self._iseq(solf) + + cdef BoolArrayCollection _get_bitmask(self, np.uint32_t ifile): + cdef BoolArrayCollection out = BoolArrayCollection() + cdef ewah_bool_array **ewah_keys = self.ewah_keys + cdef ewah_bool_array **ewah_refn = self.ewah_refn + cdef ewah_map **ewah_coll = self.ewah_coll + # This version actually copies arrays, which can be costly + cdef ewah_bool_array *ewah_keys_out = out.ewah_keys + cdef ewah_bool_array *ewah_refn_out = out.ewah_refn + cdef ewah_map *ewah_coll_out = out.ewah_coll + ewah_keys_out[0] = ewah_keys[ifile][0] + ewah_refn_out[0] = ewah_refn[ifile][0] + ewah_coll_out[0] = ewah_coll[ifile][0] + # This version only copies pointers which can lead to deallocation of + # the source when the copy is deleted. 
+ # out.ewah_keys = ewah_keys[ifile] + # out.ewah_refn = ewah_refn[ifile] + # out.ewah_coll = ewah_coll[ifile] + return out + + cdef tuple _find_collisions(self, BoolArrayCollection coll, bint verbose = 0): + cdef tuple cc, cr + cc = self._find_collisions_coarse(coll, verbose) + cr = self._find_collisions_refined(coll, verbose) + return cc, cr + + cdef tuple _find_collisions_coarse(self, BoolArrayCollection coll, bint + verbose = 0, file_list = None): + cdef np.int32_t ifile + cdef ewah_bool_array arr_two, arr_swap, arr_keys, arr_refn + cdef ewah_bool_array* iarr + cdef ewah_bool_array* coll_keys + cdef ewah_bool_array* coll_refn + coll_keys = ( coll.ewah_keys) + coll_refn = ( coll.ewah_refn) + if file_list is None: + file_list = range(self.nfiles) + for ifile in file_list: + iarr = (self.ewah_keys)[ifile] + arr_keys.logicaland(iarr[0], arr_two) + arr_keys.logicalor(iarr[0], arr_swap) + arr_keys.swap(arr_swap) + arr_refn.logicalor(arr_two, arr_swap) + arr_refn.swap(arr_swap) + coll_keys[0].swap(arr_keys) + coll_refn[0].swap(arr_refn) + # Print + cdef int nc, nm + nc = coll_refn[0].numberOfOnes() + nm = coll_keys[0].numberOfOnes() + cdef tuple nout = (nc, nm) + if verbose == 1: + print("{: 10d}/{: 10d} collisions at coarse refinement. ({: 10.5f}%)".format(nc,nm,100.0*float(nc)/nm)) + return nout + + cdef tuple _find_collisions_refined(self, BoolArrayCollection coll, bint verbose = 0): + cdef np.int32_t ifile + cdef ewah_bool_array iarr, arr_two, arr_swap + cdef ewah_bool_array* coll_refn + cdef map[np.uint64_t, ewah_bool_array] map_keys, map_refn + cdef map[np.uint64_t, ewah_bool_array]* coll_coll + cdef map[np.uint64_t, ewah_bool_array]* map_bitmask + coll_refn = coll.ewah_refn + if coll_refn[0].numberOfOnes() == 0: + if verbose == 1: + print("{: 10d}/{: 10d} collisions at refined refinement. ({: 10.5f}%)".format(0,0,0)) + return (0,0) + coll_coll = coll.ewah_coll + for ifile in range(self.nfiles): + map_bitmask = ( self.ewah_coll)[ifile] + for it_mi1 in map_bitmask[0]: + mi1 = it_mi1.first + iarr = it_mi1.second + map_keys[mi1].logicaland(iarr, arr_two) + map_keys[mi1].logicalor(iarr, arr_swap) + map_keys[mi1].swap(arr_swap) + map_refn[mi1].logicalor(arr_two, arr_swap) + map_refn[mi1].swap(arr_swap) + coll_coll[0] = map_refn + # Count + cdef int nc, nm + nc = 0 + nm = 0 + for it_mi1 in map_refn: + mi1 = it_mi1.first + iarr = it_mi1.second + nc += iarr.numberOfOnes() + iarr = map_keys[mi1] + nm += iarr.numberOfOnes() + cdef tuple nout = (nc, nm) + # Print + if verbose == 1: + if nm == 0: + print("{: 10d}/{: 10d} collisions at refined refinement. ({: 10.5f}%)".format(nc,nm,0.0)) + else: + print("{: 10d}/{: 10d} collisions at refined refinement. 
({: 10.5f}%)".format(nc,nm,100.0*float(nc)/nm)) + return nout + + cdef void _set(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2 = FLAG): + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll = ( self.ewah_coll)[ifile] + ewah_keys[0].set(i1) + if i2 != FLAG: + ewah_refn[0].set(i1) + ewah_coll[0][i1].set(i2) + + cdef void _set_coarse(self, np.uint32_t ifile, np.uint64_t i1): + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + ewah_keys[0].set(i1) + + cdef void _set_refined(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2): + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll = ( self.ewah_coll)[ifile] + ewah_refn[0].set(i1) + ewah_coll[0][i1].set(i2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_coarse_array(self, np.uint32_t ifile, np.uint8_t[:] arr): + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef np.uint64_t i1 + for i1 in range(arr.shape[0]): + if arr[i1] == 1: + ewah_keys[0].set(i1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_refined_array(self, np.uint32_t ifile, np.uint64_t i1, np.uint8_t[:] arr): + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll = ( self.ewah_coll)[ifile] + cdef np.uint64_t i2 + for i2 in range(arr.shape[0]): + if arr[i2] == 1: + ewah_refn[0].set(i1) + ewah_coll[0][i1].set(i2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_refined_index_array(self, np.uint32_t ifile, np.int64_t nsub_mi, + np.ndarray[np.uint64_t, ndim=1] sub_mi1, + np.ndarray[np.uint64_t, ndim=1] sub_mi2): + cdef np.ndarray[np.int64_t, ndim=1] ind = np.lexsort((sub_mi2[:nsub_mi], + sub_mi1[:nsub_mi])) + cdef np.int64_t i, p + cdef BoolArrayCollection temp + if self._count_refined(ifile) == 0: + # Add to file bitmask in order + for i in range(nsub_mi): + p = ind[i] + self._set_refined(ifile, sub_mi1[p], sub_mi2[p]) + else: + # Add to dummy bitmask in order, then combine + temp = BoolArrayCollection() + for i in range(nsub_mi): + p = ind[i] + temp._set_coarse(sub_mi1[p]) + temp._set_refined(sub_mi1[p], sub_mi2[p]) + self._append(ifile, temp) + + cdef void _set_map(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2): + cdef ewah_map *ewah_coll = ( self.ewah_coll)[ifile] + ewah_coll[0][i1].set(i2) + + cdef void _set_refn(self, np.uint32_t ifile, np.uint64_t i1): + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + ewah_refn[0].set(i1) + + cdef bint _get(self, np.uint32_t ifile, np.uint64_t i1, np.uint64_t i2 = FLAG): + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll = ( self.ewah_coll)[ifile] + if (ewah_keys[0].get(i1) == 0): return 0 + if (i2 == FLAG) or (ewah_refn[0].get(i1) == 0): + return 1 + return ewah_coll[0][i1].get(i2) + + cdef bint _get_coarse(self, np.uint32_t ifile, np.uint64_t i1): + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + return ewah_keys[0].get(i1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _get_coarse_array(self, np.uint32_t ifile, np.uint64_t imax, + np.uint8_t[:] arr) except *: + cdef 
ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_keys[0].begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_keys[0].end()) + cdef np.uint64_t iset + while iter_set[0] != iter_end[0]: + iset = dereference(iter_set[0]) + if iset >= imax: + raise IndexError("Index {} exceedes max {}.".format(iset, imax)) + arr[iset] = 1 + preincrement(iter_set[0]) + + cdef bint _isref(self, np.uint32_t ifile, np.uint64_t i): + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + return ewah_refn[0].get(i) + + def count_coarse(self, ifile): + return self._count_coarse(ifile) + + def count_total(self, ifile): + return self._count_total(ifile) + + def count_refined(self, ifile): + return self._count_refined(ifile) + + cdef np.uint64_t _count_coarse(self, np.uint32_t ifile): + return self._count_total(ifile) - self._count_refined(ifile) + + cdef np.uint64_t _count_total(self, np.uint32_t ifile): + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef np.uint64_t out = ewah_keys[0].numberOfOnes() + return out + + cdef np.uint64_t _count_refined(self, np.uint32_t ifile): + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef np.uint64_t out = ewah_refn[0].numberOfOnes() + return out + + def append(self, np.uint32_t ifile, BoolArrayCollection solf): + if solf is None: return + self._append(ifile, solf) + + cdef void _append(self, np.uint32_t ifile, BoolArrayCollection solf): + cdef ewah_bool_array *ewah_keys1 = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn1 = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll1 = ( self.ewah_coll)[ifile] + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array swap, mi1_ewah1, mi1_ewah2 + cdef np.uint64_t mi1 + # Keys + ewah_keys1[0].logicalor(ewah_keys2[0], swap) + ewah_keys1[0].swap(swap) + # Refined + ewah_refn1[0].logicalor(ewah_refn2[0], swap) + ewah_refn1[0].swap(swap) + # Map + it_map2 = ewah_coll2[0].begin() + while it_map2 != ewah_coll2[0].end(): + mi1 = dereference(it_map2).first + mi1_ewah2 = dereference(it_map2).second + it_map1 = ewah_coll1[0].find(mi1) + if it_map1 == ewah_coll1[0].end(): + ewah_coll1[0][mi1] = mi1_ewah2 + else: + mi1_ewah1 = dereference(it_map1).second + mi1_ewah1.logicalor(mi1_ewah2, swap) + mi1_ewah1.swap(swap) + preincrement(it_map2) + + cdef bint _intersects(self, np.uint32_t ifile, BoolArrayCollection solf): + cdef ewah_bool_array *ewah_keys1 = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn1 = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll1 = ( self.ewah_coll)[ifile] + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2 + cdef np.uint64_t mi1 + cdef ewah_bool_array ewah_coar1, ewah_coar2 + # No intersection + if ewah_keys1[0].intersects(ewah_keys2[0]) == 0: + return 0 + # Intersection at coarse level + ewah_keys1[0].logicalxor(ewah_refn1[0],ewah_coar1) + ewah_keys2[0].logicalxor(ewah_refn2[0],ewah_coar2) + if ewah_coar1.intersects(ewah_keys2[0]) == 1: + return 1 + if ewah_coar2.intersects(ewah_keys1[0]) == 1: + return 1 + # Intersection at refined level + if ewah_refn1[0].intersects(ewah_refn2[0]) == 1: + it_map1 = ewah_coll1[0].begin() + while (it_map1 != 
ewah_coll1[0].end()): + mi1 = dereference(it_map1).first + it_map2 = ewah_coll2[0].find(mi1) + if it_map2 != ewah_coll2[0].end(): + mi1_ewah1 = dereference(it_map1).second + mi1_ewah2 = dereference(it_map2).second + if mi1_ewah1.intersects(mi1_ewah2): + return 1 + preincrement(it_map1) + return 0 + + cdef void _logicalxor(self, np.uint32_t ifile, BoolArrayCollection solf, BoolArrayCollection out): + cdef ewah_bool_array *ewah_keys1 = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn1 = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll1 = ( self.ewah_coll)[ifile] + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewah_bool_array *ewah_keys_out = out.ewah_keys + cdef ewah_bool_array *ewah_refn_out = out.ewah_refn + cdef ewah_map *ewah_coll_out = out.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2, swap + cdef np.uint64_t mi1 + # Keys + ewah_keys1[0].logicalxor(ewah_keys2[0],ewah_keys_out[0]) + # Refn + ewah_refn1[0].logicalxor(ewah_refn2[0],ewah_refn_out[0]) + # Coll + it_map1 = ewah_coll1[0].begin() + while (it_map1 != ewah_coll1[0].end()): + mi1 = dereference(it_map1).first + mi1_ewah1 = dereference(it_map1).second + it_map2 = ewah_coll2[0].find(mi1) + if it_map2 == ewah_coll2[0].end(): + ewah_coll_out[0][mi1] = mi1_ewah1 + else: + mi1_ewah2 = dereference(it_map2).second + mi1_ewah1.logicalxor(mi1_ewah2, swap) + ewah_coll_out[0][mi1] = swap + preincrement(it_map1) + it_map2 = ewah_coll2[0].begin() + while (it_map2 != ewah_coll2[0].end()): + mi1 = dereference(it_map2).first + mi1_ewah2 = dereference(it_map2).second + it_map1 = ewah_coll1[0].find(mi1) + if it_map1 == ewah_coll1[0].end(): + ewah_coll_out[0][mi1] = mi1_ewah2 + preincrement(it_map2) + + def logicalxor(self, ifile, solf, out): + return self._logicalxor(ifile, solf, out) + + cdef void _logicaland(self, np.uint32_t ifile, BoolArrayCollection solf, BoolArrayCollection out): + cdef ewah_bool_array *ewah_keys1 = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn1 = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll1 = ( self.ewah_coll)[ifile] + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewah_bool_array *ewah_keys_out = out.ewah_keys + cdef ewah_bool_array *ewah_refn_out = out.ewah_refn + cdef ewah_map *ewah_coll_out = out.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2, swap + cdef np.uint64_t mi1 + # Keys + ewah_keys1[0].logicaland(ewah_keys2[0],ewah_keys_out[0]) + # Refn + ewah_refn1[0].logicaland(ewah_refn2[0],ewah_refn_out[0]) + # Coll + if ewah_refn_out[0].numberOfOnes() > 0: + it_map1 = ewah_coll1[0].begin() + while (it_map1 != ewah_coll1[0].end()): + mi1 = dereference(it_map1).first + it_map2 = ewah_coll2[0].find(mi1) + if it_map2 != ewah_coll2[0].end(): + mi1_ewah1 = dereference(it_map1).second + mi1_ewah2 = dereference(it_map2).second + mi1_ewah1.logicaland(mi1_ewah2, swap) + ewah_coll_out[0][mi1] = swap + preincrement(it_map1) + + def logicaland(self, ifile, solf, out): + return self._logicaland(ifile, solf, out) + + cdef void _select_contaminated(self, np.uint32_t ifile, + BoolArrayCollection mask, np.uint8_t[:] out, + np.uint8_t[:] secondary_files, + BoolArrayCollection mask2 = None): + # Fill mask at indices owned by this file that are also contaminated by + # other files. 
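+ # Concretely: the selector mask (OR'd with mask2 when one is given) is
+ # ANDed with this file's refined-cell bitmask, the matching coarse-level
+ # indices are written into out, and every other file whose keys intersect
+ # that selection is flagged in secondary_files.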
+ cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_bool_array ewah_mask + cdef ewah_bool_array *ewah_mask1 + cdef ewah_bool_array *ewah_mask2 + cdef ewah_bool_array ewah_slct + cdef ewah_bool_array *ewah_file + cdef np.uint64_t iset + # Merge masks as necessary + if mask2 is None: + ewah_mask = ( mask.ewah_keys)[0] + else: + ewah_mask1 = mask.ewah_keys + ewah_mask2 = mask2.ewah_keys + ewah_mask1[0].logicalor(ewah_mask2[0],ewah_mask) + # Get just refined cells owned by this file + ewah_mask.logicaland(ewah_refn[0], ewah_slct) + # Set array values + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_slct.begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_slct.end()) + while iter_set[0] != iter_end[0]: + iset = dereference(iter_set[0]) + out[iset] = 1 + preincrement(iter_set[0]) + # Find files that intersect this one + cdef np.uint32_t isfile + for isfile in range(self.nfiles): + if isfile == ifile: continue + ewah_file = ( self.ewah_keys)[isfile] + if ewah_slct.intersects(ewah_file[0]) == 1: + secondary_files[isfile] = 1 + + cdef void _select_uncontaminated(self, np.uint32_t ifile, + BoolArrayCollection mask, np.uint8_t[:] out, + BoolArrayCollection mask2 = None): + # Fill mask at indices that are owned by this file and no other. + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_bool_array ewah_mask + cdef ewah_bool_array *ewah_mask1 + cdef ewah_bool_array *ewah_mask2 + cdef ewah_bool_array ewah_slct + cdef ewah_bool_array ewah_coar + cdef np.uint64_t iset + # Merge masks if necessary + if mask2 is None: + ewah_mask = ( mask.ewah_keys)[0] + else: + ewah_mask1 = mask.ewah_keys + ewah_mask2 = mask2.ewah_keys + ewah_mask1[0].logicalor(ewah_mask2[0],ewah_mask) + # Get coarse cells owned by this file + ewah_keys[0].logicalxor(ewah_refn[0],ewah_coar) + ewah_coar.logicaland(ewah_mask,ewah_slct) + # Set array elements + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_slct.begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_slct.end()) + while iter_set[0] != iter_end[0]: + iset = dereference(iter_set[0]) + out[iset] = 1 + preincrement(iter_set[0]) + + cdef bytes _dumps(self, np.uint32_t ifile): + # TODO: write word size + cdef sstream ss + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll = ( self.ewah_coll)[ifile] + cdef ewahmap_it it_map + cdef np.uint64_t nrefn, mi1 + cdef ewah_bool_array mi1_ewah + # Write mi1 ewah & refinment ewah + ewah_keys[0].write(ss,1) + ewah_refn[0].write(ss,1) + # Number of refined bool arrays + nrefn = (ewah_refn[0].numberOfOnes()) + ss.write( &nrefn, sizeof(nrefn)) + # Loop over refined bool arrays + it_map = ewah_coll[0].begin() + while it_map != ewah_coll[0].end(): + mi1 = dereference(it_map).first + mi1_ewah = dereference(it_map).second + ss.write( &mi1, sizeof(mi1)) + mi1_ewah.write(ss,1) + preincrement(it_map) + # Return type cast python bytes string + return ss.str() + + cdef bint _loads(self, np.uint32_t ifile, bytes s): + # TODO: write word size + cdef sstream ss + cdef ewah_bool_array *ewah_keys = ( self.ewah_keys)[ifile] + cdef ewah_bool_array *ewah_refn = ( self.ewah_refn)[ifile] + cdef ewah_map *ewah_coll = ( self.ewah_coll)[ifile] + cdef np.uint64_t nrefn, mi1 + nrefn = mi1 = 0 + # Write string to string stream + ss.write(s, len(s)) + # Read keys and refinment arrays + 
ewah_keys[0].read(ss,1) + ewah_refn[0].read(ss,1) + # Read and check number of refined cells + ss.read( (&nrefn), sizeof(nrefn)) + if nrefn != ewah_refn[0].numberOfOnes(): + raise Exception("Error in read. File indicates {} refinements, but bool array has {}.".format(nrefn,ewah_refn[0].numberOfOnes())) + # Loop over refined cells + for _ in range(nrefn): + ss.read( (&mi1), sizeof(mi1)) + ewah_coll[0][mi1].read(ss,1) + # or... + #mi1_ewah.read(ss,1) + #ewah_coll[0][mi1].swap(mi1_ewah) + return 1 + + cdef bint _check(self): + cdef np.uint32_t ifile + cdef ewah_bool_array *ewah_keys + cdef ewah_bool_array *ewah_refn + cdef ewah_bool_array tmp1, tmp2 + cdef np.uint64_t nchk + cdef str msg + # Check individual files + for ifile in range(self.nfiles): + ewah_keys = ( self.ewah_keys)[ifile] + ewah_refn = ( self.ewah_refn)[ifile] + # Check that there are not any refn that are not keys + ewah_keys[0].logicalxor(ewah_refn[0], tmp1) + ewah_refn[0].logicaland(tmp1, tmp2) + nchk = tmp2.numberOfOnes() + if nchk > 0: + msg = "File {}: There are {} refined cells that are not set on coarse level.".format(ifile,nchk) + print(msg) + return 0 + # raise Exception(msg) + return 1 + + def check(self): + return self._check() + + def __dealloc__(self): + for ifile in range(self.nfiles): + del self.ewah_keys[ifile] + del self.ewah_refn[ifile] + del self.ewah_coll[ifile] + + def print_info(self, ifile, prefix=''): + print("{}{: 8d} coarse, {: 8d} refined, {: 8d} total".format( + prefix, + self._count_coarse(ifile), + self._count_refined(ifile), + self._count_total(ifile))) + +cdef class BoolArrayCollection: + + def __cinit__(self): + self.ewah_keys = new ewah_bool_array() + self.ewah_refn = new ewah_bool_array() + self.ewah_coar = new ewah_bool_array() + self.ewah_coll = new ewah_map() + + cdef void _reset(self): + self.ewah_keys[0].reset() + self.ewah_refn[0].reset() + self.ewah_coar[0].reset() + self.ewah_coll[0].clear() + + cdef int _richcmp(self, BoolArrayCollection solf, int op) except -1: + + cdef ewah_bool_array *arr1 + cdef ewah_bool_array *arr2 + cdef ewahmap *map1 + cdef ewahmap *map2 + cdef ewahmap_it it_map1, it_map2 + # == + if op == 2: + # Keys + arr1 = self.ewah_keys + arr2 = solf.ewah_keys + if arr1[0] != arr2[0]: + return 0 + # Refn + arr1 = self.ewah_refn + arr2 = solf.ewah_refn + if arr1[0] != arr2[0]: + return 0 + # Map + map1 = self.ewah_coll + map2 = solf.ewah_coll + it_map1 = map1[0].begin() + while (it_map1 != map1[0].end()): + it_map2 = map2[0].find(dereference(it_map1).first) + if it_map2 == map2[0].end(): + return 0 + if dereference(it_map1).second != dereference(it_map2).second: + return 0 + preincrement(it_map1) + it_map2 =map2[0].begin() + while (it_map2 != map2[0].end()): + it_map1 = map1[0].find(dereference(it_map2).first) + if it_map1 == map1[0].end(): + return 0 + if dereference(it_map2).second != dereference(it_map1).second: + return 0 + preincrement(it_map2) + # Match + return 1 + # != + elif op == 3: + if self._richcmp(solf, 2) == 1: + return 0 + return 1 + else: + return -1 + # options = ['<','<=','==','!=','>','>='] + # raise NotImplementedError("Operator {} is not yet implemented.".format(options[op])) + + def __richcmp__(BoolArrayCollection self, BoolArrayCollection solf, int op): + if self._richcmp(solf, op) == 1: + return True + else: + return False + + cdef void _set(self, np.uint64_t i1, np.uint64_t i2 = FLAG): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + 
ewah_keys[0].set(i1) + # Note the 0 here, for dereferencing + if i2 != FLAG: + ewah_refn[0].set(i1) + ewah_coll[0][i1].set(i2) + + def set(self, i1, i2 = FLAG): + self._set(i1, i2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + def set_from(self, np.uint64_t[:] ids): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef np.uint64_t i + cdef np.uint64_t last = 0 + for i in range(ids.shape[0]): + if ids[i] < last: + raise RuntimeError + self._set(ids[i]) + last = ids[i] + print("Set from %s array and ended up with %s bytes" % ( + ids.size, ewah_keys[0].sizeInBytes())) + + cdef void _set_coarse(self, np.uint64_t i1): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + ewah_keys[0].set(i1) + + def set_coarse(self, i1): + return self._set_coarse(i1) + + cdef void _set_refined(self, np.uint64_t i1, np.uint64_t i2): + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + # Note the 0 here, for dereferencing + ewah_refn[0].set(i1) + ewah_coll[0][i1].set(i2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_coarse_array(self, np.uint8_t[:] arr): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef np.uint64_t i1 + for i1 in range(arr.shape[0]): + if arr[i1] == 1: + ewah_keys[0].set(i1) + # self._set_coarse(i1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_refined_array(self, np.uint64_t i1, np.uint8_t[:] arr): + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + cdef np.uint64_t i2 + for i2 in range(arr.shape[0]): + if arr[i2] == 1: + ewah_refn[0].set(i1) + ewah_coll[0][i1].set(i2) + # self._set_refined(i1, i2) + + def set_refined(self, i1, i2): + return self._set_refined(i1, i2) + + cdef void _set_map(self, np.uint64_t i1, np.uint64_t i2): + cdef ewah_map *ewah_coll = self.ewah_coll + ewah_coll[0][i1].set(i2) + + def set_map(self, i1, i2): + self._set_map(i1, i2) + + cdef void _set_refn(self, np.uint64_t i1): + cdef ewah_bool_array *ewah_refn = self.ewah_refn + ewah_refn[0].set(i1) + + def set_refn(self, i1): + self._set_refn(i1) + + cdef bint _get(self, np.uint64_t i1, np.uint64_t i2 = FLAG): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + # Note the 0 here, for dereferencing + if (ewah_keys[0].get(i1) == 0): return 0 + if (ewah_refn[0].get(i1) == 0) or (i2 == FLAG): + return 1 + return ewah_coll[0][i1].get(i2) + + def get(self, i1, i2 = FLAG): + return self._get(i1, i2) + + cdef bint _get_coarse(self, np.uint64_t i1): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + return ewah_keys[0].get(i1) + + def get_coarse(self, i1): + return self._get_coarse(i1) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _get_coarse_array(self, np.uint64_t imax, np.uint8_t[:] arr) except *: + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_keys[0].begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_keys[0].end()) + cdef np.uint64_t iset + while iter_set[0] != iter_end[0]: + iset = dereference(iter_set[0]) + if iset >= imax: + raise IndexError("Index {} exceedes max {}.".format(iset, imax)) + arr[iset] = 1 + 
preincrement(iter_set[0]) + + def get_coarse_array(self, imax, arr): + return self._get_coarse_array(imax, arr) + + cdef bint _contains(self, np.uint64_t i): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + return ewah_keys[0].get(i) + + def contains(self, np.uint64_t i): + return self._contains(i) + + cdef bint _isref(self, np.uint64_t i): + cdef ewah_bool_array *ewah_refn = self.ewah_refn + return ewah_refn[0].get(i) + + def isref(self, np.uint64_t i): + return self._isref(i) + + cdef void _ewah_coarse(self): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_bool_array *ewah_coar = self.ewah_coar + ewah_coar[0].reset() + ewah_keys[0].logicalxor(ewah_refn[0],ewah_coar[0]) + return + + def ewah_coarse(self): + return self._ewah_coarse() + + cdef np.uint64_t _count_total(self): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef np.uint64_t out = ewah_keys.numberOfOnes() + return out + + def count_total(self): + return self._count_total() + + cdef np.uint64_t _count_refined(self): + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef np.uint64_t out = ewah_refn.numberOfOnes() + return out + + def count_refined(self): + return self._count_refined() + + cdef np.uint64_t _count_coarse(self): + self._ewah_coarse() + cdef ewah_bool_array *ewah_coar = self.ewah_coar + cdef np.uint64_t out = ewah_coar.numberOfOnes() + return out + + def count_coarse(self): + return self._count_coarse() + + cdef void _logicalor(self, BoolArrayCollection solf, BoolArrayCollection out): + cdef ewah_bool_array *ewah_keys1 = self.ewah_keys + cdef ewah_bool_array *ewah_refn1 = self.ewah_refn + cdef ewahmap *ewah_coll1 = self.ewah_coll + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewah_bool_array *ewah_keys3 = out.ewah_keys + cdef ewah_bool_array *ewah_refn3 = out.ewah_refn + cdef ewahmap *ewah_coll3 = out.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2 + cdef np.uint64_t mi1 + # Keys + ewah_keys1[0].logicalor(ewah_keys2[0], ewah_keys3[0]) + # Refined + ewah_refn1[0].logicalor(ewah_refn2[0], ewah_refn3[0]) + # Map + it_map1 = ewah_coll1[0].begin() + while it_map1 != ewah_coll1[0].end(): + mi1 = dereference(it_map1).first + mi1_ewah1 = dereference(it_map1).second + ewah_coll3[0][mi1] = mi1_ewah1 + preincrement(it_map1) + it_map2 = ewah_coll2[0].begin() + while it_map2 != ewah_coll2[0].end(): + mi1 = dereference(it_map2).first + mi1_ewah2 = dereference(it_map2).second + it_map1 = ewah_coll1[0].find(mi1) + if it_map1 != ewah_coll1[0].end(): + mi1_ewah1 = dereference(it_map1).second + mi1_ewah1.logicalor(mi1_ewah2, ewah_coll3[0][mi1]) + else: + ewah_coll3[0][mi1] = mi1_ewah2 + preincrement(it_map2) + + cdef void _append(self, BoolArrayCollection solf): + cdef ewah_bool_array *ewah_keys1 = self.ewah_keys + cdef ewah_bool_array *ewah_refn1 = self.ewah_refn + cdef ewahmap *ewah_coll1 = self.ewah_coll + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array swap, mi1_ewah1, mi1_ewah2 + cdef np.uint64_t mi1 + # Keys + ewah_keys1[0].logicalor(ewah_keys2[0], swap) + ewah_keys1[0].swap(swap) + # Refined + ewah_refn1[0].logicalor(ewah_refn2[0], swap) + ewah_refn1[0].swap(swap) + # Map + it_map2 = ewah_coll2[0].begin() + while it_map2 != ewah_coll2[0].end(): + mi1 
= dereference(it_map2).first + mi1_ewah2 = dereference(it_map2).second + it_map1 = ewah_coll1[0].find(mi1) + if it_map1 == ewah_coll1[0].end(): + ewah_coll1[0][mi1] = mi1_ewah2 + else: + mi1_ewah1 = dereference(it_map1).second + mi1_ewah1.logicalor(mi1_ewah2, swap) + mi1_ewah1.swap(swap) + preincrement(it_map2) + + def append(self, solf): + return self._append(solf) + + cdef bint _intersects(self, BoolArrayCollection solf): + cdef ewah_bool_array *ewah_keys1 = self.ewah_keys + cdef ewah_bool_array *ewah_refn1 = self.ewah_refn + cdef ewahmap *ewah_coll1 = self.ewah_coll + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2 + cdef np.uint64_t mi1 + cdef ewah_bool_array ewah_coar1, ewah_coar2 + # No intersection + if ewah_keys1[0].intersects(ewah_keys2[0]) == 0: + return 0 + # Intersection at coarse level + ewah_keys1[0].logicalxor(ewah_refn1[0],ewah_coar1) + ewah_keys2[0].logicalxor(ewah_refn2[0],ewah_coar2) + if ewah_coar1.intersects(ewah_keys2[0]) == 1: + return 1 + if ewah_coar2.intersects(ewah_keys1[0]) == 1: + return 1 + # Intersection at refined level + if ewah_refn1[0].intersects(ewah_refn2[0]) == 1: + it_map1 = ewah_coll1[0].begin() + while (it_map1 != ewah_coll1[0].end()): + mi1 = dereference(it_map1).first + it_map2 = ewah_coll2[0].find(mi1) + if it_map2 != ewah_coll2[0].end(): + mi1_ewah1 = dereference(it_map1).second + mi1_ewah2 = dereference(it_map2).second + if mi1_ewah1.intersects(mi1_ewah2): + return 1 + preincrement(it_map1) + return 0 + + cdef void _logicalxor(self, BoolArrayCollection solf, BoolArrayCollection out): + cdef ewah_bool_array *ewah_keys1 = self.ewah_keys + cdef ewah_bool_array *ewah_refn1 = self.ewah_refn + cdef ewah_map *ewah_coll1 = self.ewah_coll + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewah_bool_array *ewah_keys_out = out.ewah_keys + cdef ewah_bool_array *ewah_refn_out = out.ewah_refn + cdef ewah_map *ewah_coll_out = out.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2, swap + cdef np.uint64_t mi1 + # Keys + ewah_keys1[0].logicalxor(ewah_keys2[0],ewah_keys_out[0]) + # Refn + ewah_refn1[0].logicalxor(ewah_refn2[0],ewah_refn_out[0]) + # Coll + it_map1 = ewah_coll1[0].begin() + while (it_map1 != ewah_coll1[0].end()): + mi1 = dereference(it_map1).first + mi1_ewah1 = dereference(it_map1).second + it_map2 = ewah_coll2[0].find(mi1) + if it_map2 == ewah_coll2[0].end(): + ewah_coll_out[0][mi1] = mi1_ewah1 + else: + mi1_ewah2 = dereference(it_map2).second + mi1_ewah1.logicalxor(mi1_ewah2, swap) + ewah_coll_out[0][mi1] = swap + preincrement(it_map1) + it_map2 = ewah_coll2[0].begin() + while (it_map2 != ewah_coll2[0].end()): + mi1 = dereference(it_map2).first + mi1_ewah2 = dereference(it_map2).second + it_map1 = ewah_coll1[0].find(mi1) + if it_map1 == ewah_coll1[0].end(): + ewah_coll_out[0][mi1] = mi1_ewah2 + preincrement(it_map2) + + def logicalxor(self, solf, out): + return self._logicalxor(solf, out) + + cdef void _logicaland(self, BoolArrayCollection solf, BoolArrayCollection out): + cdef ewah_bool_array *ewah_keys1 = self.ewah_keys + cdef ewah_bool_array *ewah_refn1 = self.ewah_refn + cdef ewah_map *ewah_coll1 = self.ewah_coll + cdef ewah_bool_array *ewah_keys2 = solf.ewah_keys + cdef ewah_bool_array *ewah_refn2 = solf.ewah_refn + cdef 
ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewah_bool_array *ewah_keys_out = out.ewah_keys + cdef ewah_bool_array *ewah_refn_out = out.ewah_refn + cdef ewah_map *ewah_coll_out = out.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2, swap + cdef np.uint64_t mi1 + # Keys + ewah_keys1[0].logicaland(ewah_keys2[0],ewah_keys_out[0]) + # Refn + ewah_refn1[0].logicaland(ewah_refn2[0],ewah_refn_out[0]) + # Coll + if ewah_refn_out[0].numberOfOnes() > 0: + it_map1 = ewah_coll1[0].begin() + while (it_map1 != ewah_coll1[0].end()): + mi1 = dereference(it_map1).first + mi1_ewah1 = dereference(it_map1).second + it_map2 = ewah_coll2[0].find(mi1) + if it_map2 != ewah_coll2[0].end(): + mi1_ewah2 = dereference(it_map2).second + mi1_ewah1.logicaland(mi1_ewah2, swap) + ewah_coll_out[0][mi1] = swap + preincrement(it_map1) + + def logicaland(self, solf, out): + return self._logicaland(solf, out) + + cdef void _select_contaminated(self, BoolArrayCollection mask, np.uint8_t[:] out, + BoolArrayCollection mask2 = None): + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_bool_array ewah_mask + cdef ewah_bool_array *ewah_mask1 + cdef ewah_bool_array *ewah_mask2 + if mask2 is None: + ewah_mask = ( mask.ewah_keys)[0] + else: + ewah_mask1 = mask.ewah_keys + ewah_mask2 = mask2.ewah_keys + ewah_mask1[0].logicalor(ewah_mask2[0],ewah_mask) + cdef ewah_bool_array ewah_slct + ewah_refn[0].logicaland(ewah_mask,ewah_slct) + cdef np.uint64_t iset + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_slct.begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_slct.end()) + while iter_set[0] != iter_end[0]: + iset = dereference(iter_set[0]) + out[iset] = 1 + preincrement(iter_set[0]) + + cdef void _select_uncontaminated(self, BoolArrayCollection mask, np.uint8_t[:] out, + BoolArrayCollection mask2 = None): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_bool_array ewah_mask + cdef ewah_bool_array *ewah_mask1 + cdef ewah_bool_array *ewah_mask2 + if mask2 is None: + ewah_mask = ( mask.ewah_keys)[0] + else: + ewah_mask1 = mask.ewah_keys + ewah_mask2 = mask2.ewah_keys + ewah_mask1[0].logicalor(ewah_mask2[0],ewah_mask) + cdef ewah_bool_array ewah_slct + cdef ewah_bool_array ewah_coar + ewah_keys[0].logicalxor(ewah_refn[0],ewah_coar) + ewah_coar.logicaland(ewah_mask,ewah_slct) + cdef np.uint64_t iset + cdef ewah_bool_iterator *iter_set = new ewah_bool_iterator(ewah_slct.begin()) + cdef ewah_bool_iterator *iter_end = new ewah_bool_iterator(ewah_slct.end()) + while iter_set[0] != iter_end[0]: + iset = dereference(iter_set[0]) + out[iset] = 1 + preincrement(iter_set[0]) + + cdef void _get_ghost_zones(self, int ngz, int order1, int order2, + bint periodicity[3], BoolArrayCollection out_ewah, + bint coarse_ghosts = 0): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewahmap *ewah_coll = self.ewah_coll + cdef ewah_bool_iterator *iter_set1 = new ewah_bool_iterator(ewah_keys.begin()) + cdef ewah_bool_iterator *iter_end1 = new ewah_bool_iterator(ewah_keys.end()) + cdef ewah_bool_iterator *iter_set2 + cdef ewah_bool_iterator *iter_end2 + cdef np.uint64_t max_index1 = (1 << order1) + cdef np.uint64_t max_index2 = (1 << order2) + cdef np.uint64_t nele1 = (max_index1**3) + cdef np.uint64_t nele2 = (max_index2**3) + cdef BoolArrayCollectionUncompressed temp_bool = BoolArrayCollectionUncompressed(nele1, nele2) + cdef 
BoolArrayCollectionUncompressed out_bool = BoolArrayCollectionUncompressed(nele1, nele2) + cdef np.uint64_t mi1, mi2, mi1_n, mi2_n + cdef np.uint32_t ntot, i + cdef void* pointers[7] + pointers[0] = malloc( sizeof(np.int32_t) * (2*ngz+1)*3) + pointers[1] = malloc( sizeof(np.uint64_t) * (2*ngz+1)*3) + pointers[2] = malloc( sizeof(np.uint64_t) * (2*ngz+1)*3) + pointers[3] = malloc( sizeof(np.uint64_t) * (2*ngz+1)**3) + pointers[4] = malloc( sizeof(np.uint64_t) * (2*ngz+1)**3) + pointers[5] = malloc( sizeof(np.uint8_t) * nele1) + pointers[6] = malloc( sizeof(np.uint8_t) * nele2) + cdef np.uint32_t[:,:] index = pointers[0] + cdef np.uint64_t[:,:] ind1_n = pointers[1] + cdef np.uint64_t[:,:] ind2_n = pointers[2] + cdef np.uint64_t[:] neighbor_list1 = pointers[3] + cdef np.uint64_t[:] neighbor_list2 = pointers[4] + cdef np.uint8_t *bool_keys = pointers[5] + cdef np.uint8_t *bool_coll = pointers[6] + cdef SparseUnorderedRefinedBitmaskSet list_coll = SparseUnorderedRefinedBitmaskSet() + for i in range(nele1): + bool_keys[i] = 0 + while iter_set1[0] != iter_end1[0]: + mi1 = dereference(iter_set1[0]) + if (coarse_ghosts == 1) or (ewah_refn[0].get(mi1) == 0): + # Coarse neighbors + ntot = morton_neighbors_coarse(mi1, max_index1, periodicity, ngz, + index, ind1_n, neighbor_list1) + for i in range(ntot): + mi1_n = neighbor_list1[i] + if ewah_keys[0].get(mi1_n) == 0: + bool_keys[mi1_n] = 1 + else: + for i in range(nele2): + bool_coll[i] = 0 + # Refined neighbors + iter_set2 = new ewah_bool_iterator(ewah_coll[0][mi1].begin()) + iter_end2 = new ewah_bool_iterator(ewah_coll[0][mi1].end()) + while iter_set2[0] != iter_end2[0]: + mi2 = dereference(iter_set2[0]) + ntot = morton_neighbors_refined(mi1, mi2, + max_index1, max_index2, + periodicity, ngz, index, + ind1_n, ind2_n, + neighbor_list1, + neighbor_list2) + for i in range(ntot): + mi1_n = neighbor_list1[i] + mi2_n = neighbor_list2[i] + if mi1_n == mi1: + if ewah_coll[0][mi1].get(mi2_n) == 0: + bool_keys[mi1_n] = 1 + bool_coll[mi2_n] = 1 + else: + if ewah_refn[0].get(mi1_n) == 1: + if ewah_coll[0][mi1_n].get(mi2_n) == 0: + bool_keys[mi1_n] = 1 + list_coll._set(mi1_n, mi2_n) + else: + if ewah_keys[0].get(mi1_n) == 0: + bool_keys[mi1_n] = 1 + preincrement(iter_set2[0]) + # Add to running list + temp_bool._set_refined_array_ptr(mi1, bool_coll) + preincrement(iter_set1[0]) + # Set keys + out_bool._set_coarse_array_ptr(bool_keys) + list_coll._fill_bool(out_bool) + out_bool._append(temp_bool) + out_bool._compress(out_ewah) + # Free things + for i in range(7): + free(pointers[i]) + + cdef bytes _dumps(self): + # TODO: write word size + cdef sstream ss + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewahmap *ewah_coll = self.ewah_coll + cdef ewahmap_it it_map + cdef np.uint64_t nrefn, mi1 + cdef ewah_bool_array mi1_ewah + # Write mi1 ewah & refinment ewah + ewah_keys[0].write(ss,1) + ewah_refn[0].write(ss,1) + # Number of refined bool arrays + nrefn = (ewah_refn[0].numberOfOnes()) + ss.write( &nrefn, sizeof(nrefn)) + # Loop over refined bool arrays + it_map = ewah_coll[0].begin() + while it_map != ewah_coll[0].end(): + mi1 = dereference(it_map).first + mi1_ewah = dereference(it_map).second + ss.write( &mi1, sizeof(mi1)) + mi1_ewah.write(ss,1) + preincrement(it_map) + # Return type cast python bytes string + return ss.str() + + def dumps(self): + return self._dumps() + + cdef bint _loads(self, bytes s): + # TODO: write word size + cdef sstream ss + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef 
ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewahmap *ewah_coll = self.ewah_coll + cdef np.uint64_t nrefn, mi1 + nrefn = mi1 = 0 + # Write string to string stream + ss.write(s, len(s)) + # Read keys and refinment arrays + ewah_keys[0].read(ss,1) + ewah_refn[0].read(ss,1) + # Read and check number of refined cells + ss.read( (&nrefn), sizeof(nrefn)) + if nrefn != ewah_refn[0].numberOfOnes(): + raise Exception("Error in read. File indicates {} refinements, but bool array has {}.".format(nrefn,ewah_refn[0].numberOfOnes())) + # Loop over refined cells + for _ in range(nrefn): + ss.read( (&mi1), sizeof(mi1)) + ewah_coll[0][mi1].read(ss,1) + # or... + #mi1_ewah.read(ss,1) + #ewah_coll[0][mi1].swap(mi1_ewah) + return 1 + + def loads(self, s): + return self._loads(s) + + def save(self, fname): + cdef bytes serial_BAC + f = open(fname,'wb') + serial_BAC = self._dumps() + f.write(struct.pack('Q',len(serial_BAC))) + f.write(serial_BAC) + f.close() + + def load(self, fname): + cdef np.uint64_t size_serial + cdef bint flag_read + f = open(fname,'rb') + size_serial, = struct.unpack('Q',f.read(struct.calcsize('Q'))) + flag_read = self._loads(f.read(size_serial)) + f.close() + return flag_read + + cdef bint _check(self): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_bool_array tmp1, tmp2 + cdef np.uint64_t nchk + cdef str msg + # Check that there are not any refn that are not keys + ewah_keys[0].logicalxor(ewah_refn[0], tmp1) + ewah_refn[0].logicaland(tmp1, tmp2) + nchk = tmp2.numberOfOnes() + if nchk > 0: + msg = "There are {} refined cells that are not set on coarse level.".format(nchk) + print(msg) + return 0 + # raise Exception(msg) + return 1 + + def __dealloc__(self): + cdef ewah_bool_array *ewah_keys = self.ewah_keys + cdef ewah_bool_array *ewah_refn = self.ewah_refn + cdef ewah_bool_array *ewah_coar = self.ewah_coar + cdef ewah_map *ewah_coll = self.ewah_coll + del ewah_keys + del ewah_refn + del ewah_coar + del ewah_coll + + def print_info(self, prefix=''): + print("{}{: 8d} coarse, {: 8d} refined, {: 8d} total".format(prefix, + self._count_coarse(), + self._count_refined(), + self._count_total())) + +cdef class BoolArrayCollectionUncompressed: + + def __cinit__(self, np.uint64_t nele1, np.uint64_t nele2): + self.nele1 = nele1 + self.nele2 = nele2 + self.ewah_coll = new ewah_map() + cdef np.uint64_t i + self.ewah_keys = malloc(sizeof(bitarrtype)*nele1) + self.ewah_refn = malloc(sizeof(bitarrtype)*nele1) + for i in range(nele1): + self.ewah_keys[i] = 0 + self.ewah_refn[i] = 0 + + def reset(self): + self.__dealloc__() + self.__init__(self.nele1,self.nele2) + + cdef void _compress(self, BoolArrayCollection solf): + cdef np.uint64_t i + cdef ewah_bool_array *ewah_keys = solf.ewah_keys + cdef ewah_bool_array *ewah_refn = solf.ewah_refn + cdef bitarrtype *bool_keys = self.ewah_keys + cdef bitarrtype *bool_refn = self.ewah_refn + for i in range(self.nele1): + if bool_keys[i] == 1: + ewah_keys[0].set(i) + if bool_refn[i] == 1: + ewah_refn[0].set(i) + cdef ewah_map *ewah_coll1 = self.ewah_coll + cdef ewah_map *ewah_coll2 = solf.ewah_coll + ewah_coll2[0] = ewah_coll1[0] + + cdef void _set(self, np.uint64_t i1, np.uint64_t i2 = FLAG): + cdef bitarrtype *ewah_keys = self.ewah_keys + cdef bitarrtype *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + ewah_keys[i1] = 1 + # Note the 0 here, for dereferencing + if i2 != FLAG: + ewah_refn[i1] = 1 + ewah_coll[0][i1].set(i2) + + cdef void _set_coarse(self, np.uint64_t 
i1): + cdef bitarrtype *ewah_keys = self.ewah_keys + ewah_keys[i1] = 1 + + cdef void _set_refined(self, np.uint64_t i1, np.uint64_t i2): + cdef bitarrtype *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + # Note the 0 here, for dereferencing + ewah_refn[i1] = 1 + ewah_coll[0][i1].set(i2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_coarse_array(self, np.uint8_t[:] arr): + cdef bitarrtype *ewah_keys = self.ewah_keys + cdef np.uint64_t i1 + for i1 in range(arr.shape[0]): + if arr[i1] == 1: + ewah_keys[i1] = 1 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_coarse_array_ptr(self, np.uint8_t *arr): + # TODO: memcpy? + cdef bitarrtype *ewah_keys = self.ewah_keys + cdef np.uint64_t i1 + for i1 in range(self.nele1): + if arr[i1] == 1: + ewah_keys[i1] = 1 + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_refined_array(self, np.uint64_t i1, np.uint8_t[:] arr): + cdef bitarrtype *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + cdef np.uint64_t i2 + for i2 in range(arr.shape[0]): + if arr[i2] == 1: + ewah_refn[i1] = 1 + ewah_coll[0][i1].set(i2) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + @cython.initializedcheck(False) + cdef void _set_refined_array_ptr(self, np.uint64_t i1, np.uint8_t *arr): + cdef bitarrtype *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + cdef np.uint64_t i2 + cdef ewah_bool_array *barr = &ewah_coll[0][i1] + for i2 in range(self.nele2): + if arr[i2] == 1: + ewah_refn[i1] = 1 + barr.set(i2) + + cdef void _set_map(self, np.uint64_t i1, np.uint64_t i2): + cdef ewah_map *ewah_coll = self.ewah_coll + ewah_coll[0][i1].set(i2) + + cdef void _set_refn(self, np.uint64_t i1): + cdef bitarrtype *ewah_refn = self.ewah_refn + ewah_refn[i1] = 1 + + cdef bint _get(self, np.uint64_t i1, np.uint64_t i2 = FLAG): + cdef bitarrtype *ewah_keys = self.ewah_keys + cdef bitarrtype *ewah_refn = self.ewah_refn + cdef ewah_map *ewah_coll = self.ewah_coll + # Note the 0 here, for dereferencing + if ewah_keys[i1] == 0: return 0 + if (ewah_refn[i1] == 0) or (i2 == FLAG): + return 1 + return ewah_coll[0][i1].get(i2) + + cdef bint _get_coarse(self, np.uint64_t i1): + cdef bitarrtype *ewah_keys = self.ewah_keys + return ewah_keys[i1] + # if (ewah_keys[i1] == 0): return 0 + # return 1 + + cdef bint _isref(self, np.uint64_t i): + cdef bitarrtype *ewah_refn = self.ewah_refn + return ewah_refn[i] + + cdef np.uint64_t _count_total(self): + cdef bitarrtype *ewah_keys = self.ewah_keys + cdef np.uint64_t i + cdef np.uint64_t out = 0 + for i in range(self.nele1): + out += ewah_keys[i] + return out + + cdef np.uint64_t _count_refined(self): + cdef bitarrtype *ewah_refn = self.ewah_refn + cdef np.uint64_t i + cdef np.uint64_t out = 0 + for i in range(self.nele1): + out += ewah_refn[i] + return out + + cdef void _append(self, BoolArrayCollectionUncompressed solf): + cdef bitarrtype *ewah_keys1 = self.ewah_keys + cdef bitarrtype *ewah_refn1 = self.ewah_refn + cdef bitarrtype *ewah_keys2 = solf.ewah_keys + cdef bitarrtype *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll1 = self.ewah_coll + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array swap, mi1_ewah1, mi1_ewah2 + cdef np.uint64_t mi1 + # TODO: 
Check if nele1 is equal? + # Keys + for mi1 in range(solf.nele1): + if ewah_keys2[mi1] == 1: + ewah_keys1[mi1] = 1 + # Refined + for mi1 in range(solf.nele1): + if ewah_refn2[mi1] == 1: + ewah_refn1[mi1] = 1 + # Map + it_map2 = ewah_coll2[0].begin() + while it_map2 != ewah_coll2[0].end(): + mi1 = dereference(it_map2).first + mi1_ewah2 = dereference(it_map2).second + it_map1 = ewah_coll1[0].find(mi1) + if it_map1 == ewah_coll1[0].end(): + ewah_coll1[0][mi1] = mi1_ewah2 + else: + mi1_ewah1 = dereference(it_map1).second + mi1_ewah1.logicalor(mi1_ewah2, swap) + mi1_ewah1.swap(swap) + preincrement(it_map2) + + cdef bint _intersects(self, BoolArrayCollectionUncompressed solf): + cdef bitarrtype *ewah_keys1 = self.ewah_keys + cdef bitarrtype *ewah_refn1 = self.ewah_refn + cdef bitarrtype *ewah_keys2 = solf.ewah_keys + cdef bitarrtype *ewah_refn2 = solf.ewah_refn + cdef ewahmap *ewah_coll1 = self.ewah_coll + cdef ewahmap *ewah_coll2 = solf.ewah_coll + cdef ewahmap_it it_map1, it_map2 + cdef ewah_bool_array mi1_ewah1, mi1_ewah2 + cdef np.uint64_t mi1 + # No intersection + for mi1 in range(self.nele1): + if (ewah_keys1[mi1] == 1) and (ewah_keys2[mi1] == 1): + break + if (mi1 < self.nele1): + return 0 + mi1 = self.nele1 # This is to get rid of a warning + # Intersection at refined level + for mi1 in range(self.nele1): + if (ewah_refn1[mi1] == 1) and (ewah_refn2[mi1] == 1): + it_map1 = ewah_coll1[0].begin() + while (it_map1 != ewah_coll1[0].end()): + mi1 = dereference(it_map1).first + it_map2 = ewah_coll2[0].find(mi1) + if it_map2 != ewah_coll2[0].end(): + mi1_ewah1 = dereference(it_map1).second + mi1_ewah2 = dereference(it_map2).second + if mi1_ewah1.intersects(mi1_ewah2): + return 1 + preincrement(it_map1) + break + # Intersection at coarse level or refined inside coarse + if mi1 == self.nele1: + return 1 + return 0 + + def __dealloc__(self): + cdef bitarrtype *ewah_keys = self.ewah_keys + cdef bitarrtype *ewah_refn = self.ewah_refn + free(ewah_keys) + free(ewah_refn) + cdef ewah_map *ewah_coll = self.ewah_coll + del ewah_coll + + def print_info(self, prefix=''): + cdef np.uint64_t nrefn = self._count_refined() + cdef np.uint64_t nkeys = self._count_total() + print("{}{: 8d} coarse, {: 8d} refined, {: 8d} total".format(prefix, + nkeys - nrefn, + nrefn, + nkeys)) + + + +# Vector version +cdef class SparseUnorderedBitmaskVector: + def __cinit__(self): + self.total = 0 + + cdef void _set(self, np.uint64_t ind): + self.entries.push_back(ind) + self.total += 1 + + def set(self, ind): + self._set(ind) + + cdef void _fill(self, np.uint8_t[:] mask): + cdef np.uint64_t i, ind + for i in range(self.entries.size()): + ind = self.entries[i] + mask[ind] = 1 + + cdef void _fill_ewah(self, BoolArrayCollection mm): + self._remove_duplicates() + cdef np.uint64_t i, ind + for i in range(self.entries.size()): + ind = self.entries[i] + mm._set_coarse(ind) + + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm): + self._remove_duplicates() + cdef np.uint64_t i, ind + for i in range(self.entries.size()): + ind = self.entries[i] + mm._set_coarse(ind) + + cdef void _reset(self): + self.entries.erase(self.entries.begin(), self.entries.end()) + self.total = 0 + + cdef to_array(self): + self._remove_duplicates() + cdef np.ndarray[np.uint64_t, ndim=1] rv + rv = np.empty(self.entries.size(), dtype='uint64') + for i in range(self.entries.size()): + rv[i] = self.entries[i] + return rv + + cdef void _remove_duplicates(self): + cdef vector[np.uint64_t].iterator last + sort(self.entries.begin(), self.entries.end()) + last 
= unique(self.entries.begin(), self.entries.end()) + self.entries.erase(last, self.entries.end()) + + cdef void _prune(self): + if self.total > MAX_VECTOR_SIZE: + self._remove_duplicates() + self.total = 0 + + def __dealloc__(self): + self.entries.clear() + +# Set version +cdef class SparseUnorderedBitmaskSet: + cdef void _set(self, np.uint64_t ind): + self.entries.insert(ind) + + def set(self, ind): + self._set(ind) + + cdef void _fill(self, np.uint8_t[:] mask): + for it in self.entries: + mask[it] = 1 + + cdef void _fill_ewah(self, BoolArrayCollection mm): + for it in self.entries: + mm._set_coarse(it) + + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm): + for it in self.entries: + mm._set_coarse(it) + + cdef void _reset(self): + self.entries.clear() + + cdef to_array(self): + cdef np.uint64_t ind + cdef np.ndarray[np.uint64_t, ndim=1] rv + cdef cset[np.uint64_t].iterator it + rv = np.empty(self.entries.size(), dtype='uint64') + it = self.entries.begin() + i = 0 + while it != self.entries.end(): + ind = dereference(it) + rv[i] = ind + preincrement(it) + i += 1 + return rv + + def __dealloc__(self): + self.entries.clear() + +# vector version +cdef class SparseUnorderedRefinedBitmaskVector: + def __cinit__(self): + self.total = 0 + + cdef void _set(self, np.uint64_t ind1, np.uint64_t ind2): + cdef ind_pair ind + ind.first = ind1 + ind.second = ind2 + self.entries.push_back(ind) + self.total += 1 + + def set(self, ind1, ind2): + self._set(ind1, ind2) + + cdef void _fill(self, np.uint8_t[:] mask1, np.uint8_t[:] mask2): + for it in self.entries: + mask1[it.first] = mask2[it.second] = 1 + + cdef void _fill_ewah(self, BoolArrayCollection mm): + self._remove_duplicates() + for it in self.entries: + mm._set_refined(it.first, it.second) + + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm): + self._remove_duplicates() + for it in self.entries: + mm._set_refined(it.first, it.second) + + cdef void _reset(self): + self.entries.erase(self.entries.begin(), self.entries.end()) + self.total = 0 + + cdef to_array(self): + cdef np.uint64_t i + cdef np.ndarray[np.uint64_t, ndim=2] rv + self._remove_duplicates() + rv = np.empty((self.entries.size(),2),dtype='uint64') + i = 0 + for it in self.entries: + rv[i,0] = it.first + rv[i,1] = it.second + i += 1 + return rv + + cdef void _remove_duplicates(self): + cdef vector[ind_pair].iterator last + sort(self.entries.begin(), self.entries.end()) + last = unique(self.entries.begin(), self.entries.end()) + self.entries.erase(last, self.entries.end()) + # http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array + # cdef np.ndarray[np.uint64_t, ndim=2] rv + # cdef np.ndarray[np.uint64_t, ndim=2] rv_uni + # cdef np.uint64_t m + # cdef vector[np.uint64_t].iterator last1 + # cdef vector[np.uint64_t].iterator last2 + # # cdef np.ndarray[np.uint64_t, ndim=1] _ + # cdef vector[np.uint64_t] *entries1 = self.entries1 + # cdef vector[np.uint64_t] *entries2 = self.entries2 + # rv = np.empty((entries1[0].size(),2),dtype='uint64') + # for i in range(entries1[0].size()): + # rv[i,0] = entries1[0][i] + # rv[i,1] = entries2[0][i] + # rv_uni = np.unique(np.ascontiguousarray(rv).view(np.dtype((np.void, rv.dtype.itemsize * rv.shape[1])))).view(rv.dtype).reshape(-1,rv.shape[1]) + # last1 = entries1[0].begin() + rv_uni.shape[0] + # last2 = entries2[0].begin() + rv_uni.shape[0] + # for m in range(rv_uni.shape[0]): + # entries1[0][m] = rv_uni[m,0] + # entries2[0][m] = rv_uni[m,1] + # entries1[0].erase(last1, entries1[0].end()) + # 
entries2[0].erase(last2, entries2[0].end()) + + cdef void _prune(self): + if self.total > MAX_VECTOR_SIZE: + self._remove_duplicates() + self.total = 0 + + def __dealloc__(self): + self.entries.clear() + +# Set version +cdef class SparseUnorderedRefinedBitmaskSet: + cdef void _set(self, np.uint64_t ind1, np.uint64_t ind2): + cdef ind_pair ind + ind.first = ind1 + ind.second = ind2 + self.entries.insert(ind) + + def set(self, ind1, ind2): + self._set(ind1, ind2) + + cdef void _fill(self, np.uint8_t[:] mask1, np.uint8_t[:] mask2): + for p in self.entries: + mask1[p.first] = mask2[p.second] = 1 + + cdef void _fill_ewah(self, BoolArrayCollection mm): + for it in self.entries: + mm._set_refined(it.first, it.second) + + cdef void _fill_bool(self, BoolArrayCollectionUncompressed mm): + for it in self.entries: + mm._set_refined(it.first, it.second) + + cdef void _reset(self): + self.entries.clear() + + cdef to_array(self): + cdef np.uint64_t i + cdef np.ndarray[np.uint64_t, ndim=2] rv + rv = np.empty((self.entries.size(),2),dtype='uint64') + i = 0 + for it in self.entries: + rv[i,0] = it.first + rv[i,1] = it.second + i += 1 + return rv + + def __dealloc__(self): + self.entries.clear() diff --git a/yt/utilities/lib/ewahboolarray/LICENSE b/yt/utilities/lib/ewahboolarray/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/yt/utilities/lib/ewahboolarray/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/yt/utilities/lib/ewahboolarray/README b/yt/utilities/lib/ewahboolarray/README new file mode 100644 index 00000000000..b86d316c9ad --- /dev/null +++ b/yt/utilities/lib/ewahboolarray/README @@ -0,0 +1,8 @@ +This code was bundled from the EWAHBoolArray project by Daniel Lemire, +available at: + +https://github.com/lemire/EWAHBoolArray + +Currently this is at revision 88b25a3345b82353ccd97a7de6064e6c179a7cc2 + +This code is available under the Apache2.0 license. diff --git a/yt/utilities/lib/ewahboolarray/boolarray.h b/yt/utilities/lib/ewahboolarray/boolarray.h new file mode 100644 index 00000000000..4a607adf7d4 --- /dev/null +++ b/yt/utilities/lib/ewahboolarray/boolarray.h @@ -0,0 +1,488 @@ +/** + * This code is released under the + * Apache License Version 2.0 http://www.apache.org/licenses/. 
+ * + * (c) Daniel Lemire, http://lemire.me/en/ + */ + +#ifndef BOOLARRAY_H +#define BOOLARRAY_H +#include // mostly for Microsoft compilers +#include +#include +#include +#include +#include +#include + +// uncomment this for debugging +//#define EWAHASSERT + +/** + * A dynamic bitset implementation. (without compression). + */ +template class BoolArray { +public: + BoolArray(const size_t n, const uword initval = 0) + : buffer(n / wordinbits + (n % wordinbits == 0 ? 0 : 1), initval), + sizeinbits(n) {} + + BoolArray() : buffer(), sizeinbits(0) {} + + BoolArray(const BoolArray &ba) + : buffer(ba.buffer), sizeinbits(ba.sizeinbits) {} + static BoolArray bitmapOf(size_t n, ...) { + BoolArray ans; + va_list vl; + va_start(vl, n); + for (size_t i = 0; i < n; i++) { + ans.set(static_cast(va_arg(vl, int))); + } + va_end(vl); + return ans; + } + size_t sizeInBytes() const { return buffer.size() * sizeof(uword); } + + void read(std::istream &in) { + sizeinbits = 0; + in.read(reinterpret_cast(&sizeinbits), sizeof(sizeinbits)); + buffer.resize(sizeinbits / wordinbits + + (sizeinbits % wordinbits == 0 ? 0 : 1)); + if (buffer.size() == 0) + return; + in.read(reinterpret_cast(&buffer[0]), + static_cast(buffer.size() * sizeof(uword))); + } + + void readBuffer(std::istream &in, const size_t size) { + buffer.resize(size); + sizeinbits = size * sizeof(uword) * 8; + if (buffer.empty()) + return; + in.read(reinterpret_cast(&buffer[0]), + buffer.size() * sizeof(uword)); + } + + void setSizeInBits(const size_t sizeib) { sizeinbits = sizeib; } + + void write(std::ostream &out) { write(out, sizeinbits); } + + void write(std::ostream &out, const size_t numberofbits) const { + const size_t size = + numberofbits / wordinbits + (numberofbits % wordinbits == 0 ? 0 : 1); + out.write(reinterpret_cast(&numberofbits), + sizeof(numberofbits)); + if (numberofbits == 0) + return; + out.write(reinterpret_cast(&buffer[0]), + static_cast(size * sizeof(uword))); + } + + void writeBuffer(std::ostream &out, const size_t numberofbits) const { + const size_t size = + numberofbits / wordinbits + (numberofbits % wordinbits == 0 ? 0 : 1); + if (size == 0) + return; +#ifdef EWAHASSERT + assert(buffer.size() >= size); +#endif + out.write(reinterpret_cast(&buffer[0]), size * sizeof(uword)); + } + + size_t sizeOnDisk() const { + size_t size = + sizeinbits / wordinbits + (sizeinbits % wordinbits == 0 ? 
0 : 1); + return sizeof(sizeinbits) + size * sizeof(uword); + } + + BoolArray &operator=(const BoolArray &x) { + this->buffer = x.buffer; + this->sizeinbits = x.sizeinbits; + return *this; + } + + bool operator==(const BoolArray &x) const { + if (sizeinbits != x.sizeinbits) + return false; + for (size_t k = 0; k < buffer.size(); ++k) + if (buffer[k] != x.buffer[k]) + return false; + return true; + } + + bool operator!=(const BoolArray &x) const { return !operator==(x); } + + void setWord(const size_t pos, const uword val) { +#ifdef EWAHASSERT + assert(pos < buffer.size()); +#endif + buffer[pos] = val; + } + + void addWord(const uword val) { + if (sizeinbits % wordinbits != 0) + throw std::invalid_argument("you probably didn't want to do this"); + sizeinbits += wordinbits; + buffer.push_back(val); + } + + uword getWord(const size_t pos) const { +#ifdef EWAHASSERT + assert(pos < buffer.size()); +#endif + return buffer[pos]; + } + + /** + * set to true (whether it was already set to true or not) + */ + void set(const size_t pos) { + if (pos >= sizeinbits) + padWithZeroes(pos + 1); + buffer[pos / wordinbits] |= (static_cast(1) << (pos % wordinbits)); + } + + /** + * set to false (whether it was already set to false or not) + * + */ + void unset(const size_t pos) { + if (pos < sizeinbits) + buffer[pos / wordinbits] &= + ~(static_cast(1) << (pos % wordinbits)); + } + + /** + * true of false? (set or unset) + */ + bool get(const size_t pos) const { +#ifdef EWAHASSERT + assert(pos / wordinbits < buffer.size()); +#endif + return (buffer[pos / wordinbits] & + (static_cast(1) << (pos % wordinbits))) != 0; + } + + /** + * set all bits to 0 + */ + void reset() { + if (buffer.size() > 0) + memset(&buffer[0], 0, sizeof(uword) * buffer.size()); + sizeinbits = 0; + } + + size_t sizeInBits() const { return sizeinbits; } + + ~BoolArray() {} + + /** + * Computes the logical and and writes to the provided BoolArray (out). + * The current bitmaps is unchanged. + */ + void logicaland(const BoolArray &ba, BoolArray &out) const { + if (ba.buffer.size() < buffer.size()) + out.setToSize(ba); + else + out.setToSize(*this); + for (size_t i = 0; i < out.buffer.size(); ++i) + out.buffer[i] = buffer[i] & ba.buffer[i]; + } + + /** + * Computes the logical and and return the result. + * The current bitmaps is unchanged. + */ + BoolArray logicaland(const BoolArray &a) const { + BoolArray answer; + logicaland(a, answer); + return answer; + } + + void inplace_logicaland(const BoolArray &ba) { + if (ba.buffer.size() < buffer.size()) + setToSize(ba); + for (size_t i = 0; i < buffer.size(); ++i) + buffer[i] = buffer[i] & ba.buffer[i]; + } + + /** + * Computes the logical andnot and writes to the provided BoolArray (out). + * The current bitmaps is unchanged. + */ + void logicalandnot(const BoolArray &ba, BoolArray &out) const { + out.setToSize(*this); + size_t upto = out.buffer.size() < ba.buffer.size() ? out.buffer.size() + : ba.buffer.size(); + for (size_t i = 0; i < upto; ++i) + out.buffer[i] = buffer[i] & (~ba.buffer[i]); + for (size_t i = upto; i < out.buffer.size(); ++i) + out.buffer[i] = buffer[i]; + out.clearBogusBits(); + } + + /** + * Computes the logical andnot and return the result. + * The current bitmaps is unchanged. + */ + BoolArray logicalandnot(const BoolArray &a) const { + BoolArray answer; + logicalandnot(a, answer); + return answer; + } + + void inplace_logicalandnot(const BoolArray &ba) { + size_t upto = + buffer.size() < ba.buffer.size() ? 
buffer.size() : ba.buffer.size(); + for (size_t i = 0; i < upto; ++i) + buffer[i] = buffer[i] & (~ba.buffer[i]); + clearBogusBits(); + } + + /** + * Computes the logical or and writes to the provided BoolArray (out). + * The current bitmaps is unchanged. + */ + void logicalor(const BoolArray &ba, BoolArray &out) const { + const BoolArray *smallest; + const BoolArray *largest; + if (ba.buffer.size() > buffer.size()) { + smallest = this; + largest = &ba; + out.setToSize(ba); + } else { + smallest = &ba; + largest = this; + out.setToSize(*this); + } + for (size_t i = 0; i < smallest->buffer.size(); ++i) + out.buffer[i] = buffer[i] | ba.buffer[i]; + for (size_t i = smallest->buffer.size(); i < largest->buffer.size(); ++i) + out.buffer[i] = largest->buffer[i]; + } + + /** + * Computes the logical or and return the result. + * The current bitmaps is unchanged. + */ + BoolArray logicalor(const BoolArray &a) const { + BoolArray answer; + logicalor(a, answer); + return answer; + } + + void inplace_logicalor(const BoolArray &ba) { logicalor(ba, *this); } + + /** + * Computes the logical xor and writes to the provided BoolArray (out). + * The current bitmaps is unchanged. + */ + void logicalxor(const BoolArray &ba, BoolArray &out) const { + const BoolArray *smallest; + const BoolArray *largest; + if (ba.buffer.size() > buffer.size()) { + smallest = this; + largest = &ba; + out.setToSize(ba); + } else { + smallest = &ba; + largest = this; + out.setToSize(*this); + } + for (size_t i = 0; i < smallest->buffer.size(); ++i) + out.buffer[i] = buffer[i] ^ ba.buffer[i]; + for (size_t i = smallest->buffer.size(); i < largest->buffer.size(); ++i) + out.buffer[i] = largest->buffer[i]; + } + + /** + * Computes the logical xor and return the result. + * The current bitmaps is unchanged. + */ + BoolArray logicalxor(const BoolArray &a) const { + BoolArray answer; + logicalxor(a, answer); + return answer; + } + + void inplace_logicalxor(const BoolArray &ba) { logicalxor(ba, *this); } + + /** + * Computes the logical not and writes to the provided BoolArray (out). + * The current bitmaps is unchanged. + */ + void logicalnot(BoolArray &out) const { + out.setToSize(*this); + for (size_t i = 0; i < buffer.size(); ++i) + out.buffer[i] = ~buffer[i]; + out.clearBogusBits(); + } + + /** + * Computes the logical not and return the result. + * The current bitmaps is unchanged. + */ + BoolArray logicalandnot() const { + BoolArray answer; + logicalnot(answer); + return answer; + } + + void inplace_logicalnot() { + for (size_t i = 0; i < buffer.size(); ++i) + buffer[i] = ~buffer[i]; + clearBogusBits(); + } + + /** + * Returns the number of bits set to the value 1. + * The running time complexity is proportional to the + * size of the bitmap. + * + * This is sometimes called the cardinality. + */ + size_t numberOfOnes() const { + size_t count = 0; + for (size_t i = 0; i < buffer.size(); ++i) { + count += countOnes(buffer[i]); + } + return count; + } + + inline void printout(std::ostream &o = std::cout) { + for (size_t k = 0; k < sizeinbits; ++k) + o << get(k) << " "; + o << std::endl; + } + + /** + * Make sure the two bitmaps have the same size (padding with zeroes + * if necessary). It has constant running time complexity. + */ + void makeSameSize(BoolArray &a) { + if (a.sizeinbits < sizeinbits) + a.padWithZeroes(sizeinbits); + else if (sizeinbits < a.sizeinbits) + padWithZeroes(a.sizeinbits); + } + /** + * Make sure the current bitmap has the size of the provided bitmap. 
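+   * Only the size metadata and buffer length are adjusted; no bit values are
+   * copied from the provided bitmap.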
+ */ + void setToSize(const BoolArray &a) { + sizeinbits = a.sizeinbits; + buffer.resize(a.buffer.size()); + } + + /** + * make sure the size of the array is totalbits bits by padding with zeroes. + * returns the number of words added (storage cost increase) + */ + size_t padWithZeroes(const size_t totalbits) { + size_t currentwordsize = (sizeinbits + wordinbits - 1) / wordinbits; + size_t neededwordsize = (totalbits + wordinbits - 1) / wordinbits; +#ifdef EWAHASSERT + assert(neededwordsize >= currentwordsize); +#endif + buffer.resize(neededwordsize); + sizeinbits = totalbits; + return static_cast(neededwordsize - currentwordsize); + } + + void append(const BoolArray &a); + + enum { wordinbits = sizeof(uword) * 8 }; + + std::vector toArray() const { + std::vector ans; + for (size_t k = 0; k < buffer.size(); ++k) { + uword myword = buffer[k]; + while (myword != 0) { + uint32_t ntz = numberOfTrailingZeros(myword); + ans.push_back(sizeof(uword) * 8 * k + ntz); + myword ^= (static_cast(1) << ntz); + } + } + return ans; + } + + /** + * Transform into a string that presents a list of set bits. + * The running time is linear in the size of the bitmap. + */ + operator std::string() const { + std::stringstream ss; + ss << *this; + return ss.str(); + } + + friend std::ostream &operator<<(std::ostream &out, const BoolArray &a) { + std::vector v = a.toArray(); + out << "{"; + for (std::vector::const_iterator i = v.begin(); i != v.end();) { + out << *i; + ++i; + if (i != v.end()) + out << ","; + } + out << "}"; + return out; + + return (out << static_cast(a)); + } + +private: + void clearBogusBits() { + if ((sizeinbits % wordinbits) != 0) { + const uword maskbogus = + (static_cast(1) << (sizeinbits % wordinbits)) - 1; + buffer[buffer.size() - 1] &= maskbogus; + } + } + + std::vector buffer; + size_t sizeinbits; +}; + +/** + * computes the logical or (union) between "n" bitmaps (referenced by a + * pointer). + * The answer gets written out in container. This might be faster than calling + * logicalor n-1 times. + */ +template +void fast_logicalor_tocontainer(size_t n, const BoolArray **inputs, + BoolArray &container) { + if (n == 0) { + container.reset(); + return; + } + container = *inputs[0]; + for (size_t i = 0; i < n; i++) { + container.inplace_logicalor(*inputs[i]); + } +} + +/** + * computes the logical or (union) between "n" bitmaps (referenced by a + * pointer). + * Returns the answer. This might be faster than calling + * logicalor n-1 times. + */ +template +BoolArray fast_logicalor(size_t n, const BoolArray **inputs) { + BoolArray answer; + fast_logicalor_tocontainer(n, inputs, answer); + return answer; +} + +template void BoolArray::append(const BoolArray &a) { + if (sizeinbits % wordinbits == 0) { + buffer.insert(buffer.end(), a.buffer.begin(), a.buffer.end()); + } else { + throw std::invalid_argument( + "Cannot append if parent does not meet boundary"); + } + sizeinbits += a.sizeinbits; +} + +#endif diff --git a/yt/utilities/lib/ewahboolarray/ewah.h b/yt/utilities/lib/ewahboolarray/ewah.h new file mode 100644 index 00000000000..2f733cc0bf2 --- /dev/null +++ b/yt/utilities/lib/ewahboolarray/ewah.h @@ -0,0 +1,2306 @@ +/** + * This code is released under the + * Apache License Version 2.0 http://www.apache.org/licenses/. + * + * (c) Daniel Lemire, http://lemire.me/en/ + * with contributions from Zarian Waheed and others. 
+ */ + +#ifndef EWAH_H +#define EWAH_H + +#include +#include +#include + +#include "ewahutil.h" +#include "boolarray.h" + +#include "runninglengthword.h" + +template class EWAHBoolArrayIterator; + +template class EWAHBoolArraySetBitForwardIterator; + +class BitmapStatistics; + +template class EWAHBoolArrayRawIterator; + +/** + * This class is a compressed bitmap. + * This is where compression + * happens. + * The underlying data structure is an STL vector. + */ +template class EWAHBoolArray { +public: + EWAHBoolArray() : buffer(1, 0), sizeinbits(0), lastRLW(0) {} + + static EWAHBoolArray bitmapOf(size_t n, ...) { + EWAHBoolArray ans; + va_list vl; + va_start(vl, n); + for (size_t i = 0; i < n; i++) { + ans.set(static_cast(va_arg(vl, int))); + } + va_end(vl); + return ans; + } + + /** + * Recover wasted memory usage. Fit buffers to the actual data. + */ + void trim() { buffer.shrink_to_fit(); } + + /** + * Query the value of bit i. This runs in time proportional to + * the size of the bitmap. This is not meant to be use in + * a performance-sensitive context. + * + * (This implementation is based on zhenjl's Go version of JavaEWAH.) + * + */ + bool get(const size_t pos) const { + if (pos >= static_cast(sizeinbits)) + return false; + const size_t wordpos = pos / wordinbits; + size_t WordChecked = 0; + EWAHBoolArrayRawIterator j = raw_iterator(); + while (j.hasNext()) { + BufferedRunningLengthWord &rle = j.next(); + WordChecked += static_cast(rle.getRunningLength()); + if (wordpos < WordChecked) + return rle.getRunningBit(); + if (wordpos < WordChecked + rle.getNumberOfLiteralWords()) { + const uword w = j.dirtyWords()[wordpos - WordChecked]; + return (w & (static_cast(1) << (pos % wordinbits))) != 0; + } + WordChecked += static_cast(rle.getNumberOfLiteralWords()); + } + return false; + } + + /** + * Returns true if no bit is set. + */ + bool empty() const { + size_t pointer(0); + while (pointer < buffer.size()) { + ConstRunningLengthWord rlw(buffer[pointer]); + if (rlw.getRunningBit()) { + if(rlw.getRunningLength() > 0) return false; + } + ++pointer; + for (size_t k = 0; k < rlw.getNumberOfLiteralWords(); ++k) { + if(buffer[pointer] != 0) return false; + ++pointer; + } + } + return true; + } + + + /** + * Set the ith bit to true (starting at zero). + * Auto-expands the bitmap. It has constant running time complexity. + * Note that you must set the bits in increasing order: + * set(1), set(2) is ok; set(2), set(1) is not ok. + * set(100), set(100) is also not ok. + * + * Note: by design EWAH is not an updatable data structure in + * the sense that once bit 1000 is set, you cannot change the value + * of bits 0 to 1000. + * + * Returns true if the value of the bit was changed, and false otherwise. + * (In practice, if you set the bits in strictly increasing order, it + * should always return true.) + */ + bool set(size_t i); + + /** + * Transform into a string that presents a list of set bits. + * The running time is linear in the compressed size of the bitmap. + */ + operator std::string() const { + std::stringstream ss; + ss << *this; + return ss.str(); + } + friend std::ostream &operator<<(std::ostream &out, const EWAHBoolArray &a) { + + out << "{"; + for (EWAHBoolArray::const_iterator i = a.begin(); i != a.end();) { + out << *i; + ++i; + if (i != a.end()) + out << ","; + } + out << "}"; + + return out; + } + /** + * Make sure the two bitmaps have the same size (padding with zeroes + * if necessary). It has constant running time complexity. 
+ * + * This is useful when calling "logicalnot" functions. + * + * This can an adverse effect of performance, especially when computing + * intersections. + */ + void makeSameSize(EWAHBoolArray &a) { + if (a.sizeinbits < sizeinbits) + a.padWithZeroes(sizeinbits); + else if (sizeinbits < a.sizeinbits) + padWithZeroes(a.sizeinbits); + } + + enum { RESERVEMEMORY = true }; // for speed + + typedef EWAHBoolArraySetBitForwardIterator const_iterator; + + /** + * Returns an iterator that can be used to access the position of the + * set bits. The running time complexity of a full scan is proportional to the + * number + * of set bits: be aware that if you have long strings of 1s, this can be + * very inefficient. + * + * It can be much faster to use the toArray method if you want to + * retrieve the set bits. + */ + const_iterator begin() const { + return EWAHBoolArraySetBitForwardIterator(&buffer); + } + + /** + * Basically a bogus iterator that can be used together with begin() + * for constructions such as for(EWAHBoolArray::iterator i = b.begin(); + * i!=b.end(); ++i) {} + */ + const_iterator &end() const { + return EWAHBoolArraySetBitForwardIterator::end(); + } + + /** + * Retrieve the set bits. Can be much faster than iterating through + * the set bits with an iterator. + */ + std::vector toArray() const; + + /** + * computes the logical and with another compressed bitmap + * answer goes into container + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * The sizeInBits() of the result is equal to the maximum that of the current + * bitmap's sizeInBits() and that of a.sizeInBits(). + */ + void logicaland(const EWAHBoolArray &a, EWAHBoolArray &container) const; + + /** + * computes the logical and with another compressed bitmap + * Return the answer + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * The sizeInBits() of the result is equal to the maximum that of the current + * bitmap's sizeInBits() and that of a.sizeInBits(). + */ + EWAHBoolArray logicaland(const EWAHBoolArray &a) const { + EWAHBoolArray answer; + logicaland(a, answer); + return answer; + } + + /** + * calls logicaland + */ + EWAHBoolArray operator&(const EWAHBoolArray &a) const { + return logicaland(a); + } + + /** + * computes the logical and with another compressed bitmap + * answer goes into container + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * The sizeInBits() of the result should be equal to that of the current + * bitmap irrespective of a.sizeInBits(). + * + */ + void logicalandnot(const EWAHBoolArray &a, EWAHBoolArray &container) const; + + /** + * calls logicalandnot + */ + EWAHBoolArray operator-(const EWAHBoolArray &a) const { + return logicalandnot(a); + } + + /** + * computes the logical and not with another compressed bitmap + * Return the answer + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * The sizeInBits() of the result should be equal to that of the current + * bitmap irrespective of a.sizeInBits(). + * + */ + EWAHBoolArray logicalandnot(const EWAHBoolArray &a) const { + EWAHBoolArray answer; + logicalandnot(a, answer); + return answer; + } + + /** + * tests whether the bitmaps "intersect" (have at least one 1-bit at the same + * position). This function does not modify the existing bitmaps. + * It is faster than calling logicaland. 
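+   *
+   * A minimal usage sketch (illustrative only, assuming 32-bit words):
+   *
+   *   EWAHBoolArray<uint32_t> a = EWAHBoolArray<uint32_t>::bitmapOf(2, 3, 1000);
+   *   EWAHBoolArray<uint32_t> b = EWAHBoolArray<uint32_t>::bitmapOf(1, 1000);
+   *   bool touching = a.intersects(b); // true: both contain bit 1000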
+ */ + bool intersects(const EWAHBoolArray &a) const; + + /** + * computes the logical or with another compressed bitmap + * answer goes into container + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * If you have many bitmaps, see fast_logicalor_tocontainer. + * + * The sizeInBits() of the result is equal to the maximum that of the current + * bitmap's sizeInBits() and that of a.sizeInBits(). + */ + void logicalor(const EWAHBoolArray &a, EWAHBoolArray &container) const; + + /** + * computes the size (in number of set bits) of the logical or with another + * compressed bitmap + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + */ + size_t logicalorcount(const EWAHBoolArray &a) const; + + /** + * computes the size (in number of set bits) of the logical and with another + * compressed bitmap + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + */ + size_t logicalandcount(const EWAHBoolArray &a) const; + + /** + * computes the size (in number of set bits) of the logical and not with + * another compressed bitmap + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + */ + size_t logicalandnotcount(const EWAHBoolArray &a) const; + + /** + * computes the size (in number of set bits) of the logical xor with another + * compressed bitmap + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + */ + size_t logicalxorcount(const EWAHBoolArray &a) const; + + /** + * computes the logical or with another compressed bitmap + * Return the answer + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * If you have many bitmaps, see fast_logicalor. + * + * The sizeInBits() of the result is equal to the maximum that of the current + * bitmap's sizeInBits() and that of a.sizeInBits(). + */ + EWAHBoolArray logicalor(const EWAHBoolArray &a) const { + EWAHBoolArray answer; + logicalor(a, answer); + return answer; + } + + /** + * calls logicalor + */ + EWAHBoolArray operator|(const EWAHBoolArray &a) const { return logicalor(a); } + + /** + * computes the logical xor with another compressed bitmap + * answer goes into container + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * The sizeInBits() of the result is equal to the maximum that of the current + * bitmap's sizeInBits() and that of a.sizeInBits(). + */ + void logicalxor(const EWAHBoolArray &a, EWAHBoolArray &container) const; + + /** + * computes the logical xor with another compressed bitmap + * Return the answer + * Running time complexity is proportional to the sum of the compressed + * bitmap sizes. + * + * The sizeInBits() of the result is equal to the maximum that of the current + * bitmap's sizeInBits() and that of a.sizeInBits(). + */ + EWAHBoolArray logicalxor(const EWAHBoolArray &a) const { + EWAHBoolArray answer; + logicalxor(a, answer); + return answer; + } + + /** + * calls logicalxor + */ + EWAHBoolArray operator^(const EWAHBoolArray &a) const { + return logicalxor(a); + } + /** + * clear the content of the bitmap. It does not + * release the memory. + */ + void reset() { + buffer.clear(); + buffer.push_back(0); + sizeinbits = 0; + lastRLW = 0; + } + + /** + * convenience method. 
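+   * Appends one word of data to the bitmap; "bitsthatmatter" controls how
+   * many of its bits are counted toward sizeInBits().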
+ * + * returns the number of words added (storage cost increase) + */ + inline size_t addWord(const uword newdata, + const uint32_t bitsthatmatter = 8 * sizeof(uword)); + + inline void printout(std::ostream &o = std::cout) { + toBoolArray().printout(o); + } + + /** + * Prints a verbose description of the content of the compressed bitmap. + */ + void debugprintout() const; + + /** + * Return the size in bits of this bitmap (this refers + * to the uncompressed size in bits). + * + * You can increase it with padWithZeroes() + */ + inline size_t sizeInBits() const { return sizeinbits; } + + /** + * Return the size of the buffer in bytes. This + * is equivalent to the storage cost, minus some overhead. + * See sizeOnDisk to get the actual storage cost with overhead. + */ + inline size_t sizeInBytes() const { return buffer.size() * sizeof(uword); } + + /** + * same as addEmptyWord, but you can do several in one shot! + * returns the number of words added (storage cost increase) + */ + size_t addStreamOfEmptyWords(const bool v, size_t number); + + /** + * add a stream of dirty words, returns the number of words added + * (storage cost increase) + */ + size_t addStreamOfDirtyWords(const uword *v, const size_t number); + + /** + * add a stream of dirty words, each one negated, returns the number of words + * added + * (storage cost increase) + */ + size_t addStreamOfNegatedDirtyWords(const uword *v, const size_t number); + + /** + * make sure the size of the array is totalbits bits by padding with zeroes. + * returns the number of words added (storage cost increase). + * + * This is useful when calling "logicalnot" functions. + * + * This can an adverse effect of performance, especially when computing + * intersections. + * + */ + size_t padWithZeroes(const size_t totalbits); + + /** + * Compute the size on disk assuming that it was saved using + * the method "write". + */ + size_t sizeOnDisk(const bool savesizeinbits = true) const; + + /** + * Save this bitmap to a stream. The file format is + * | sizeinbits | buffer lenth | buffer content| + * the sizeinbits part can be omitted if "savesizeinbits=false". + * Both sizeinbits and buffer length are saved using the size_t data + * type which is typically a 32-bit unsigned integer for 32-bit CPUs + * and a 64-bit unsigned integer for 64-bit CPUs. + * Note that this format is machine-specific. Note also + * that the word size is not saved. For robust persistent + * storage, you need to save this extra information elsewhere. + * + * Returns how many bytes were handed out to the stream. + */ + size_t write(std::ostream &out, const bool savesizeinbits = true) const; + + /** + * same as write(std::ostream...), except that you provide a char pointer + * and a "capacity" (in bytes). The function never writes at or beyond "out+capacity". + * If the storage needed exceeds the + * given capacity, the value zero is returned: it should be considered an error. + * Otherwise, the number of bytes copied is returned. + */ + size_t write(char * out, size_t capacity, const bool savesizeinbits = true) const; + + /** + * This only writes the content of the buffer (see write()) method. + * It is for advanced users. + */ + void writeBuffer(std::ostream &out) const; + + /** + * size (in words) of the underlying STL vector. + */ + size_t bufferSize() const { return buffer.size(); } + + /** + * this is the counterpart to the write method. 
+ * if you set savesizeinbits=false, then you are responsible + * for setting the value fo the attribute sizeinbits (see method + * setSizeInBits). + * + * Returns how many bytes were queried from the stream. + */ + size_t read(std::istream &in, const bool savesizeinbits = true); + + + /** + * same as read(std::istream...), except that you provide a char pointer + * and a "capacity" (in bytes). The function never reads at or beyond "in+capacity". + * If the detected storage exceeds the given capacity, the value zero is returned: + * it should be considered an error. + * Otherwise, the number of bytes read is returned. + */ + size_t read(const char * in, size_t capacity, const bool savesizeinbits = true); + + /** + * read the buffer from a stream, see method writeBuffer. + * this is for advanced users. + */ + void readBuffer(std::istream &in, const size_t buffersize); + + /** + * We define two EWAHBoolArray as being equal if they have the same set bits. + * Alternatively, B1==B2 if and only if cardinality(B1 XOR B2) ==0. + */ + bool operator==(const EWAHBoolArray &x) const; + + /** + * We define two EWAHBoolArray as being different if they do not have the same + * set bits. + * Alternatively, B1!=B2 if and only if cardinality(B1 XOR B2) >0. + */ + bool operator!=(const EWAHBoolArray &x) const; + + bool operator==(const BoolArray &x) const; + + bool operator!=(const BoolArray &x) const; + + /** + * Iterate over the uncompressed words. + * Can be considerably faster than begin()/end(). + * Running time complexity of a full scan is proportional to the + * uncompressed size of the bitmap. + */ + EWAHBoolArrayIterator uncompress() const; + + /** + * To iterate over the compressed data. + * Can be faster than any other iterator. + * Running time complexity of a full scan is proportional to the + * compressed size of the bitmap. + */ + EWAHBoolArrayRawIterator raw_iterator() const; + + /** + * Appends the content of some other compressed bitmap + * at the end of the current bitmap. + */ + void append(const EWAHBoolArray &x); + + /** + * For research purposes. This computes the number of + * dirty words and the number of compressed words. + */ + BitmapStatistics computeStatistics() const; + + /** + * For convenience, this fully uncompresses the bitmap. + * Not fast! + */ + BoolArray toBoolArray() const; + + /** + * Convert to a list of positions of "set" bits. + * The recommended container is vector. + * + * See also toArray(). + */ + template + void appendRowIDs(container &out, const size_t offset = 0) const; + + /** + * Convert to a list of positions of "set" bits. + * The recommended container is vector. + * (alias for appendRowIDs). + * + * See also toArray(). + */ + template + void appendSetBits(container &out, const size_t offset = 0) const { + return appendRowIDs(out, offset); + } + + /** + * Returns a vector containing the position of the set + * bits in increasing order. This just calls "toArray". + */ + std::vector toVector() const { return toArray(); } + + /** + * Returns the number of bits set to the value 1. + * The running time complexity is proportional to the + * compressed size of the bitmap. + * + * This is sometimes called the cardinality. + */ + size_t numberOfOnes() const; + + /** + * Swap the content of this bitmap with another bitmap. + * No copying is done. (Running time complexity is constant.) 
+ */ + void swap(EWAHBoolArray &x); + + const std::vector &getBuffer() const { return buffer; } + + enum { wordinbits = sizeof(uword) * 8 }; + + /** + * Please don't copy your bitmaps! The running time + * complexity of a copy is the size of the compressed bitmap. + **/ + EWAHBoolArray(const EWAHBoolArray &other) + : buffer(other.buffer), sizeinbits(other.sizeinbits), + lastRLW(other.lastRLW) {} + + /** + * Copies the content of one bitmap onto another. Running time complexity + * is proportional to the size of the compressed bitmap. + * please, never hard-copy this object. Use the swap method if you must. + */ + EWAHBoolArray &operator=(const EWAHBoolArray &x) { + buffer = x.buffer; + sizeinbits = x.sizeinbits; + lastRLW = x.lastRLW; + return *this; + } + + /** + * Move constructor. + */ + EWAHBoolArray(EWAHBoolArray &&other) + : buffer(std::move(other.buffer)), sizeinbits(other.sizeinbits), + lastRLW(other.lastRLW) {} + + /** + * Move assignment operator. + */ + EWAHBoolArray &operator=(EWAHBoolArray &&x) { + buffer = std::move(x.buffer); + sizeinbits = x.sizeinbits; + lastRLW = x.lastRLW; + return *this; + } + + /** + * This is equivalent to the operator =. It is used + * to keep in mind that assignment can be expensive. + * + *if you don't care to copy the bitmap (performance-wise), use this! + */ + void expensive_copy(const EWAHBoolArray &x) { + buffer = x.buffer; + sizeinbits = x.sizeinbits; + lastRLW = x.lastRLW; + } + + /** + * Write the logical not of this bitmap in the provided container. + * + * This function takes into account the sizeInBits value. + * You may need to call "padWithZeroes" to adjust the sizeInBits. + */ + void logicalnot(EWAHBoolArray &x) const; + + /** + * Write the logical not of this bitmap in the provided container. + * + * This function takes into account the sizeInBits value. + * You may need to call "padWithZeroes" to adjust the sizeInBits. + */ + EWAHBoolArray logicalnot() const { + EWAHBoolArray answer; + logicalnot(answer); + return answer; + } + + /** + * Apply the logical not operation on this bitmap. + * Running time complexity is proportional to the compressed size of the + *bitmap. + * The current bitmap is not modified. + * + * This function takes into account the sizeInBits value. + * You may need to call "padWithZeroes" to adjust the sizeInBits. + **/ + void inplace_logicalnot(); + + /** + * set size in bits. This does not affect the compressed size. It + * runs in constant time. This should not normally be used, except + * as part of a deserialization process. + */ + inline void setSizeInBits(const size_t size) { sizeinbits = size; } + + /** + * Like addStreamOfEmptyWords but + * addStreamOfEmptyWords but does not return the cost increase, + * does not update sizeinbits + */ + inline void fastaddStreamOfEmptyWords(const bool v, size_t number); + /** + * LikeaddStreamOfDirtyWords but does not return the cost increse, + * does not update sizeinbits. + */ + inline void fastaddStreamOfDirtyWords(const uword *v, const size_t number); + +private: + // private because does not increment the size in bits + // returns the number of words added (storage cost increase) + inline size_t addLiteralWord(const uword newdata); + + // private because does not increment the size in bits + // returns the number of words added (storage cost increase) + size_t addEmptyWord(const bool v); + // this second version "might" be faster if you hate OOP. + // in my tests, it turned out to be slower! 
+ // private because does not increment the size in bits + // inline void addEmptyWordStaticCalls(bool v); + + std::vector buffer; + size_t sizeinbits; + size_t lastRLW; +}; + +/** + * computes the logical or (union) between "n" bitmaps (referenced by a + * pointer). + * The answer gets written out in container. This might be faster than calling + * logicalor n-1 times. + */ +template +void fast_logicalor_tocontainer(size_t n, const EWAHBoolArray **inputs, + EWAHBoolArray &container); + +/** + * computes the logical or (union) between "n" bitmaps (referenced by a + * pointer). + * Returns the answer. This might be faster than calling + * logicalor n-1 times. + */ +template +EWAHBoolArray fast_logicalor(size_t n, + const EWAHBoolArray **inputs) { + EWAHBoolArray answer; + fast_logicalor_tocontainer(n, inputs, answer); + return answer; +} + +/** + * Iterate over words of bits from a compressed bitmap. + */ +template class EWAHBoolArrayIterator { +public: + /** + * is there a new word? + */ + bool hasNext() const { return pointer < myparent.size(); } + + /** + * return next word. + */ + uword next() { + uword returnvalue; + if (compressedwords < rl) { + ++compressedwords; + if (b) + returnvalue = notzero; + else + returnvalue = zero; + } else { + ++literalwords; + ++pointer; + returnvalue = myparent[pointer]; + } + if ((compressedwords == rl) && (literalwords == lw)) { + ++pointer; + if (pointer < myparent.size()) + readNewRunningLengthWord(); + } + return returnvalue; + } + + EWAHBoolArrayIterator(const EWAHBoolArrayIterator &other) + : pointer(other.pointer), myparent(other.myparent), + compressedwords(other.compressedwords), + literalwords(other.literalwords), rl(other.rl), lw(other.lw), + b(other.b) {} + + static const uword zero = 0; + static const uword notzero = static_cast(~zero); + +private: + EWAHBoolArrayIterator(const std::vector &parent); + void readNewRunningLengthWord(); + friend class EWAHBoolArray; + size_t pointer; + const std::vector &myparent; + uword compressedwords; + uword literalwords; + uword rl, lw; + bool b; +}; + +/** + * Used to go through the set bits. Not optimally fast, but convenient. + */ +template class EWAHBoolArraySetBitForwardIterator { +public: + typedef std::forward_iterator_tag iterator_category; + typedef size_t *pointer; + typedef size_t &reference_type; + typedef size_t value_type; + typedef ptrdiff_t difference_type; + typedef EWAHBoolArraySetBitForwardIterator type_of_iterator; + /** + * Provides the location of the set bit. 
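+   * The returned position is only meaningful while the iterator holds a
+   * value, i.e. before it compares equal to end().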
+ */ + inline size_t operator*() const { return answer; } + + bool operator<(const type_of_iterator &o) const { + if (!o.hasValue) + return true; + if (!hasValue) + return false; + return answer < o.answer; + } + + bool operator<=(const type_of_iterator &o) const { + if (!o.hasValue) + return true; + if (!hasValue) + return false; + return answer <= o.answer; + } + + bool operator>(const type_of_iterator &o) const { return !((*this) <= o); } + + bool operator>=(const type_of_iterator &o) const { return !((*this) < o); } + + EWAHBoolArraySetBitForwardIterator &operator++() { //++i + if (hasNext) + next(); + else + hasValue = false; + return *this; + } + + EWAHBoolArraySetBitForwardIterator operator++(int) { // i++ + EWAHBoolArraySetBitForwardIterator old(*this); + if (hasNext) + next(); + else + hasValue = false; + return old; + } + + bool operator==(const EWAHBoolArraySetBitForwardIterator &o) const { + if ((!hasValue) && (!o.hasValue)) + return true; + return (hasValue == o.hasValue) && (answer == o.answer); + } + + bool operator!=(const EWAHBoolArraySetBitForwardIterator &o) const { + return !(*this == o); + } + + static EWAHBoolArraySetBitForwardIterator &end() { + static EWAHBoolArraySetBitForwardIterator e; + return e; + } + + EWAHBoolArraySetBitForwardIterator(const std::vector *parent, + size_t startpointer = 0) + : word(0), position(0), runningLength(0), literalPosition(0), + wordPosition(startpointer), wordLength(0), buffer(parent), + hasNext(false), hasValue(false), answer(0) { + if (wordPosition < buffer->size()) { + setRunningLengthWord(); + hasNext = moveToNext(); + if (hasNext) { + next(); + hasValue = true; + } + } + } + + EWAHBoolArraySetBitForwardIterator() + : word(0), position(0), runningLength(0), literalPosition(0), + wordPosition(0), wordLength(0), buffer(NULL), hasNext(false), + hasValue(false), answer(0) {} + + inline bool runningHasNext() const { return position < runningLength; } + + inline bool literalHasNext() { + while (word == 0 && wordPosition < wordLength) { + word = (*buffer)[wordPosition++]; + literalPosition = position; + position += WORD_IN_BITS; + } + return word != 0; + } + + inline void setRunningLengthWord() { + uword rlw = (*buffer)[wordPosition]; + runningLength = + (size_t)WORD_IN_BITS * RunningLengthWord::getRunningLength(rlw) + + position; + if (!RunningLengthWord::getRunningBit(rlw)) { + position = runningLength; + } + wordPosition++; // point to first literal word + wordLength = + wordPosition + RunningLengthWord::getNumberOfLiteralWords(rlw); + } + + inline bool moveToNext() { + while (!runningHasNext() && !literalHasNext()) { + if (wordPosition >= buffer->size()) { + return false; + } + setRunningLengthWord(); + } + return true; + } + + void next() { // update answer + if (runningHasNext()) { + answer = position++; + if (runningHasNext()) + return; + } else { + uword t = word & (~word + 1); + answer = literalPosition + countOnes((uword)(t - 1)); + word ^= t; + } + hasNext = moveToNext(); + } + + enum { WORD_IN_BITS = sizeof(uword) * 8 }; + uword word; // lit word + size_t position; + size_t runningLength; + size_t literalPosition; + size_t wordPosition; // points to word in buffer + uword wordLength; + const std::vector *buffer; + bool hasNext; + bool hasValue; + size_t answer; +}; + +/** + * This object is returned by the compressed bitmap as a + * statistical descriptor. 
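+ * All counts are reported in words (uword units), not in bytes.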
+ */ +class BitmapStatistics { +public: + BitmapStatistics() + : totalliteral(0), totalcompressed(0), runningwordmarker(0), + maximumofrunningcounterreached(0) {} + size_t getCompressedSize() const { return totalliteral + runningwordmarker; } + size_t getUncompressedSize() const { return totalliteral + totalcompressed; } + size_t getNumberOfDirtyWords() const { return totalliteral; } + size_t getNumberOfCleanWords() const { return totalcompressed; } + size_t getNumberOfMarkers() const { return runningwordmarker; } + size_t getOverRuns() const { return maximumofrunningcounterreached; } + size_t totalliteral; + size_t totalcompressed; + size_t runningwordmarker; + size_t maximumofrunningcounterreached; +}; + +template bool EWAHBoolArray::set(size_t i) { + if (i < sizeinbits) + return false; + const size_t dist = (i + wordinbits) / wordinbits - + (sizeinbits + wordinbits - 1) / wordinbits; + sizeinbits = i + 1; + if (dist > 0) { // easy + if (dist > 1) + fastaddStreamOfEmptyWords(false, dist - 1); + addLiteralWord( + static_cast(static_cast(1) << (i % wordinbits))); + return true; + } + RunningLengthWord lastRunningLengthWord(buffer[lastRLW]); + if (lastRunningLengthWord.getNumberOfLiteralWords() == 0) { + lastRunningLengthWord.setRunningLength( + static_cast(lastRunningLengthWord.getRunningLength() - 1)); + addLiteralWord( + static_cast(static_cast(1) << (i % wordinbits))); + return true; + } + buffer[buffer.size() - 1] |= + static_cast(static_cast(1) << (i % wordinbits)); + // check if we just completed a stream of 1s + if (buffer[buffer.size() - 1] == static_cast(~0)) { + // we remove the last dirty word + buffer[buffer.size() - 1] = 0; + buffer.resize(buffer.size() - 1); + lastRunningLengthWord.setNumberOfLiteralWords(static_cast( + lastRunningLengthWord.getNumberOfLiteralWords() - 1)); + // next we add one clean word + addEmptyWord(true); + } + return true; +} + +template void EWAHBoolArray::inplace_logicalnot() { + size_t pointer(0), lastrlw(0); + while (pointer < buffer.size()) { + RunningLengthWord rlw(buffer[pointer]); + lastrlw = pointer; // we save this up + if (rlw.getRunningBit()) + rlw.setRunningBit(false); + else + rlw.setRunningBit(true); + ++pointer; + for (size_t k = 0; k < rlw.getNumberOfLiteralWords(); ++k) { + buffer[pointer] = static_cast(~buffer[pointer]); + ++pointer; + } + } + if (sizeinbits % wordinbits != 0) { + RunningLengthWord rlw(buffer[lastrlw]); + const uword maskbogus = + (static_cast(1) << (sizeinbits % wordinbits)) - 1; + if (rlw.getNumberOfLiteralWords() > 0) { // easy case + buffer[lastrlw + 1 + rlw.getNumberOfLiteralWords() - 1] &= maskbogus; + } else { + rlw.setRunningLength(rlw.getRunningLength() - 1); + addLiteralWord(maskbogus); + } + } +} + +template size_t EWAHBoolArray::numberOfOnes() const { + size_t tot(0); + size_t pointer(0); + while (pointer < buffer.size()) { + ConstRunningLengthWord rlw(buffer[pointer]); + if (rlw.getRunningBit()) { + tot += static_cast(rlw.getRunningLength() * wordinbits); + } + ++pointer; + for (size_t k = 0; k < rlw.getNumberOfLiteralWords(); ++k) { + tot += countOnes((uword)buffer[pointer]); + ++pointer; + } + } + return tot; +} + +template +std::vector EWAHBoolArray::toArray() const { + std::vector ans; + size_t pos(0); + size_t pointer(0); + const size_t buffersize = buffer.size(); + while (pointer < buffersize) { + ConstRunningLengthWord rlw(buffer[pointer]); + const size_t productofrl = + static_cast(rlw.getRunningLength() * wordinbits); + if (rlw.getRunningBit()) { + size_t upper_limit = pos + productofrl; + for (; 
pos < upper_limit; ++pos) { + ans.push_back(pos); + } + } else { + pos += productofrl; + } + ++pointer; + const size_t rlwlw = rlw.getNumberOfLiteralWords(); + for (size_t k = 0; k < rlwlw; ++k) { + uword myword = buffer[pointer]; + while (myword != 0) { + uint64_t t = myword & (~myword + 1); + uint32_t r = numberOfTrailingZeros(t); + ans.push_back(pos + r); + myword ^= t; + } + pos += wordinbits; + ++pointer; + } + } + return ans; +} + +template +void EWAHBoolArray::logicalnot(EWAHBoolArray &x) const { + x.reset(); + x.buffer.reserve(buffer.size()); + EWAHBoolArrayRawIterator i = this->raw_iterator(); + if (!i.hasNext()) + return; // nothing to do + while (true) { + BufferedRunningLengthWord &rlw = i.next(); + if (i.hasNext()) { + if (rlw.getRunningLength() > 0) + x.fastaddStreamOfEmptyWords(!rlw.getRunningBit(), + rlw.getRunningLength()); + if (rlw.getNumberOfLiteralWords() > 0) { + const uword *dw = i.dirtyWords(); + for (size_t k = 0; k < rlw.getNumberOfLiteralWords(); ++k) { + x.addLiteralWord(~dw[k]); + } + } + } else { + if (rlw.getNumberOfLiteralWords() == 0) { + if ((this->sizeinbits % wordinbits != 0) && !rlw.getRunningBit()) { + if (rlw.getRunningLength() > 1) + x.fastaddStreamOfEmptyWords(!rlw.getRunningBit(), + rlw.getRunningLength() - 1); + const uword maskbogus = + (static_cast(1) << (this->sizeinbits % wordinbits)) - 1; + x.addLiteralWord(maskbogus); + break; + } else { + if (rlw.getRunningLength() > 0) + x.fastaddStreamOfEmptyWords(!rlw.getRunningBit(), + rlw.getRunningLength()); + break; + } + } + if (rlw.getRunningLength() > 0) + x.fastaddStreamOfEmptyWords(!rlw.getRunningBit(), + rlw.getRunningLength()); + const uword *dw = i.dirtyWords(); + for (size_t k = 0; k + 1 < rlw.getNumberOfLiteralWords(); ++k) { + x.addLiteralWord(~dw[k]); + } + const uword maskbogus = + (this->sizeinbits % wordinbits != 0) + ? 
(static_cast(1) << (this->sizeinbits % wordinbits)) - 1 + : ~static_cast(0); + x.addLiteralWord((~dw[rlw.getNumberOfLiteralWords() - 1]) & maskbogus); + break; + } + } + x.sizeinbits = this->sizeinbits; +} + +template +size_t EWAHBoolArray::addWord(const uword newdata, + const uint32_t bitsthatmatter) { + sizeinbits += bitsthatmatter; + if (newdata == 0) { + return addEmptyWord(0); + } else if (newdata == static_cast(~0)) { + return addEmptyWord(1); + } else { + return addLiteralWord(newdata); + } +} + +template +inline void EWAHBoolArray::writeBuffer(std::ostream &out) const { + if (!buffer.empty()) + out.write(reinterpret_cast(&buffer[0]), + sizeof(uword) * buffer.size()); +} + +template +inline void EWAHBoolArray::readBuffer(std::istream &in, + const size_t buffersize) { + buffer.resize(buffersize); + if (buffersize > 0) + in.read(reinterpret_cast(&buffer[0]), sizeof(uword) * buffersize); +} + +template +size_t EWAHBoolArray::write(std::ostream &out, + const bool savesizeinbits) const { + size_t written = 0; + if (savesizeinbits) { + out.write(reinterpret_cast(&sizeinbits), sizeof(sizeinbits)); + written += sizeof(sizeinbits); + } + const size_t buffersize = buffer.size(); + out.write(reinterpret_cast(&buffersize), sizeof(buffersize)); + written += sizeof(buffersize); + + if (buffersize > 0) { + out.write(reinterpret_cast(&buffer[0]), + static_cast(sizeof(uword) * buffersize)); + written += sizeof(uword) * buffersize; + } + return written; +} + +template +size_t EWAHBoolArray::write(char * out, size_t capacity, + const bool savesizeinbits) const { + size_t written = 0; + if (savesizeinbits) { + if(capacity < sizeof(sizeinbits)) return 0; + capacity -= sizeof(sizeinbits); + memcpy(out, &sizeinbits, sizeof(sizeinbits)); + out += sizeof(sizeinbits); + written += sizeof(sizeinbits); + } + const size_t buffersize = buffer.size(); + if(capacity < sizeof(buffersize)) return 0; + capacity -= sizeof(buffersize); + memcpy(out, &buffersize, sizeof(buffersize)); + out += sizeof(buffersize); + written += sizeof(buffersize); + + if (buffersize > 0) { + if(capacity < sizeof(uword) * buffersize) return 0; + memcpy(out, &buffer[0], sizeof(uword) * buffersize); + written += sizeof(uword) * buffersize; + } + return written; +} + + +template +size_t EWAHBoolArray::read(std::istream &in, const bool savesizeinbits) { + size_t read = 0; + if (savesizeinbits) { + in.read(reinterpret_cast(&sizeinbits), sizeof(sizeinbits)); + read += sizeof(sizeinbits); + } else { + sizeinbits = 0; + } + size_t buffersize(0); + in.read(reinterpret_cast(&buffersize), sizeof(buffersize)); + read += sizeof(buffersize); + buffer.resize(buffersize); + if (buffersize > 0) { + in.read(reinterpret_cast(&buffer[0]), + static_cast(sizeof(uword) * buffersize)); + read += sizeof(uword) * buffersize; + } + return read; +} + + +template +size_t EWAHBoolArray::read(const char * in, size_t capacity, const bool savesizeinbits) { + size_t read = 0; + if (savesizeinbits) { + if(capacity < sizeof(sizeinbits)) return 0; + capacity -= sizeof(sizeinbits); + memcpy(reinterpret_cast(&sizeinbits), in, sizeof(sizeinbits)); + read += sizeof(sizeinbits); + in += sizeof(sizeinbits); + } else { + sizeinbits = 0; + } + size_t buffersize(0); + if(capacity < sizeof(buffersize)) return 0; + capacity -= sizeof(buffersize); + memcpy(reinterpret_cast(&buffersize), in, sizeof(buffersize)); + in += sizeof(buffersize); + read += sizeof(buffersize); + + buffer.resize(buffersize); + if (buffersize > 0) { + if(capacity < sizeof(uword) * buffersize) return 0; + 
memcpy(&buffer[0], in, sizeof(uword) * buffersize); + read += sizeof(uword) * buffersize; + } + return read; +} + +template +size_t EWAHBoolArray::addLiteralWord(const uword newdata) { + RunningLengthWord lastRunningLengthWord(buffer[lastRLW]); + uword numbersofar = lastRunningLengthWord.getNumberOfLiteralWords(); + if (numbersofar >= + RunningLengthWord::largestliteralcount) { // 0x7FFF) { + buffer.push_back(0); + lastRLW = buffer.size() - 1; + RunningLengthWord lastRunningLengthWord2(buffer[lastRLW]); + lastRunningLengthWord2.setNumberOfLiteralWords(1); + buffer.push_back(newdata); + return 2; + } + lastRunningLengthWord.setNumberOfLiteralWords( + static_cast(numbersofar + 1)); + buffer.push_back(newdata); + return 1; +} + +template +size_t EWAHBoolArray::padWithZeroes(const size_t totalbits) { + size_t wordsadded = 0; + if (totalbits <= sizeinbits) + return wordsadded; + + size_t missingbits = totalbits - sizeinbits; + + RunningLengthWord rlw(buffer[lastRLW]); + if (rlw.getNumberOfLiteralWords() > 0) { + // Consume trailing zeroes of trailing literal word (past sizeinbits) + size_t remain = sizeinbits % wordinbits; + if (remain > 0) // Is last word partial? + { + size_t avail = wordinbits - remain; + if (avail > 0) { + if (missingbits > avail) { + missingbits -= avail; + } else { + missingbits = 0; + } + sizeinbits += avail; + } + } + } + + if (missingbits > 0) { + size_t wordstoadd = missingbits / wordinbits; + if ((missingbits % wordinbits) != 0) + ++wordstoadd; + + wordsadded = addStreamOfEmptyWords(false, wordstoadd); + } + sizeinbits = totalbits; + return wordsadded; +} + +/** + * This is a low-level iterator. + */ + +template class EWAHBoolArrayRawIterator { +public: + EWAHBoolArrayRawIterator(const EWAHBoolArray &p) + : pointer(0), myparent(&p.getBuffer()), rlw((*myparent)[pointer], this) {} + EWAHBoolArrayRawIterator(const EWAHBoolArrayRawIterator &o) + : pointer(o.pointer), myparent(o.myparent), rlw(o.rlw) {} + + bool hasNext() const { return pointer < myparent->size(); } + + BufferedRunningLengthWord &next() { + rlw.read((*myparent)[pointer]); + pointer = static_cast(pointer + rlw.getNumberOfLiteralWords() + 1); + return rlw; + } + + const uword *dirtyWords() const { + return myparent->data() + + static_cast(pointer - rlw.getNumberOfLiteralWords()); + } + + EWAHBoolArrayRawIterator &operator=(const EWAHBoolArrayRawIterator &other) { + pointer = other.pointer; + myparent = other.myparent; + rlw = other.rlw; + return *this; + } + + size_t pointer; + const std::vector *myparent; + BufferedRunningLengthWord rlw; + + EWAHBoolArrayRawIterator(); +}; + +template +EWAHBoolArrayIterator EWAHBoolArray::uncompress() const { + return EWAHBoolArrayIterator(buffer); +} + +template +EWAHBoolArrayRawIterator EWAHBoolArray::raw_iterator() const { + return EWAHBoolArrayRawIterator(*this); +} + +template +bool EWAHBoolArray::operator==(const EWAHBoolArray &x) const { + EWAHBoolArrayRawIterator i = x.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!(i.hasNext() and j.hasNext())) { // hopefully this never happens... + return (i.hasNext() == false) && (j.hasNext() == false); + } + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey = i_is_prey ? 
rlwi : rlwj; + BufferedRunningLengthWord &predator = i_is_prey ? rlwj : rlwi; + size_t index = 0; + const bool nonzero = + ((!predator.getRunningBit()) + ? prey.nonzero_discharge(predator.getRunningLength(), index) + : prey.nonzero_dischargeNegated(predator.getRunningLength(), + index)); + if (nonzero) { + return false; + } + if (predator.getRunningLength() - index > 0) { + if (predator.getRunningBit()) { + return false; + } + } + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) + if ((rlwi.getLiteralWordAt(k) ^ rlwj.getLiteralWordAt(k)) != 0) + return false; + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + const bool i_remains = rlwi.size() > 0; + BufferedRunningLengthWord &remaining = i_remains ? rlwi : rlwj; + return !remaining.nonzero_discharge(); +} + +template void EWAHBoolArray::swap(EWAHBoolArray &x) { + buffer.swap(x.buffer); + size_t tmp = x.sizeinbits; + x.sizeinbits = sizeinbits; + sizeinbits = tmp; + tmp = x.lastRLW; + x.lastRLW = lastRLW; + lastRLW = tmp; +} + +template +void EWAHBoolArray::append(const EWAHBoolArray &x) { + if (sizeinbits % wordinbits == 0) { + // hoping for the best? + sizeinbits += x.sizeinbits; + ConstRunningLengthWord lRLW(buffer[lastRLW]); + if ((lRLW.getRunningLength() == 0) && + (lRLW.getNumberOfLiteralWords() == 0)) { + // it could be that the running length word is empty, in such a case, + // we want to get rid of it! + lastRLW = x.lastRLW + buffer.size() - 1; + buffer.resize(buffer.size() - 1); + buffer.insert(buffer.end(), x.buffer.begin(), x.buffer.end()); + } else { + lastRLW = x.lastRLW + buffer.size(); + buffer.insert(buffer.end(), x.buffer.begin(), x.buffer.end()); + } + } else { + std::stringstream ss; + ss << "This should really not happen! You are trying to append to a bitmap " + "having a fractional number of words, that is, " + << static_cast(sizeinbits) << " bits with a word size in bits of " + << static_cast(wordinbits) << ". "; + ss << "Size of the bitmap being appended: " << x.sizeinbits << " bits." + << std::endl; + throw std::invalid_argument(ss.str()); + } +} + +template +EWAHBoolArrayIterator::EWAHBoolArrayIterator( + const std::vector &parent) + : pointer(0), myparent(parent), compressedwords(0), literalwords(0), rl(0), + lw(0), b(0) { + if (pointer < myparent.size()) + readNewRunningLengthWord(); +} + +template +void EWAHBoolArrayIterator::readNewRunningLengthWord() { + literalwords = 0; + compressedwords = 0; + ConstRunningLengthWord rlw(myparent[pointer]); + rl = rlw.getRunningLength(); + lw = rlw.getNumberOfLiteralWords(); + b = rlw.getRunningBit(); + if ((rl == 0) && (lw == 0)) { + if (pointer < myparent.size() - 1) { + ++pointer; + readNewRunningLengthWord(); + } else { + pointer = myparent.size(); + } + } +} + +template +BoolArray EWAHBoolArray::toBoolArray() const { + BoolArray ans(sizeinbits); + EWAHBoolArrayIterator i = uncompress(); + size_t counter = 0; + while (i.hasNext()) { + ans.setWord(counter++, i.next()); + } + return ans; +} + +template +template +void EWAHBoolArray::appendRowIDs(container &out, + const size_t offset) const { + size_t pointer(0); + size_t currentoffset(offset); + if (RESERVEMEMORY) + out.reserve(buffer.size() + 64); // trading memory for speed. 
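+  // Walk the running-length words: a run of 1s emits consecutive IDs, a run
+  // of 0s only advances the offset, and each literal word is decoded by
+  // repeatedly isolating its lowest set bit.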
+ const size_t buffersize = buffer.size(); + while (pointer < buffersize) { + ConstRunningLengthWord rlw(buffer[pointer]); + const size_t productofrl = + static_cast(rlw.getRunningLength() * wordinbits); + if (rlw.getRunningBit()) { + const size_t upper_limit = currentoffset + productofrl; + for (; currentoffset < upper_limit; ++currentoffset) { + out.push_back(currentoffset); + } + } else { + currentoffset += productofrl; + } + ++pointer; + const size_t rlwlw = rlw.getNumberOfLiteralWords(); + for (uword k = 0; k < rlwlw; ++k) { + uword currentword = buffer[pointer]; + while (currentword != 0) { + uint64_t t = currentword & -currentword; + uint32_t r = numberOfTrailingZeros(t); + out.push_back(currentoffset + r); + currentword ^= t; + } + currentoffset += wordinbits; + ++pointer; + } + } +} + +template +bool EWAHBoolArray::operator!=(const EWAHBoolArray &x) const { + return !(*this == x); +} + +template +bool EWAHBoolArray::operator==(const BoolArray &x) const { + // could be more efficient + return (this->toBoolArray() == x); +} + +template +bool EWAHBoolArray::operator!=(const BoolArray &x) const { + // could be more efficient + return (this->toBoolArray() != x); +} + +template +size_t EWAHBoolArray::addStreamOfEmptyWords(const bool v, + size_t number) { + if (number == 0) + return 0; + sizeinbits += number * wordinbits; + size_t wordsadded = 0; + if ((RunningLengthWord::getRunningBit(buffer[lastRLW]) != v) && + (RunningLengthWord::size(buffer[lastRLW]) == 0)) { + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + } else if ((RunningLengthWord::getNumberOfLiteralWords( + buffer[lastRLW]) != 0) || + (RunningLengthWord::getRunningBit(buffer[lastRLW]) != v)) { + buffer.push_back(0); + ++wordsadded; + lastRLW = buffer.size() - 1; + if (v) + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + } + const uword runlen = + RunningLengthWord::getRunningLength(buffer[lastRLW]); + + const uword whatwecanadd = + number < static_cast( + RunningLengthWord::largestrunninglengthcount - runlen) + ? 
static_cast(number) + : static_cast( + RunningLengthWord::largestrunninglengthcount - runlen); + RunningLengthWord::setRunningLength( + buffer[lastRLW], static_cast(runlen + whatwecanadd)); + + number -= static_cast(whatwecanadd); + while (number >= RunningLengthWord::largestrunninglengthcount) { + buffer.push_back(0); + ++wordsadded; + lastRLW = buffer.size() - 1; + if (v) + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + RunningLengthWord::setRunningLength( + buffer[lastRLW], RunningLengthWord::largestrunninglengthcount); + number -= static_cast( + RunningLengthWord::largestrunninglengthcount); + } + if (number > 0) { + buffer.push_back(0); + ++wordsadded; + lastRLW = buffer.size() - 1; + if (v) + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + RunningLengthWord::setRunningLength(buffer[lastRLW], + static_cast(number)); + } + return wordsadded; +} + +template +void EWAHBoolArray::fastaddStreamOfEmptyWords(const bool v, + size_t number) { + if (number == 0) + return; + if ((RunningLengthWord::getRunningBit(buffer[lastRLW]) != v) && + (RunningLengthWord::size(buffer[lastRLW]) == 0)) { + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + } else if ((RunningLengthWord::getNumberOfLiteralWords( + buffer[lastRLW]) != 0) || + (RunningLengthWord::getRunningBit(buffer[lastRLW]) != v)) { + buffer.push_back(0); + lastRLW = buffer.size() - 1; + if (v) + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + } + const uword runlen = + RunningLengthWord::getRunningLength(buffer[lastRLW]); + + const uword whatwecanadd = + number < static_cast( + RunningLengthWord::largestrunninglengthcount - runlen) + ? static_cast(number) + : static_cast( + RunningLengthWord::largestrunninglengthcount - runlen); + RunningLengthWord::setRunningLength( + buffer[lastRLW], static_cast(runlen + whatwecanadd)); + + number -= static_cast(whatwecanadd); + while (number >= RunningLengthWord::largestrunninglengthcount) { + buffer.push_back(0); + lastRLW = buffer.size() - 1; + if (v) + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + RunningLengthWord::setRunningLength( + buffer[lastRLW], RunningLengthWord::largestrunninglengthcount); + number -= static_cast( + RunningLengthWord::largestrunninglengthcount); + } + if (number > 0) { + buffer.push_back(0); + lastRLW = buffer.size() - 1; + if (v) + RunningLengthWord::setRunningBit(buffer[lastRLW], v); + RunningLengthWord::setRunningLength(buffer[lastRLW], + static_cast(number)); + } +} + +template +size_t EWAHBoolArray::addStreamOfDirtyWords(const uword *v, + const size_t number) { + if (number == 0) + return 0; + uword rlw = buffer[lastRLW]; + size_t NumberOfLiteralWords = + RunningLengthWord::getNumberOfLiteralWords(rlw); + if (NumberOfLiteralWords + number <= + RunningLengthWord::largestliteralcount) { + RunningLengthWord::setNumberOfLiteralWords( + rlw, NumberOfLiteralWords + number); + buffer[lastRLW] = rlw; + sizeinbits += number * wordinbits; + buffer.insert(buffer.end(), v, v + number); + return number; + } + // we proceed the long way + size_t howmanywecanadd = + RunningLengthWord::largestliteralcount - NumberOfLiteralWords; + RunningLengthWord::setNumberOfLiteralWords( + rlw, RunningLengthWord::largestliteralcount); + buffer[lastRLW] = rlw; + buffer.insert(buffer.end(), v, v + howmanywecanadd); + size_t wordadded = howmanywecanadd; + sizeinbits += howmanywecanadd * wordinbits; + buffer.push_back(0); + lastRLW = buffer.size() - 1; + ++wordadded; + wordadded += + addStreamOfDirtyWords(v + howmanywecanadd, number - howmanywecanadd); + return 
wordadded; +} + +template +void EWAHBoolArray::fastaddStreamOfDirtyWords(const uword *v, + const size_t number) { + if (number == 0) + return; + uword rlw = buffer[lastRLW]; + size_t NumberOfLiteralWords = + RunningLengthWord::getNumberOfLiteralWords(rlw); + if (NumberOfLiteralWords + number <= + RunningLengthWord::largestliteralcount) { + RunningLengthWord::setNumberOfLiteralWords( + rlw, NumberOfLiteralWords + number); + buffer[lastRLW] = rlw; + for (size_t i = 0; i < number; ++i) + buffer.push_back(v[i]); + // buffer.insert(buffer.end(), v, v+number); // seems slower than push_back? + return; + } + // we proceed the long way + size_t howmanywecanadd = + RunningLengthWord::largestliteralcount - NumberOfLiteralWords; + RunningLengthWord::setNumberOfLiteralWords( + rlw, RunningLengthWord::largestliteralcount); + buffer[lastRLW] = rlw; + for (size_t i = 0; i < howmanywecanadd; ++i) + buffer.push_back(v[i]); + // buffer.insert(buffer.end(), v, v+howmanywecanadd);// seems slower than + // push_back? + buffer.push_back(0); + lastRLW = buffer.size() - 1; + fastaddStreamOfDirtyWords(v + howmanywecanadd, number - howmanywecanadd); +} + +template +size_t EWAHBoolArray::addStreamOfNegatedDirtyWords(const uword *v, + const size_t number) { + if (number == 0) + return 0; + uword rlw = buffer[lastRLW]; + size_t NumberOfLiteralWords = + RunningLengthWord::getNumberOfLiteralWords(rlw); + if (NumberOfLiteralWords + number <= + RunningLengthWord::largestliteralcount) { + RunningLengthWord::setNumberOfLiteralWords( + rlw, NumberOfLiteralWords + number); + buffer[lastRLW] = rlw; + sizeinbits += number * wordinbits; + for (size_t k = 0; k < number; ++k) + buffer.push_back(~v[k]); + return number; + } + // we proceed the long way + size_t howmanywecanadd = + RunningLengthWord::largestliteralcount - NumberOfLiteralWords; + RunningLengthWord::setNumberOfLiteralWords( + rlw, RunningLengthWord::largestliteralcount); + buffer[lastRLW] = rlw; + for (size_t k = 0; k < howmanywecanadd; ++k) + buffer.push_back(~v[k]); + size_t wordadded = howmanywecanadd; + sizeinbits += howmanywecanadd * wordinbits; + buffer.push_back(0); + lastRLW = buffer.size() - 1; + ++wordadded; + wordadded += + addStreamOfDirtyWords(v + howmanywecanadd, number - howmanywecanadd); + return wordadded; +} + +template size_t EWAHBoolArray::addEmptyWord(const bool v) { + RunningLengthWord lastRunningLengthWord(buffer[lastRLW]); + const bool noliteralword = + (lastRunningLengthWord.getNumberOfLiteralWords() == 0); + // first, if the last running length word is empty, we align it + // this + uword runlen = lastRunningLengthWord.getRunningLength(); + if ((noliteralword) && (runlen == 0)) { + lastRunningLengthWord.setRunningBit(v); + } + if ((noliteralword) && (lastRunningLengthWord.getRunningBit() == v) && + (runlen < RunningLengthWord::largestrunninglengthcount)) { + lastRunningLengthWord.setRunningLength(static_cast(runlen + 1)); + return 0; + } else { + // we have to start anew + buffer.push_back(0); + lastRLW = buffer.size() - 1; + RunningLengthWord lastRunningLengthWord2(buffer[lastRLW]); + lastRunningLengthWord2.setRunningBit(v); + lastRunningLengthWord2.setRunningLength(1); + return 1; + } +} + +template +void fast_logicalor_tocontainer(size_t n, const EWAHBoolArray **inputs, + EWAHBoolArray &container) { + class EWAHBoolArrayPtr { + + public: + EWAHBoolArrayPtr(const EWAHBoolArray *p, bool o) : ptr(p), own(o) {} + const EWAHBoolArray *ptr; + bool own; // whether to clean + + bool operator<(const EWAHBoolArrayPtr &o) const { + return 
o.ptr->sizeInBytes() < ptr->sizeInBytes(); // backward on purpose + } + }; + + if (n == 0) { + container.reset(); + return; + } + if (n == 1) { + container = *inputs[0]; + return; + } + std::priority_queue pq; + for (size_t i = 0; i < n; i++) { + // could use emplace + pq.push(EWAHBoolArrayPtr(inputs[i], false)); + } + while (pq.size() > 2) { + + EWAHBoolArrayPtr x1 = pq.top(); + pq.pop(); + + EWAHBoolArrayPtr x2 = pq.top(); + pq.pop(); + + EWAHBoolArray *buffer = new EWAHBoolArray(); + x1.ptr->logicalor(*x2.ptr, *buffer); + + if (x1.own) { + delete x1.ptr; + } + if (x2.own) { + delete x2.ptr; + } + pq.push(EWAHBoolArrayPtr(buffer, true)); + } + EWAHBoolArrayPtr x1 = pq.top(); + pq.pop(); + + EWAHBoolArrayPtr x2 = pq.top(); + pq.pop(); + + x1.ptr->logicalor(*x2.ptr, container); + + if (x1.own) { + delete x1.ptr; + } + if (x2.own) { + delete x2.ptr; + } +} + +template +void EWAHBoolArray::logicalor(const EWAHBoolArray &a, + EWAHBoolArray &container) const { + container.reset(); + if (RESERVEMEMORY) + container.buffer.reserve(buffer.size() + a.buffer.size()); + EWAHBoolArrayRawIterator i = a.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!(i.hasNext() and j.hasNext())) { // hopefully this never happens... + container.setSizeInBits(sizeInBits()); + return; + } + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey = i_is_prey ? rlwi : rlwj; + BufferedRunningLengthWord &predator = i_is_prey ? rlwj : rlwi; + if (predator.getRunningBit()) { + container.fastaddStreamOfEmptyWords(true, predator.getRunningLength()); + prey.discardFirstWordsWithReload(predator.getRunningLength()); + } else { + const size_t index = + prey.discharge(container, predator.getRunningLength()); + container.fastaddStreamOfEmptyWords(false, predator.getRunningLength() - + index); + } + predator.discardRunningWordsWithReload(); + } + + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + container.addWord(rlwi.getLiteralWordAt(k) | rlwj.getLiteralWordAt(k)); + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + const bool i_remains = rlwi.size() > 0; + BufferedRunningLengthWord &remaining = i_remains ? rlwi : rlwj; + remaining.discharge(container); + container.setSizeInBits(sizeInBits() > a.sizeInBits() ? sizeInBits() : a.sizeInBits()); +} + +template +size_t EWAHBoolArray::logicalorcount(const EWAHBoolArray &a) const { + size_t answer = 0; + EWAHBoolArrayRawIterator i = a.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!(i.hasNext() and j.hasNext())) { // hopefully this never happens... + return 0; + } + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey = i_is_prey ? rlwi : rlwj; + BufferedRunningLengthWord &predator = i_is_prey ? 
rlwj : rlwi; + if (predator.getRunningBit()) { + answer += predator.getRunningLength() * wordinbits; + prey.discardFirstWordsWithReload(predator.getRunningLength()); + + } else { + // const size_t index = + prey.dischargeCount(predator.getRunningLength(), &answer); + } + predator.discardRunningWordsWithReload(); + } + + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + answer += countOnes( + (uword)(rlwi.getLiteralWordAt(k) | rlwj.getLiteralWordAt(k))); + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + const bool i_remains = rlwi.size() > 0; + BufferedRunningLengthWord &remaining = i_remains ? rlwi : rlwj; + answer += remaining.dischargeCount(); + return answer; +} + +template +void EWAHBoolArray::logicalxor(const EWAHBoolArray &a, + EWAHBoolArray &container) const { + container.reset(); + if (RESERVEMEMORY) + container.buffer.reserve(buffer.size() + a.buffer.size()); + EWAHBoolArrayRawIterator i = a.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!(i.hasNext() and j.hasNext())) { // hopefully this never happens... + container.setSizeInBits(sizeInBits()); + return; + } + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey = i_is_prey ? rlwi : rlwj; + BufferedRunningLengthWord &predator = i_is_prey ? rlwj : rlwi; + const size_t index = + (!predator.getRunningBit()) + ? prey.discharge(container, predator.getRunningLength()) + : prey.dischargeNegated(container, predator.getRunningLength()); + container.fastaddStreamOfEmptyWords(predator.getRunningBit(), + predator.getRunningLength() - index); + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) + container.addWord(rlwi.getLiteralWordAt(k) ^ rlwj.getLiteralWordAt(k)); + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + const bool i_remains = rlwi.size() > 0; + BufferedRunningLengthWord &remaining = i_remains ? rlwi : rlwj; + remaining.discharge(container); + container.setSizeInBits(sizeInBits() > a.sizeInBits() ? sizeInBits() : a.sizeInBits()); +} + +template +size_t EWAHBoolArray::logicalxorcount(const EWAHBoolArray &a) const { + EWAHBoolArrayRawIterator i = a.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!i.hasNext()) + return a.numberOfOnes(); + if (!j.hasNext()) + return this->numberOfOnes(); + + size_t answer = 0; + + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey = i_is_prey ? rlwi : rlwj; + BufferedRunningLengthWord &predator = i_is_prey ? 
rlwj : rlwi; + size_t index; + + if (predator.getRunningBit()) { + index = + prey.dischargeCountNegated(predator.getRunningLength(), &answer); + } else { + index = prey.dischargeCount(predator.getRunningLength(), &answer); + } + if (predator.getRunningBit()) + answer += (predator.getRunningLength() - index) * wordinbits; + + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + answer += countOnes( + (uword)(rlwi.getLiteralWordAt(k) ^ rlwj.getLiteralWordAt(k))); + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + const bool i_remains = rlwi.size() > 0; + BufferedRunningLengthWord &remaining = i_remains ? rlwi : rlwj; + answer += remaining.dischargeCount(); + return answer; +} + +template +void EWAHBoolArray::logicaland(const EWAHBoolArray &a, + EWAHBoolArray &container) const { + container.reset(); + if (RESERVEMEMORY) + container.buffer.reserve(buffer.size() > a.buffer.size() ? buffer.size() + : a.buffer.size()); + EWAHBoolArrayRawIterator i = a.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!(i.hasNext() and j.hasNext())) { // hopefully this never happens... + container.setSizeInBits(sizeInBits()); + return; + } + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey(i_is_prey ? rlwi : rlwj); + BufferedRunningLengthWord &predator(i_is_prey ? rlwj : rlwi); + if (!predator.getRunningBit()) { + container.fastaddStreamOfEmptyWords(false, predator.getRunningLength()); + prey.discardFirstWordsWithReload(predator.getRunningLength()); + } else { + const size_t index = + prey.discharge(container, predator.getRunningLength()); + container.fastaddStreamOfEmptyWords(false, predator.getRunningLength() - + index); + } + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + container.addWord(rlwi.getLiteralWordAt(k) & rlwj.getLiteralWordAt(k)); + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + container.setSizeInBits(sizeInBits()); + container.setSizeInBits(sizeInBits() > a.sizeInBits() ? sizeInBits() : a.sizeInBits()); +} + +template +void EWAHBoolArray::logicalandnot(const EWAHBoolArray &a, + EWAHBoolArray &container) const { + container.reset(); + if (RESERVEMEMORY) + container.buffer.reserve(buffer.size() > a.buffer.size() ? buffer.size() + : a.buffer.size()); + EWAHBoolArrayRawIterator i = raw_iterator(); + EWAHBoolArrayRawIterator j = a.raw_iterator(); + if (!j.hasNext()) { // the other fellow is empty + container = *this; // just copy, stupidly, the data + return; + } + if (!(i.hasNext())) { // hopefully this never happens... 
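+    // (Editorial note:) `i` iterates over *this; if this bitmap holds no
+    // compressed words the difference is necessarily empty, so only the
+    // declared size in bits is carried over before returning.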
+ container.setSizeInBits(sizeInBits()); + return; + } + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey(i_is_prey ? rlwi : rlwj); + BufferedRunningLengthWord &predator(i_is_prey ? rlwj : rlwi); + if (((predator.getRunningBit()) && (i_is_prey)) || + ((!predator.getRunningBit()) && (!i_is_prey))) { + container.fastaddStreamOfEmptyWords(false, predator.getRunningLength()); + prey.discardFirstWordsWithReload(predator.getRunningLength()); + } else if (i_is_prey) { + const size_t index = + prey.discharge(container, predator.getRunningLength()); + container.fastaddStreamOfEmptyWords(false, predator.getRunningLength() - + index); + } else { + const size_t index = + prey.dischargeNegated(container, predator.getRunningLength()); + container.fastaddStreamOfEmptyWords(true, predator.getRunningLength() - + index); + } + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + container.addWord(rlwi.getLiteralWordAt(k) & ~rlwj.getLiteralWordAt(k)); + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + const bool i_remains = rlwi.size() > 0; + if (i_remains) { + rlwi.discharge(container); + } + container.setSizeInBits(sizeInBits()); +} + +template +size_t EWAHBoolArray::logicalandnotcount(const EWAHBoolArray &a) const { + EWAHBoolArrayRawIterator i = raw_iterator(); + EWAHBoolArrayRawIterator j = a.raw_iterator(); + if (!j.hasNext()) { // the other fellow is empty + return this->numberOfOnes(); + } + if (!(i.hasNext())) { // hopefully this never happens... + return 0; + } + size_t answer = 0; + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey(i_is_prey ? rlwi : rlwj); + BufferedRunningLengthWord &predator(i_is_prey ? 
rlwj : rlwi); + if (((predator.getRunningBit()) && (i_is_prey)) || + ((!predator.getRunningBit()) && (!i_is_prey))) { + prey.discardFirstWordsWithReload(predator.getRunningLength()); + } else if (i_is_prey) { + prey.dischargeCount(predator.getRunningLength(), &answer); + } else { + const size_t index = + prey.dischargeCountNegated(predator.getRunningLength(), &answer); + answer += (predator.getRunningLength() - index) * wordinbits; + } + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + answer += countOnes( + (uword)(rlwi.getLiteralWordAt(k) & (~rlwj.getLiteralWordAt(k)))); + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + const bool i_remains = rlwi.size() > 0; + if (i_remains) { + answer += rlwi.dischargeCount(); + } + return answer; +} + +template +size_t EWAHBoolArray::logicalandcount(const EWAHBoolArray &a) const { + EWAHBoolArrayRawIterator i = a.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!(i.hasNext() and j.hasNext())) { // hopefully this never happens... + return 0; + } + size_t answer = 0; + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey(i_is_prey ? rlwi : rlwj); + BufferedRunningLengthWord &predator(i_is_prey ? rlwj : rlwi); + if (!predator.getRunningBit()) { + prey.discardFirstWordsWithReload(predator.getRunningLength()); + } else { + // const size_t index = + prey.dischargeCount(predator.getRunningLength(), &answer); + } + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + answer += countOnes( + (uword)(rlwi.getLiteralWordAt(k) & rlwj.getLiteralWordAt(k))); + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + return answer; +} + +template +bool EWAHBoolArray::intersects(const EWAHBoolArray &a) const { + EWAHBoolArrayRawIterator i = a.raw_iterator(); + EWAHBoolArrayRawIterator j = raw_iterator(); + if (!(i.hasNext() and j.hasNext())) { // hopefully this never happens... + return false; + } + // at this point, this should be safe: + BufferedRunningLengthWord &rlwi = i.next(); + BufferedRunningLengthWord &rlwj = j.next(); + + while ((rlwi.size() > 0) && (rlwj.size() > 0)) { + while ((rlwi.getRunningLength() > 0) || (rlwj.getRunningLength() > 0)) { + const bool i_is_prey = rlwi.getRunningLength() < rlwj.getRunningLength(); + BufferedRunningLengthWord &prey(i_is_prey ? rlwi : rlwj); + BufferedRunningLengthWord &predator(i_is_prey ? 
rlwj : rlwi); + if (!predator.getRunningBit()) { + prey.discardFirstWordsWithReload(predator.getRunningLength()); + } else { + size_t index = 0; + bool isnonzero = + prey.nonzero_discharge(predator.getRunningLength(), index); + if (isnonzero) + return true; + } + predator.discardRunningWordsWithReload(); + } + const size_t nbre_literal = std::min(rlwi.getNumberOfLiteralWords(), + rlwj.getNumberOfLiteralWords()); + if (nbre_literal > 0) { + for (size_t k = 0; k < nbre_literal; ++k) { + if ((rlwi.getLiteralWordAt(k) & rlwj.getLiteralWordAt(k)) != 0) + return true; + } + rlwi.discardLiteralWordsWithReload(nbre_literal); + rlwj.discardLiteralWordsWithReload(nbre_literal); + } + } + return false; +} + +template +BitmapStatistics EWAHBoolArray::computeStatistics() const { + BitmapStatistics bs; + EWAHBoolArrayRawIterator i = raw_iterator(); + while (i.hasNext()) { + BufferedRunningLengthWord &brlw(i.next()); + ++bs.runningwordmarker; + bs.totalliteral += brlw.getNumberOfLiteralWords(); + bs.totalcompressed += brlw.getRunningLength(); + if (brlw.getRunningLength() == + RunningLengthWord::largestrunninglengthcount) { + ++bs.maximumofrunningcounterreached; + } + } + return bs; +} + +template void EWAHBoolArray::debugprintout() const { + std::cout << "==printing out EWAHBoolArray==" << std::endl; + std::cout << "Number of compressed words: " << buffer.size() << std::endl; + size_t pointer = 0; + while (pointer < buffer.size()) { + ConstRunningLengthWord rlw(buffer[pointer]); + bool b = rlw.getRunningBit(); + const uword rl = rlw.getRunningLength(); + const uword lw = rlw.getNumberOfLiteralWords(); + std::cout << "pointer = " << pointer << " running bit=" << b + << " running length=" << rl << " lit. words=" << lw << std::endl; + for (uword j = 0; j < lw; ++j) { + const uword &w = buffer[pointer + j + 1]; + std::cout << toBinaryString(w) << std::endl; + } + pointer += lw + 1; + } + std::cout << "==END==" << std::endl; +} + +template +size_t EWAHBoolArray::sizeOnDisk(const bool savesizeinbits) const { + return (savesizeinbits ? sizeof(sizeinbits) : 0) + sizeof(size_t) + + sizeof(uword) * buffer.size(); +} + +#endif diff --git a/yt/utilities/lib/ewahboolarray/ewahutil.h b/yt/utilities/lib/ewahboolarray/ewahutil.h new file mode 100644 index 00000000000..1d5ee30a0f0 --- /dev/null +++ b/yt/utilities/lib/ewahboolarray/ewahutil.h @@ -0,0 +1,240 @@ +/** + * This code is released under the + * Apache License Version 2.0 http://www.apache.org/licenses/. + * + * (c) Daniel Lemire, http://lemire.me/en/ + * + * Some code from the public domain tuklib. 
+ */ + +#ifndef EWAHUTIL_H +#define EWAHUTIL_H + +#include +#include +#include // mostly for Microsoft compilers +#include +#include // part of Visual Studio 2010 and better + +#include +#include +#include +#include +#include +#include +#include +#include + +// taken from stackoverflow +#ifndef NDEBUG +#define ASSERT(condition, message) \ + do { \ + if (!(condition)) { \ + std::cerr << "Assertion `" #condition "` failed in " << __FILE__ \ + << " line " << __LINE__ << ": " << message << std::endl; \ + std::exit(EXIT_FAILURE); \ + } \ + } while (false) +#else +#define ASSERT(condition, message) \ + do { \ + } while (false) +#endif + +#ifdef _MSC_VER +#include +#endif + +static inline uint32_t ctz64(uint64_t n) { +#if defined(__GNUC__) && UINT_MAX >= UINT32_MAX && ULLONG_MAX >= UINT64_MAX + return static_cast(__builtin_ctzll(n)); +#elif defined(_WIN64) && defined(_MSC_VER) && _MSC_VER >= 1400 && \ + ULONG_MAX >= UINT64_MAX + uint32_t i; + _BitScanForward64((unsigned long *)&i, n); + return i; +#else + uint32_t i = 1; + if ((n & static_cast(4294967295)) == 0) { + n >>= 32; + i += 32; + } + if ((n & static_cast(0x0000FFFFUL)) == 0) { + n >>= 16; + i += 16; + } + + if ((n & static_cast(0x000000FFUL)) == 0) { + n >>= 8; + i += 8; + } + + if ((n & static_cast(0x0000000FUL)) == 0) { + n >>= 4; + i += 4; + } + + if ((n & static_cast(0x00000003UL)) == 0) { + n >>= 2; + i += 2; + } + i -= (n & 0x1); + return i; +#endif +} + +static inline uint32_t ctz32(uint32_t n) { +#if defined(__GNUC__) && UINT_MAX >= UINT32_MAX + return static_cast(__builtin_ctz(n)); + +#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + uint32_t i; + __asm__("bsfl %1, %0" : "=r"(i) : "rm"(n)); + return i; + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + uint32_t i; + _BitScanForward((unsigned long *)&i, n); + return i; + +#else + uint32_t i = 1; + + if ((n & static_cast(0x0000FFFF)) == 0) { + n >>= 16; + i += 16; + } + + if ((n & static_cast(0x000000FF)) == 0) { + n >>= 8; + i += 8; + } + + if ((n & static_cast(0x0000000F)) == 0) { + n >>= 4; + i += 4; + } + + if ((n & static_cast(0x00000003)) == 0) { + n >>= 2; + i += 2; + } + + i -= (n & 1); + + return i; +#endif +} + +static inline uint32_t ctz16(uint16_t n) { +#if defined(__GNUC__) && UINT_MAX >= UINT32_MAX + return static_cast(__builtin_ctz(n)); + +#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + uint32_t i; + __asm__("bsfl %1, %0" : "=r"(i) : "rm"(n)); + return i; + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + uint32_t i; + _BitScanForward((unsigned long *)&i, n); + return i; + +#else + uint32_t i = 1; + + if ((n & static_cast(0x000000FF)) == 0) { + n >>= 8; + i += 8; + } + + if ((n & static_cast(0x0000000F)) == 0) { + n >>= 4; + i += 4; + } + + if ((n & static_cast(0x00000003)) == 0) { + n >>= 2; + i += 2; + } + i -= (n & 1); + + return i; +#endif +} + +#ifdef __GNUC__ +/** + * count the number of bits set to one (32 bit version) + */ +inline uint32_t countOnes(uint32_t x) { + return static_cast(__builtin_popcount(x)); +} +#elif defined(_MSC_VER) && _MSC_VER >= 1400 +inline uint32_t countOnes(uint32_t x) { return __popcnt(x); } +#else +inline uint32_t countOnes(uint32_t v) { + v = v - ((v >> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); + return static_cast((((v + (v >> 4)) & 0x0F0F0F0F) * 0x01010101) >> + 24); +} +#endif + +#ifdef __GNUC__ +/** + * count the number of bits set to one (64 bit version) + */ +inline uint32_t countOnes(uint64_t x) { + return static_cast(__builtin_popcountll(x)); +} +#elif 
defined(_WIN64) && defined(_MSC_VER) && _MSC_VER >= 1400 +inline uint32_t countOnes(uint64_t x) { + return static_cast(__popcnt64(static_cast<__int64>(x))); +} +#else +inline uint32_t countOnes(uint64_t v) { + v = v - ((v >> 1) & 0x5555555555555555); + v = (v & 0x3333333333333333) + ((v >> 2) & 0x3333333333333333); + v = ((v + (v >> 4)) & 0x0F0F0F0F0F0F0F0F); + return static_cast((v * (0x0101010101010101)) >> 56); +} +#endif + +inline uint32_t countOnes(uint16_t v) { + return countOnes(static_cast(v)); +} + +inline uint32_t numberOfTrailingZeros(uint32_t x) { + if (x == 0) + return 32; + return ctz32(x); +} + +inline uint32_t numberOfTrailingZeros(uint64_t x) { + if (x == 0) + return 64; + return ctz64(x); +} + +inline uint32_t numberOfTrailingZeros(uint16_t x) { + if (x == 0) + return 16; + return ctz16(x); +} + +/** + * Returns the binary representation of a binary word. + */ +template std::string toBinaryString(const uword w) { + std::ostringstream convert; + for (uint32_t k = 0; k < sizeof(uword) * 8; ++k) { + if (w & (static_cast(1) << k)) + convert << "1"; + else + convert << "0"; + } + return convert.str(); +} + +#endif diff --git a/yt/utilities/lib/ewahboolarray/runninglengthword.h b/yt/utilities/lib/ewahboolarray/runninglengthword.h new file mode 100644 index 00000000000..85ccdf5ddad --- /dev/null +++ b/yt/utilities/lib/ewahboolarray/runninglengthword.h @@ -0,0 +1,548 @@ +/** + * This code is released under the + * Apache License Version 2.0 http://www.apache.org/licenses/. + * + * (c) Daniel Lemire, http://lemire.me/en/ + */ +#ifndef RUNNINGLENGTHWORD_H_ +#define RUNNINGLENGTHWORD_H_ +#include +/** + * For expert users. + * This class is used to represent a special type of word storing + * a run length. It is defined by the Enhanced Word Aligned Hybrid (EWAH) + * format. You don't normally need to access this class. + */ +template class RunningLengthWord { +public: + RunningLengthWord(uword &data) : mydata(data) {} + + RunningLengthWord(const RunningLengthWord &rlw) : mydata(rlw.mydata) {} + + RunningLengthWord &operator=(const RunningLengthWord &rlw) { + mydata = rlw.mydata; + return *this; + } + + /** + * Which bit is being repeated? + */ + bool getRunningBit() const { return mydata & static_cast(1); } + + /** + * how many words should be filled by the running bit + */ + static inline bool getRunningBit(uword data) { + return data & static_cast(1); + } + + /** + * how many words should be filled by the running bit + */ + uword getRunningLength() const { + return static_cast((mydata >> 1) & largestrunninglengthcount); + } + + /** + * followed by how many literal words? + */ + static inline uword getRunningLength(uword data) { + return static_cast((data >> 1) & largestrunninglengthcount); + } + + /** + * followed by how many literal words? + */ + uword getNumberOfLiteralWords() const { + return static_cast(mydata >> (1 + runninglengthbits)); + } + + /** + * Total of getRunningLength() and getNumberOfLiteralWords() + */ + uword size() const { + return static_cast(getRunningLength() + getNumberOfLiteralWords()); + } + + /** + * Total of getRunningLength() and getNumberOfLiteralWords() + */ + static inline uword size(uword data) { + return static_cast(getRunningLength(data) + + getNumberOfLiteralWords(data)); + } + + /** + * followed by how many literal words? 
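+   * (Editorial note:) a marker word packs, from the least significant bit up,
+   * one running bit, then runninglengthbits (half the word) of running
+   * length, then literalbits of literal-word count -- which is what the mask
+   * constants defined below encode.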
+ */ + static inline uword getNumberOfLiteralWords(uword data) { + return static_cast(data >> (1 + runninglengthbits)); + } + + /** + * running length of which type of bits + */ + void setRunningBit(bool b) { + if (b) + mydata |= static_cast(1); + else + mydata &= static_cast(~1); + } + + void discardFirstWords(uword x) { + const uword rl(getRunningLength()); + if (rl >= x) { + setRunningLength(rl - x); + return; + } + x -= rl; + setRunningLength(0); + setNumberOfLiteralWords(getNumberOfLiteralWords() - x); + } + + /** + * running length of which type of bits + */ + static inline void setRunningBit(uword &data, bool b) { + if (b) + data |= static_cast(1); + else + data &= static_cast(~1); + } + + void setRunningLength(uword l) { + mydata |= shiftedlargestrunninglengthcount; + mydata &= + static_cast((l << 1) | notshiftedlargestrunninglengthcount); + } + + // static call for people who hate objects + static inline void setRunningLength(uword &data, uword l) { + data |= shiftedlargestrunninglengthcount; + data &= static_cast((l << 1) | notshiftedlargestrunninglengthcount); + } + + void setNumberOfLiteralWords(uword l) { + mydata |= notrunninglengthplusrunningbit; + mydata &= static_cast((l << (runninglengthbits + 1)) | + runninglengthplusrunningbit); + } + // static call for people who hate objects + static inline void setNumberOfLiteralWords(uword &data, uword l) { + data |= notrunninglengthplusrunningbit; + data &= static_cast(l << (runninglengthbits + 1)) | + runninglengthplusrunningbit; + } + + static const uint32_t runninglengthbits = sizeof(uword) * 4; + static const uint32_t literalbits = sizeof(uword) * 8 - 1 - runninglengthbits; + static const uword largestliteralcount = + (static_cast(1) << literalbits) - 1; + static const uword largestrunninglengthcount = + (static_cast(1) << runninglengthbits) - 1; + static const uword shiftedlargestrunninglengthcount = + largestrunninglengthcount << 1; + static const uword notshiftedlargestrunninglengthcount = + static_cast(~shiftedlargestrunninglengthcount); + static const uword runninglengthplusrunningbit = + (static_cast(1) << (runninglengthbits + 1)) - 1; + static const uword notrunninglengthplusrunningbit = + static_cast(~runninglengthplusrunningbit); + static const uword notlargestrunninglengthcount = + static_cast(~largestrunninglengthcount); + + uword &mydata; +}; + +/** + * Same as RunningLengthWord, except that the values cannot be modified. + */ +template class ConstRunningLengthWord { +public: + ConstRunningLengthWord() : mydata(0) {} + + ConstRunningLengthWord(const uword data) : mydata(data) {} + + ConstRunningLengthWord(const ConstRunningLengthWord &rlw) + : mydata(rlw.mydata) {} + + /** + * Which bit is being repeated? + */ + bool getRunningBit() const { return mydata & static_cast(1); } + + /** + * how many words should be filled by the running bit + */ + uword getRunningLength() const { + return static_cast( + (mydata >> 1) & RunningLengthWord::largestrunninglengthcount); + } + + /** + * followed by how many literal words? + */ + uword getNumberOfLiteralWords() const { + return static_cast( + mydata >> (1 + RunningLengthWord::runninglengthbits)); + } + + /** + * Total of getRunningLength() and getNumberOfLiteralWords() + */ + uword size() const { return getRunningLength() + getNumberOfLiteralWords(); } + + uword mydata; +}; + +template class EWAHBoolArray; + +template class EWAHBoolArrayRawIterator; + +/** + * Same as RunningLengthWord, except that the values are buffered for quick + * access. 
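+ * (Editorial note:) the logical operations above consume these buffered words
+ * in a prey/predator pattern: the operand with the shorter clean run is the
+ * prey and is discharged against the longer run, after which the remaining
+ * literal words of both operands are combined directly.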
+ */ +template class BufferedRunningLengthWord { +public: + enum { wordinbits = sizeof(uword) * 8 }; + + BufferedRunningLengthWord(const uword &data, + EWAHBoolArrayRawIterator *p) + : RunningBit(data & static_cast(1)), + RunningLength(static_cast( + (data >> 1) & RunningLengthWord::largestrunninglengthcount)), + NumberOfLiteralWords(static_cast( + data >> (1 + RunningLengthWord::runninglengthbits))), + parent(p) {} + BufferedRunningLengthWord(const RunningLengthWord &p) + : RunningBit(p.mydata & static_cast(1)), + RunningLength((p.mydata >> 1) & + RunningLengthWord::largestrunninglengthcount), + NumberOfLiteralWords(p.mydata >> + (1 + RunningLengthWord::runninglengthbits)), + parent(p.parent) {} + + void discharge(EWAHBoolArray &container) { + while (size() > 0) { + // first run + size_t pl = getRunningLength(); + container.fastaddStreamOfEmptyWords(getRunningBit(), pl); + size_t pd = getNumberOfLiteralWords(); + writeLiteralWords(pd, container); + if (!next()) + break; + } + } + + size_t dischargeCount() { + size_t answer = 0; + while (size() > 0) { + // first run + if (getRunningBit()) { + answer += wordinbits * getRunningLength(); + } + size_t pd = getNumberOfLiteralWords(); + for (size_t i = 0; i < pd; ++i) + answer += countOnes((uword)getLiteralWordAt(i)); + if (!next()) + break; + } + return answer; + } + + size_t dischargeCountNegated() { + size_t answer = 0; + while (size() > 0) { + // first run + if (!getRunningBit()) { + answer += wordinbits * getRunningLength(); + } + size_t pd = getNumberOfLiteralWords(); + for (size_t i = 0; i < pd; ++i) + answer += countOnes((uword)(~getLiteralWordAt(i))); + if (!next()) + break; + } + return answer; + } + + // Symbolically write out up to max words, returns how many were written, + // write to count the number bits written (we assume that count was initially + // zero) + size_t dischargeCount(size_t max, size_t *count) { + size_t index = 0; + while (true) { + if (index + RunningLength > max) { + const size_t offset = max - index; + if (getRunningBit()) + *count += offset * wordinbits; + RunningLength -= offset; + return max; + } + if (getRunningBit()) + *count += RunningLength * wordinbits; + index += RunningLength; + if (NumberOfLiteralWords + index > max) { + const size_t offset = max - index; + for (size_t i = 0; i < offset; ++i) + *count += countOnes((uword)getLiteralWordAt(i)); + RunningLength = 0; + NumberOfLiteralWords -= offset; + return max; + } + for (size_t i = 0; i < NumberOfLiteralWords; ++i) + *count += countOnes((uword)getLiteralWordAt(i)); + index += NumberOfLiteralWords; + if (!next()) + break; + } + return index; + } + + size_t dischargeCountNegated(size_t max, size_t *count) { + size_t index = 0; + while (true) { + if (index + RunningLength > max) { + const size_t offset = max - index; + if (!getRunningBit()) + *count += offset * wordinbits; + RunningLength -= offset; + return max; + } + if (!getRunningBit()) + *count += RunningLength * wordinbits; + index += RunningLength; + if (NumberOfLiteralWords + index > max) { + const size_t offset = max - index; + for (size_t i = 0; i < offset; ++i) + *count += countOnes((uword)(~getLiteralWordAt(i))); + RunningLength = 0; + NumberOfLiteralWords -= offset; + return max; + } + for (size_t i = 0; i < NumberOfLiteralWords; ++i) + *count += countOnes((uword)(~getLiteralWordAt(i))); + index += NumberOfLiteralWords; + if (!next()) + break; + } + return index; + } + bool nonzero_discharge() { + while (size() > 0) { + // first run + size_t pl = getRunningLength(); + if ((pl > 0) && 
(getRunningBit())) + return true; + size_t pd = getNumberOfLiteralWords(); + if (pd > 0) + return true; + discardFirstWordsWithReload(pl + pd); + } + return false; + } + + // Write out up to max words, returns how many were written + size_t discharge(EWAHBoolArray &container, size_t max) { + size_t index = 0; + while (true) { + if (index + RunningLength > max) { + const size_t offset = max - index; + container.fastaddStreamOfEmptyWords(getRunningBit(), offset); + RunningLength -= offset; + return max; + } + container.fastaddStreamOfEmptyWords(getRunningBit(), RunningLength); + index += RunningLength; + if (NumberOfLiteralWords + index > max) { + const size_t offset = max - index; + writeLiteralWords(offset, container); + RunningLength = 0; + NumberOfLiteralWords -= offset; + return max; + } + writeLiteralWords(NumberOfLiteralWords, container); + index += NumberOfLiteralWords; + if (!next()) + break; + } + return index; + } + + bool nonzero_discharge(size_t max, size_t &index) { + index = 0; + while ((index < max) && (size() > 0)) { + // first run + size_t pl = getRunningLength(); + if (index + pl > max) { + pl = max - index; + } + if ((getRunningBit()) && (pl > 0)) + return true; + index += pl; + size_t pd = getNumberOfLiteralWords(); + if (pd + index > max) { + pd = max - index; + } + if (pd > 0) + return true; + discardFirstWordsWithReload(pl + pd); + } + return false; + } + + // Write out up to max words, returns how many were written + size_t dischargeNegated(EWAHBoolArray &container, size_t max) { + // todo: could be optimized further + size_t index = 0; + while ((index < max) && (size() > 0)) { + // first run + size_t pl = getRunningLength(); + if (index + pl > max) { + pl = max - index; + } + container.fastaddStreamOfEmptyWords(!getRunningBit(), pl); + index += pl; + size_t pd = getNumberOfLiteralWords(); + if (pd + index > max) { + pd = max - index; + } + writeNegatedLiteralWords(pd, container); + discardFirstWordsWithReload(pl + pd); + index += pd; + } + return index; + } + bool nonzero_dischargeNegated(size_t max, size_t &index) { + while ((index < max) && (size() > 0)) { + // first run + size_t pl = getRunningLength(); + if (index + pl > max) { + pl = max - index; + } + if ((!getRunningBit()) && (pl > 0)) + return true; + index += pl; + size_t pd = getNumberOfLiteralWords(); + if (pd + index > max) { + pd = max - index; + } + if (pd > 0) + return true; + discardFirstWordsWithReload(pl + pd); + index += pd; + } + return false; + } + + uword getLiteralWordAt(size_t index) { return parent->dirtyWords()[index]; } + + void writeLiteralWords(size_t numWords, EWAHBoolArray &container) { + container.fastaddStreamOfDirtyWords(parent->dirtyWords(), numWords); + } + + void writeNegatedLiteralWords(size_t numWords, + EWAHBoolArray &container) { + container.addStreamOfNegatedDirtyWords(parent->dirtyWords(), numWords); + } + + void discardRunningWords() { RunningLength = 0; } + + void discardRunningWordsWithReload() { + RunningLength = 0; + if (NumberOfLiteralWords == 0) + next(); + } + + bool next() { + if (!parent->hasNext()) { + NumberOfLiteralWords = 0; + RunningLength = 0; + return false; + } + parent->next(); + return true; + } + + void read(const uword &data) { + RunningBit = data & static_cast(1); + RunningLength = static_cast( + (data >> 1) & RunningLengthWord::largestrunninglengthcount); + NumberOfLiteralWords = static_cast( + data >> (1 + RunningLengthWord::runninglengthbits)); + } + + /** + * Which bit is being repeated? 
+ */ + bool getRunningBit() const { return RunningBit; } + + void discardFirstWords(uword x) { + if (RunningLength >= x) { + RunningLength = static_cast(RunningLength - x); + return; + } + x = static_cast(x - RunningLength); + RunningLength = 0; + NumberOfLiteralWords = static_cast(NumberOfLiteralWords - x); + } + + /** + * how many words should be filled by the running bit (see previous method) + */ + uword getRunningLength() const { return RunningLength; } + + /** + * followed by how many literal words? + */ + uword getNumberOfLiteralWords() const { return NumberOfLiteralWords; } + + /** + * Total of getRunningLength() and getNumberOfLiteralWords() + */ + uword size() const { + return static_cast(RunningLength + NumberOfLiteralWords); + } + + friend std::ostream &operator<<(std::ostream &out, + const BufferedRunningLengthWord &a) { + out << "{RunningBit:" << a.RunningBit + << ",RunningLength:" << a.RunningLength + << ",NumberOfLiteralWords:" << a.NumberOfLiteralWords << "}"; + return out; + } + void discardLiteralWordsWithReload(uword x) { + assert(NumberOfLiteralWords >= x); + NumberOfLiteralWords -= x; + if (NumberOfLiteralWords == 0) + next(); + } + + void discardFirstWordsWithReload(uword x) { + while (x > 0) { + if (RunningLength > x) { + RunningLength = static_cast(RunningLength - x); + return; + } + x = static_cast(x - RunningLength); + RunningLength = 0; + size_t toDiscard = x > NumberOfLiteralWords ? NumberOfLiteralWords : x; + NumberOfLiteralWords = + static_cast(NumberOfLiteralWords - toDiscard); + x -= toDiscard; + if ((x > 0) || (size() == 0)) { + if (!next()) + break; + } + } + } + +private: + bool RunningBit; + uword RunningLength; + uword NumberOfLiteralWords; + EWAHBoolArrayRawIterator *parent; +}; + +#endif /* RUNNINGLENGTHWORD_H_ */ diff --git a/yt/utilities/lib/field_interpolation_tables.pxd b/yt/utilities/lib/field_interpolation_tables.pxd index 19dbc2a7ec6..7ff9814b8a2 100644 --- a/yt/utilities/lib/field_interpolation_tables.pxd +++ b/yt/utilities/lib/field_interpolation_tables.pxd @@ -5,13 +5,6 @@ Field Interpolation Tables """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport cython cimport numpy as np diff --git a/yt/utilities/lib/fixed_interpolator.c b/yt/utilities/lib/fixed_interpolator.c index d9c535923eb..93fb21d0330 100644 --- a/yt/utilities/lib/fixed_interpolator.c +++ b/yt/utilities/lib/fixed_interpolator.c @@ -1,9 +1,4 @@ /******************************************************************************* -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. *******************************************************************************/ // diff --git a/yt/utilities/lib/fixed_interpolator.h b/yt/utilities/lib/fixed_interpolator.h index 60e3816015e..31363ae2e80 100644 --- a/yt/utilities/lib/fixed_interpolator.h +++ b/yt/utilities/lib/fixed_interpolator.h @@ -1,9 +1,4 @@ /******************************************************************************* -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. *******************************************************************************/ // // A small, tiny, itty bitty module for computation-intensive interpolation diff --git a/yt/utilities/lib/fixed_interpolator.pxd b/yt/utilities/lib/fixed_interpolator.pxd index 5baa09bfd37..f950367318e 100644 --- a/yt/utilities/lib/fixed_interpolator.pxd +++ b/yt/utilities/lib/fixed_interpolator.pxd @@ -5,13 +5,6 @@ Fixed interpolator includes """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np diff --git a/yt/utilities/lib/fnv_hash.pxd b/yt/utilities/lib/fnv_hash.pxd index be9686dd154..7700981fa64 100644 --- a/yt/utilities/lib/fnv_hash.pxd +++ b/yt/utilities/lib/fnv_hash.pxd @@ -6,13 +6,6 @@ Definitions for fnv_hash """ -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np diff --git a/yt/utilities/lib/fnv_hash.pyx b/yt/utilities/lib/fnv_hash.pyx index d5ed2b45de7..d3ba7b0fc98 100644 --- a/yt/utilities/lib/fnv_hash.pyx +++ b/yt/utilities/lib/fnv_hash.pyx @@ -4,13 +4,6 @@ Fast hashing routines """ -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np @@ -41,4 +34,4 @@ def fnv_hash(octets): octets : bytestring The string of bytes to generate a hash from. """ - return c_fnv_hash(octets) \ No newline at end of file + return c_fnv_hash(octets) diff --git a/yt/utilities/lib/fortran_reader.pyx b/yt/utilities/lib/fortran_reader.pyx index 4a16820481c..a6a77793947 100644 --- a/yt/utilities/lib/fortran_reader.pyx +++ b/yt/utilities/lib/fortran_reader.pyx @@ -5,13 +5,6 @@ Simple readers for fortran unformatted data, specifically for the Tiger code. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np @@ -30,7 +23,7 @@ cdef extern from "endian_swap.h": void FIX_FLOAT( float ) cdef extern from "platform_dep.h": - void *alloca(int) + void *alloca(size_t) cdef extern from "stdio.h": cdef int SEEK_SET @@ -68,11 +61,11 @@ def count_art_octs(char *fn, long offset, for _ in range(min_level + 1, max_level + 1): fread(dummy_records, sizeof(int), 2, f); fread(&nLevel, sizeof(int), 1, f); FIX_LONG(nLevel) - print level_info + print(level_info) level_info.append(nLevel) fread(dummy_records, sizeof(int), 2, f); fread(&next_record, sizeof(int), 1, f); FIX_LONG(next_record) - print "Record size is:", next_record + print("Record size is:", next_record) # Offset for one record header we just read next_record = (nLevel * (next_record + 2*sizeof(int))) - sizeof(int) fseek(f, next_record, SEEK_CUR) @@ -85,7 +78,7 @@ def count_art_octs(char *fn, long offset, next_record = (2*sizeof(int) + readin) * (nLevel * nchild) next_record -= sizeof(int) fseek(f, next_record, SEEK_CUR) - print "nhvars",nhydro_vars + print("nhvars",nhydro_vars) fclose(f) def read_art_tree(char *fn, long offset, @@ -122,11 +115,11 @@ def read_art_tree(char *fn, long offset, fread(&readin, sizeof(int), 1, f); FIX_LONG(readin) iOct = iHOLL[Level] - 1 nLevel = iNOLL[Level] - #print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct - #print ftell(f) + #print("Reading Hierarchy for Level", Lev, Level, nLevel, iOct) + #print(ftell(f)) for ic1 in range(nLevel): iOctMax = max(iOctMax, iOct) - #print readin, iOct, nLevel, sizeof(int) + #print(readin, iOct, nLevel, sizeof(int)) next_record = ftell(f) fread(&readin, sizeof(int), 1, f); FIX_LONG(readin) assert readin==52 @@ -154,11 +147,11 @@ def read_art_tree(char *fn, long offset, #skip over the hydro variables #find the length of one child section - #print 'measuring child record ', + #print('measuring child record ',) fread(&next_record, sizeof(int), 1, f); - #print next_record, + #print(next_record,) FIX_LONG(next_record) - #print next_record + #print(next_record) fseek(f,ftell(f)-sizeof(int),SEEK_SET) #rewind #This is a sloppy fix; next_record is 64bit #and I don't think FIX_LONG(next_record) is working @@ -169,7 +162,7 @@ def read_art_tree(char *fn, long offset, #find the length of all of the children section child_record = ftell(f) + (next_record+2*sizeof(int))*nLevel*nchild - #print 'Skipping over hydro vars', ftell(f), child_record + #print('Skipping over hydro vars', ftell(f), child_record) fseek(f, child_record, SEEK_SET) # for ic1 in range(nLevel * nchild): @@ -194,7 +187,7 @@ def read_art_root_vars(char *fn, long root_grid_offset, fseek(f, root_grid_offset, SEEK_SET) # Now we seet out the cell we want cdef int my_offset = (((iz * ny) + iy) * nx + ix) - #print cell_record_size, my_offset, ftell(f) + #print(cell_record_size, my_offset, ftell(f)) fseek(f, cell_record_size * my_offset, SEEK_CUR) #(((C)*GridDimension[1]+(B))*GridDimension[0]+A) for j in range(nhydro_vars): @@ -223,11 +216,11 @@ cdef void read_art_vars(FILE *f, for j in range(8): #iterate over the children l = 0 fread(padding, sizeof(int), 3, f); FIX_LONG(padding[0]) - #print "Record Size", padding[0] + #print("Record Size", padding[0]) # This should be replaced by an fread of nhydro_vars length for k in range(nhydro_vars): #iterate over the record fread(&temp, sizeof(float), 1, f); FIX_FLOAT(temp) - #print k, temp + #print(k, temp) if k in fields: var[j,l] = temp l += 1 @@ -279,9 +272,9 @@ def 
read_art_grid(int varindex, offi = di - start_index[0] offj = dj - start_index[1] offk = dk - start_index[2] - #print offi, filled.shape[0], - #print offj, filled.shape[1], - #print offk, filled.shape[2] + #print(offi, filled.shape[0],) + #print(offj, filled.shape[1],) + #print(offk, filled.shape[2]) if filled[offi, offj, offk] == 1: continue if level > 0: odind = (kr*2 + jr)*2 + ir diff --git a/yt/utilities/lib/fp_utils.pxd b/yt/utilities/lib/fp_utils.pxd index d2ff09e010b..add1ba03461 100644 --- a/yt/utilities/lib/fp_utils.pxd +++ b/yt/utilities/lib/fp_utils.pxd @@ -5,13 +5,6 @@ Shareable definitions for common fp/int Cython utilities """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np cimport cython @@ -58,3 +51,10 @@ cdef inline np.int64_t i64min(np.int64_t i0, np.int64_t i1) nogil: if i0 < i1: return i0 return i1 +cdef inline _ensure_code(arr): + if hasattr(arr, "units"): + if "code_length" == str(arr.units): + return arr + arr.convert_to_units("code_length") + return arr + diff --git a/yt/utilities/lib/geometry_utils.pxd b/yt/utilities/lib/geometry_utils.pxd new file mode 100644 index 00000000000..844faaa5b6a --- /dev/null +++ b/yt/utilities/lib/geometry_utils.pxd @@ -0,0 +1,354 @@ +""" +Particle Deposition onto Octs + + + + +""" + +cimport numpy as np +cimport cython +from libc.float cimport DBL_MANT_DIG +from libc.math cimport frexp, ldexp, sqrt + +DEF ORDER_MAX=20 +DEF INDEX_MAX_64=2097151 +# TODO: Handle error for indices past max +DEF XSHIFT=2 +DEF YSHIFT=1 +DEF ZSHIFT=0 + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.int64_t ifrexp(np.float64_t x, np.int64_t *e): + cdef np.float64_t m + cdef int e0 = 0 + m = frexp(x,&e0) + e[0] = e0 + return ldexp(m,DBL_MANT_DIG) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.int64_t msdb(np.int64_t a, np.int64_t b): + """Get the most significant differing bit between a and b.""" + cdef np.int64_t c, ndx + c = a ^ b + ndx = 0 + while (0 < c): + c = (c >> 1) + ndx+=1 + return ndx + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.int64_t xor_msb(np.float64_t a, np.float64_t b): + """Get the exponent of the highest differing bit between a and b""" + # Get mantissa and exponents for each number + cdef np.int64_t a_m, a_e, b_m, b_e, x, y, z + b_e = 0 + a_e = 0 + a_m = ifrexp(a,&a_e) + b_m = ifrexp(b,&b_e) + x = ((a_e+1)*DBL_MANT_DIG) + y = ((b_e+1)*DBL_MANT_DIG) + # Compare mantissa if exponents equal + if x == y: + if a_m == b_m: return 0 + z = msdb(a_m,b_m) + #if 1: return z + x = x - z + return x-1 # required so that xor_msb(0.0,1.0)!=xor_msb(1.0,1.0) + # Otherwise return largest exponent + if y < x: + return x + else: + return y + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline int compare_floats_morton(np.float64_t p[3], np.float64_t q[3]): + cdef int j, out, dim + cdef np.int64_t x, y + x = -9999999999 + y = 0 + dim = 0 + for j in range(3):#[::-1]: + y = xor_msb(p[j],q[j]) + if x < y: + x = y + dim = j + if p[dim] < q[dim]: + out = 1 + else: + out = 0 + return out + +@cython.cdivision(True) +@cython.boundscheck(False) 
+@cython.wraparound(False) +cdef inline np.float64_t euclidean_distance(np.float64_t[:] p, np.float64_t[:] q): + cdef int j + cdef np.float64_t d + d = 0.0 + for j in range(3): + d+=(p[j]-q[j])**2 + return sqrt(d) + +# Todo: allow radius reported independently in each dimension for rectangular domain +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.float64_t smallest_quadtree_box(np.float64_t p[3], np.float64_t q[3], np.int32_t order, + np.float64_t DLE[3], np.float64_t DRE[3], + np.float64_t *cx, np.float64_t *cy, np.float64_t *cz): + cdef int j + cdef np.float64_t c[3] + cdef np.uint64_t pidx[3] + # cdef np.uint64_t qidx[3] + for j in range(3): + pidx[j] = 0 + # qidx[j] = 0 + cdef np.uint64_t pidx_next[3] + cdef np.uint64_t qidx_next[3] + cdef np.float64_t dds[3] + cdef np.float64_t rad + cdef int lvl = 0 + cdef int done = 0 + while not done: + if (lvl+1 >= order): + done = 1 + for j in range(3): + dds[j] = (DRE[j] - DLE[j])/(1 << ( lvl+1)) + pidx_next[j] = ((p[j] - DLE[j])/dds[j]) + qidx_next[j] = ((q[j] - DLE[j])/dds[j]) + for j in range(3): + if pidx_next[j]!=qidx_next[j]: + done = 1 + break + if not done: + for j in range(3): + pidx[j] = pidx_next[j] + # qidx[j] = qidx_next[j] + lvl+=1 + rad = 0.0 + for j in range(3): + dds[j] = (DRE[j] - DLE[j])/(1 << lvl) + c[j] = dds[j]*(pidx[j]+0.5) + rad+=((dds[j]/2.0)**2) + cx[0] = c[0] + cy[0] = c[1] + cz[0] = c[2] + return sqrt(rad) + +#----------------------------------------------------------------------------- +# 21 bits spread over 64 with 3 bits in between +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.uint64_t spread_64bits_by3(np.uint64_t x): + x=(x&(0x00000000001FFFFF)) + x=(x|(x<<20))*(0x000001FFC00003FF) + +#----------------------------------------------------------------------------- +# 21 bits spread over 64 with 2 bits in between +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.uint64_t spread_64bits_by2(np.uint64_t x): + # This magic comes from http://stackoverflow.com/questions/1024754/how-to-compute-a-3d-morton-number-interleave-the-bits-of-3-ints + # Only reversible up to 2097151 + # Select highest 21 bits (Required to be reversible to 21st bit) + # x = ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---k jihg fedc ba98 7654 3210 + x=(x&(0x00000000001FFFFF)) + # x = ---- ---- ---- ---- ---- ---k jihg fedc ba-- ---- ---- ---- ---- --98 7654 3210 + x=(x|(x<<20))&(0x000001FFC00003FF) + # x = ---- ---- ---- -kji hgf- ---- ---- -edc ba-- ---- ---- 9876 5--- ---- ---4 3210 + x=(x|(x<<10))&(0x0007E007C00F801F) + # x = ---- ---- -kji h--- -gf- ---- -edc ---- ba-- ---- 987- ---6 5--- ---4 32-- --10 + x=(x|(x<<4))&(0x00786070C0E181C3) + # x = ---- ---k ji-- h--g --f- ---e d--c --b- -a-- --98 --7- -6-- 5--- -43- -2-- 1--0 + x=(x|(x<<2))&(0x0199219243248649) + # x = ---- -kj- -i-- h--g --f- -e-- d--c --b- -a-- 9--8 --7- -6-- 5--4 --3- -2-- 1--0 + x=(x|(x<<2))&(0x0649249249249249) + # x = ---k --j- -i-- h--g --f- -e-- d--c --b- -a-- 9--8 --7- -6-- 5--4 --3- -2-- 1--0 + x=(x|(x<<2))&(0x1249249249249249) + return x + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.uint64_t compact_64bits_by2(np.uint64_t x): + # Reversed magic + x=x&(0x1249249249249249) + x=(x|(x>>2))&(0x0649249249249249) + x=(x|(x>>2))&(0x0199219243248649) + x=(x|(x>>2))&(0x00786070C0E181C3) + x=(x|(x>>4))&(0x0007E007C00F801F) + x=(x|(x>>10))&(0x000001FFC00003FF) + 
x=(x|(x>>20))&(0x00000000001FFFFF) + return x + +#----------------------------------------------------------------------------- +# 10 bits spread over 32 with 2 bits in between +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.uint32_t spread_32bits_by2(np.uint32_t x): + # Only reversible up to 1023 + # Select highest 10 bits (Required to be reversible to 10st bit) + # x = ---- ---- ---- ---- ---- --98 7654 3210 + x=(x&(0x000003FF)) + # x = ---- --98 ---- ---- ---- ---- 7654 3210 + x=(x|(x<<16))&(0xFF0000FF) + # x = ---- --98 ---- ---- 7654 ---- ---- 3210 + x=(x|(x<<8))&(0x0300F00F) + # x = ---- --98 ---- 76-- --54 ---- 32-- --10 + x=(x|(x<<4))&(0x030C30C3) + # x = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0 + x=(x|(x<<2))&(0x09249249) + return x + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.uint32_t compact_32bits_by2(np.uint32_t x): + # Reversed magic + x=x&(0x09249249) + x=(x|(x>>2))&(0x030C30C3) + x=(x|(x>>4))&(0x0300F00F) + x=(x|(x>>8))&(0xFF0000FF) + x=(x|(x>>16))&(0x000003FF) + return x + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline np.uint64_t masked_merge_64bit(np.uint64_t a, np.uint64_t b, np.uint64_t mask): + # https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge + return a ^ ((a ^ b) & mask) + +@cython.cdivision(True) +cdef inline np.uint64_t encode_morton_64bit(np.uint64_t x_ind, np.uint64_t y_ind, np.uint64_t z_ind): + cdef np.uint64_t mi + mi = 0 + mi |= spread_64bits_by2(z_ind)<>XSHIFT) + p[1] = compact_64bits_by2(mi>>YSHIFT) + p[2] = compact_64bits_by2(mi>>ZSHIFT) + +@cython.cdivision(True) +cdef inline np.uint64_t bounded_morton(np.float64_t x, np.float64_t y, np.float64_t z, + np.float64_t *DLE, np.float64_t *DRE, np.int32_t order): + cdef int i + cdef np.float64_t dds[3] + cdef np.uint64_t x_ind, y_ind, z_ind + cdef np.uint64_t mi + for i in range(3): + dds[i] = (DRE[i] - DLE[i]) / (1 << order) + x_ind = ((x - DLE[0])/dds[0]) + y_ind = ((y - DLE[1])/dds[1]) + z_ind = ((z - DLE[2])/dds[2]) + mi = encode_morton_64bit(x_ind,y_ind,z_ind) + return mi + +@cython.cdivision(True) +cdef inline np.uint64_t bounded_morton_relative(np.float64_t x, np.float64_t y, np.float64_t z, + np.float64_t *DLE, np.float64_t *DRE, + np.int32_t order1, np.int32_t order2): + cdef int i + cdef np.float64_t dds1[3] + cdef np.float64_t dds2[3] + cdef np.float64_t DLE2[3] + cdef np.uint64_t x_ind, y_ind, z_ind + cdef np.uint64_t mi2 + for i in range(3): + dds1[i] = (DRE[i] - DLE[i]) / (1 << order1) + dds2[i] = dds1[i] / (1 << order2) + DLE2[0] = ( ((x - DLE[0])/dds1[0])) * dds1[0] + DLE2[1] = ( ((y - DLE[1])/dds1[1])) * dds1[1] + DLE2[2] = ( ((z - DLE[2])/dds1[2])) * dds1[2] + x_ind = ((x - DLE2[0])/dds2[0]) + y_ind = ((y - DLE2[1])/dds2[1]) + z_ind = ((z - DLE2[2])/dds2[2]) + mi2 = encode_morton_64bit(x_ind,y_ind,z_ind) + return mi2 + + +# This dosn't seem to be much, if at all, faster... 
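For reference, a minimal pure-Python sketch of the 21-bit spread/interleave trick that the magic constants above implement, checked against a naive bit-by-bit spread (these helper names are illustrative and not part of yt's API):

    def spread_by2(x):
        # Insert two zero bits between each of the low 21 bits of x,
        # using the same magic constants as spread_64bits_by2 above.
        x &= 0x00000000001FFFFF
        x = (x | (x << 20)) & 0x000001FFC00003FF
        x = (x | (x << 10)) & 0x0007E007C00F801F
        x = (x | (x << 4))  & 0x00786070C0E181C3
        x = (x | (x << 2))  & 0x0199219243248649
        x = (x | (x << 2))  & 0x0649249249249249
        x = (x | (x << 2))  & 0x1249249249249249
        return x

    def naive_spread(x):
        # Reference implementation: move bit b of x to bit 3*b.
        return sum(((x >> b) & 1) << (3 * b) for b in range(21))

    def encode_morton(i, j, k):
        # Interleave three 21-bit indices; x takes the highest bit of each
        # triad, matching XSHIFT=2, YSHIFT=1, ZSHIFT=0.
        return (spread_by2(i) << 2) | (spread_by2(j) << 1) | spread_by2(k)

    assert all(spread_by2(v) == naive_spread(v) for v in (0, 1, 2, 2**21 - 1))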
+@cython.cdivision(True) +cdef inline np.uint64_t bounded_morton_dds(np.float64_t x, np.float64_t y, np.float64_t z, + np.float64_t *DLE, np.float64_t *dds): + cdef np.uint64_t x_ind, y_ind, z_ind + cdef np.uint64_t mi + x_ind = ((x - DLE[0])/dds[0]) + y_ind = ((y - DLE[1])/dds[1]) + z_ind = ((z - DLE[2])/dds[2]) + mi = encode_morton_64bit(x_ind,y_ind,z_ind) + return mi + +@cython.cdivision(True) +cdef inline np.uint64_t bounded_morton_relative_dds(np.float64_t x, np.float64_t y, np.float64_t z, + np.float64_t *DLE, np.float64_t *dds1, np.float64_t *dds2): + cdef np.float64_t DLE2[3] + cdef np.uint64_t x_ind, y_ind, z_ind + cdef np.uint64_t mi2 + DLE2[0] = ( ((x - DLE[0])/dds1[0])) * dds1[0] + DLE2[1] = ( ((y - DLE[1])/dds1[1])) * dds1[1] + DLE2[2] = ( ((z - DLE[2])/dds1[2])) * dds1[2] + x_ind = ((x - DLE2[0])/dds2[0]) + y_ind = ((y - DLE2[1])/dds2[1]) + z_ind = ((z - DLE2[2])/dds2[2]) + mi2 = encode_morton_64bit(x_ind,y_ind,z_ind) + return mi2 + + +@cython.cdivision(True) +cdef inline np.uint64_t bounded_morton_split_dds(np.float64_t x, np.float64_t y, np.float64_t z, + np.float64_t *DLE, np.float64_t *dds, np.uint64_t *p): + cdef np.uint64_t mi + p[0] = ((x - DLE[0])/dds[0]) + p[1] = ((y - DLE[1])/dds[1]) + p[2] = ((z - DLE[2])/dds[2]) + mi = encode_morton_64bit(p[0], p[1], p[2]) + return mi + +@cython.cdivision(True) +cdef inline np.uint64_t bounded_morton_split_relative_dds(np.float64_t x, np.float64_t y, np.float64_t z, + np.float64_t *DLE, np.float64_t *dds1, np.float64_t *dds2, + np.uint64_t *p2): + cdef np.float64_t DLE2[3] + cdef np.uint64_t mi2 + DLE2[0] = DLE[0] + ( ((x - DLE[0])/dds1[0])) * dds1[0] + DLE2[1] = DLE[1] + ( ((y - DLE[1])/dds1[1])) * dds1[1] + DLE2[2] = DLE[2] + ( ((z - DLE[2])/dds1[2])) * dds1[2] + p2[0] = ((x - DLE2[0])/dds2[0]) + p2[1] = ((y - DLE2[1])/dds2[1]) + p2[2] = ((z - DLE2[2])/dds2[2]) + mi2 = encode_morton_64bit(p2[0], p2[1], p2[2]) + return mi2 + + +cdef np.uint32_t morton_neighbors_coarse(np.uint64_t mi1, np.uint64_t max_index1, + bint periodicity[3], np.uint32_t nn, + np.uint32_t[:,:] index, + np.uint64_t[:,:] ind1_n, + np.uint64_t[:] neighbors) + +cdef np.uint32_t morton_neighbors_refined(np.uint64_t mi1, np.uint64_t mi2, + np.uint64_t max_index1, np.uint64_t max_index2, + bint periodicity[3], np.uint32_t nn, + np.uint32_t[:,:] index, + np.uint64_t[:,:] ind1_n, + np.uint64_t[:,:] ind2_n, + np.uint64_t[:] neighbors1, + np.uint64_t[:] neighbors2) diff --git a/yt/utilities/lib/geometry_utils.pyx b/yt/utilities/lib/geometry_utils.pyx index 92bfdad25b7..5e94a7975ea 100644 --- a/yt/utilities/lib/geometry_utils.pyx +++ b/yt/utilities/lib/geometry_utils.pyx @@ -5,13 +5,6 @@ Simple integrators for the radiative transfer equation """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
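The bounded_morton* helpers above quantize a position inside [DLE, DRE) onto a 2**order grid before interleaving the integer indices. A hedged pure-Python equivalent of that mapping (illustrative only, not the yt implementation):

    def bounded_morton_py(pos, DLE, DRE, order):
        # Naive bit-by-bit interleave; idx < 2**order, so 'order' bits suffice.
        def spread(v):
            return sum(((v >> b) & 1) << (3 * b) for b in range(order))
        key = 0
        for axis, (x, lo, hi) in enumerate(zip(pos, DLE, DRE)):
            dds = (hi - lo) / (1 << order)    # cell width along this axis
            idx = int((x - lo) / dds)         # grid index of the position
            key |= spread(idx) << (2 - axis)  # axis 0 (x) takes the top bit
        return key

    # A point at the domain centre falls in cell (2, 2, 2) at order 2:
    # bounded_morton_py((0.5, 0.5, 0.5), (0, 0, 0), (1, 1, 1), 2) == 0b111000 == 56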
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np @@ -23,6 +16,13 @@ from libc.math cimport copysign, fabs from yt.utilities.exceptions import YTDomainOverflow from yt.utilities.lib.vec3_ops cimport subtract, cross, dot, L2_norm + +DEF ORDER_MAX=20 +DEF INDEX_MAX_64=2097151 +DEF XSHIFT=2 +DEF YSHIFT=1 +DEF ZSHIFT=0 + cdef extern from "math.h": double exp(double x) nogil float expf(float x) nogil @@ -125,6 +125,73 @@ cdef np.int64_t setbit(np.int64_t x, np.int64_t w, np.int64_t i, np.int64_t b): elif b == 0: return x & ~2**(w-i-1) +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def spread_bits(np.uint64_t x): + return spread_64bits_by2(x) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def compact_bits(np.uint64_t x): + return compact_64bits_by2(x) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def lsz(np.uint64_t v, int stride = 1, int start = 0): + cdef int c + c = start + while ((np.uint64(1) << np.uint64(c)) & np.uint64(v)): + c += stride + return c + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def lsb(np.uint64_t v, int stride = 1, int start = 0): + cdef int c + c = start + while (np.uint64(v) << np.uint64(c)) and not ((np.uint64(1) << np.uint64(c)) & np.uint64(v)): + c += stride + return c + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def bitwise_addition(np.uint64_t x, np.int64_t y0, + int stride = 1, int start = 0): + if (y0 == 0): return x + cdef int end, p, pstart + cdef list mstr + cdef np.uint64_t m, y, out + y = np.uint64(np.abs(y0)) + if (y0 > 0): + func_ls = lsz + else: + func_ls = lsb + # Continue until all bits added + p = 0 + out = x + while (y >> p): + if (y & (1 << p)): + # Get end point + pstart = start + p*stride + end = func_ls(out,stride=stride,start=pstart) + # Create mask + mstr = (end + 1) * ['0'] + for i in range(pstart,end+1,stride): + mstr[i] = '1' + m = int(''.join(mstr[::-1]), 2) + # Invert portion in mask + # print(mstr[::-1]) + # print(y,p,(pstart,end+1),bin(m),bin(out),bin(~out)) + out = masked_merge_64bit(out, ~out, m) + # Move to next bit + p += 1 + return out + @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) @@ -216,40 +283,50 @@ def get_hilbert_points(int order, np.ndarray[np.int64_t, ndim=1] indices): positions[i, j] = p[j] return positions -# yt did not invent these! :) -cdef np.uint64_t _const20 = 0x000001FFC00003FF -cdef np.uint64_t _const10 = 0x0007E007C00F801F -cdef np.uint64_t _const04 = 0x00786070C0E181C3 -cdef np.uint64_t _const2a = 0x0199219243248649 -cdef np.uint64_t _const2b = 0x0649249249249249 -cdef np.uint64_t _const2c = 0x1249249249249249 +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef np.uint64_t point_to_morton(np.uint64_t p[3]): + # Weird indent thing going on... also, should this reference the pxd func? 
+ return encode_morton_64bit(p[0],p[1],p[2]) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void morton_to_point(np.uint64_t mi, np.uint64_t *p): + decode_morton_64bit(mi,p) @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) -cdef inline np.uint64_t spread_bits(np.uint64_t x): - # This magic comes from http://stackoverflow.com/questions/1024754/how-to-compute-a-3d-morton-number-interleave-the-bits-of-3-ints - x=(x|(x<<20))&_const20 - x=(x|(x<<10))&_const10 - x=(x|(x<<4))&_const04 - x=(x|(x<<2))&_const2a - x=(x|(x<<2))&_const2b - x=(x|(x<<2))&_const2c - return x +def get_morton_index(np.ndarray[np.uint64_t, ndim=1] left_index): + cdef int j + cdef np.uint64_t morton_index + cdef np.uint64_t p[3] + for j in range(3): + if left_index[j] >= INDEX_MAX_64: + raise ValueError("Point exceeds max ({}) ".format(INDEX_MAX_64)+ + "for 64bit interleave.") + p[j] = left_index[j] + morton_index = point_to_morton(p) + return morton_index @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) def get_morton_indices(np.ndarray[np.uint64_t, ndim=2] left_index): - cdef np.int64_t i, mi + cdef np.int64_t i + cdef int j cdef np.ndarray[np.uint64_t, ndim=1] morton_indices + cdef np.uint64_t p[3] morton_indices = np.zeros(left_index.shape[0], 'uint64') for i in range(left_index.shape[0]): - mi = 0 - mi |= spread_bits(left_index[i,2])<<0 - mi |= spread_bits(left_index[i,1])<<1 - mi |= spread_bits(left_index[i,0])<<2 - morton_indices[i] = mi + for j in range(3): + if left_index[i, j] >= INDEX_MAX_64: + raise ValueError("Point exceeds max ({}) ".format(INDEX_MAX_64)+ + "for 64bit interleave.") + p[j] = left_index[i, j] + morton_indices[i] = point_to_morton(p) return morton_indices @cython.cdivision(True) @@ -257,18 +334,562 @@ def get_morton_indices(np.ndarray[np.uint64_t, ndim=2] left_index): @cython.wraparound(False) def get_morton_indices_unravel(np.ndarray[np.uint64_t, ndim=1] left_x, np.ndarray[np.uint64_t, ndim=1] left_y, - np.ndarray[np.uint64_t, ndim=1] left_z,): - cdef np.int64_t i, mi + np.ndarray[np.uint64_t, ndim=1] left_z): + cdef np.int64_t i cdef np.ndarray[np.uint64_t, ndim=1] morton_indices + cdef np.uint64_t p[3] morton_indices = np.zeros(left_x.shape[0], 'uint64') for i in range(left_x.shape[0]): - mi = 0 - mi |= spread_bits(left_z[i])<<0 - mi |= spread_bits(left_y[i])<<1 - mi |= spread_bits(left_x[i])<<2 - morton_indices[i] = mi + p[0] = left_x[i] + p[1] = left_y[i] + p[2] = left_z[i] + for j in range(3): + if p[j] >= INDEX_MAX_64: + raise ValueError("Point exceeds max ({}) ".format(INDEX_MAX_64)+ + "for 64 bit interleave.") + morton_indices[i] = point_to_morton(p) return morton_indices +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def get_morton_point(np.uint64_t index): + cdef int j + cdef np.uint64_t p[3] + cdef np.ndarray[np.uint64_t, ndim=1] position + position = np.zeros(3, 'uint64') + morton_to_point(index, p) + for j in range(3): + position[j] = p[j] + return position + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def get_morton_points(np.ndarray[np.uint64_t, ndim=1] indices): + # This is inspired by the scurve package by user cortesi on GH. 
+ cdef int i, j + cdef np.uint64_t p[3] + cdef np.ndarray[np.uint64_t, ndim=2] positions + positions = np.zeros((indices.shape[0], 3), 'uint64') + for i in range(indices.shape[0]): + morton_to_point(indices[i], p) + for j in range(3): + positions[i, j] = p[j] + return positions + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def get_morton_neighbors_coarse(mi1, max_index1, periodic, nn): + cdef int i + cdef np.uint32_t ntot + cdef np.ndarray[np.uint32_t, ndim=2] index = np.zeros((2*nn+1,3), dtype='uint32') + cdef np.ndarray[np.uint64_t, ndim=2] ind1_n = np.zeros((2*nn+1,3), dtype='uint64') + cdef np.ndarray[np.uint64_t, ndim=1] neighbors = np.zeros((2*nn+1)**3, dtype='uint64') + cdef bint periodicity[3] + if periodic: + for i in range(3): periodicity[i] = 1 + else: + for i in range(3): periodicity[i] = 0 + ntot = morton_neighbors_coarse(mi1, max_index1, periodicity, nn, + index, ind1_n, neighbors) + return np.resize(neighbors, (ntot,)) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef np.uint32_t morton_neighbors_coarse(np.uint64_t mi1, np.uint64_t max_index1, + bint periodicity[3], np.uint32_t nn, + np.uint32_t[:,:] index, + np.uint64_t[:,:] ind1_n, + np.uint64_t[:] neighbors): + cdef np.uint32_t ntot = 0 + cdef np.uint64_t ind1[3] + cdef np.uint32_t count[3] + cdef np.uint32_t origin[3] + cdef np.int64_t adv + cdef int i, j, k, ii, ij, ik + for i in range(3): + count[i] = 0 + origin[i] = 0 + # Get indices + decode_morton_64bit(mi1,ind1) + # Determine which directions are valid + for j,i in enumerate(range(-nn,(nn+1))): + if i == 0: + for k in range(3): + ind1_n[j,k] = ind1[k] + index[count[k],k] = j + origin[k] = count[k] + count[k] += 1 + else: + for k in range(3): + adv = ((ind1[k]) + i) + if (adv < 0): + if periodicity[k]: + while adv < 0: + adv += max_index1 + ind1_n[j,k] = (adv % max_index1) + else: + continue + elif (adv >= max_index1): + if periodicity[k]: + ind1_n[j,k] = (adv % max_index1) + else: + continue + else: + ind1_n[j,k] = (adv) + # print(i,k,adv,max_index1,ind1_n[j,k],adv % max_index1) + index[count[k],k] = j + count[k] += 1 + # Iterate over ever combinations + for ii in range(count[0]): + i = index[ii,0] + for ij in range(count[1]): + j = index[ij,1] + for ik in range(count[2]): + k = index[ik,2] + if (ii != origin[0]) or (ij != origin[1]) or (ik != origin[2]): + neighbors[ntot] = encode_morton_64bit(ind1_n[i,0], + ind1_n[j,1], + ind1_n[k,2]) + ntot += 1 + return ntot + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def get_morton_neighbors_refined(mi1, mi2, max_index1, max_index2, periodic, nn): + cdef int i + cdef np.uint32_t ntot + cdef np.ndarray[np.uint32_t, ndim=2] index = np.zeros((2*nn+1,3), dtype='uint32') + cdef np.ndarray[np.uint64_t, ndim=2] ind1_n = np.zeros((2*nn+1,3), dtype='uint64') + cdef np.ndarray[np.uint64_t, ndim=2] ind2_n = np.zeros((2*nn+1,3), dtype='uint64') + cdef np.ndarray[np.uint64_t, ndim=1] neighbors1 = np.zeros((2*nn+1)**3, dtype='uint64') + cdef np.ndarray[np.uint64_t, ndim=1] neighbors2 = np.zeros((2*nn+1)**3, dtype='uint64') + cdef bint periodicity[3] + if periodic: + for i in range(3): periodicity[i] = 1 + else: + for i in range(3): periodicity[i] = 0 + ntot = morton_neighbors_refined(mi1, mi2, max_index1, max_index2, + periodicity, nn, + index, ind1_n, ind2_n, + neighbors1, neighbors2) + return np.resize(neighbors1, (ntot,)), np.resize(neighbors2, (ntot,)) + +@cython.cdivision(True) +@cython.boundscheck(False) 
+@cython.wraparound(False) +cdef np.uint32_t morton_neighbors_refined(np.uint64_t mi1, np.uint64_t mi2, + np.uint64_t max_index1, np.uint64_t max_index2, + bint periodicity[3], np.uint32_t nn, + np.uint32_t[:,:] index, + np.uint64_t[:,:] ind1_n, + np.uint64_t[:,:] ind2_n, + np.uint64_t[:] neighbors1, + np.uint64_t[:] neighbors2): + cdef np.uint32_t ntot = 0 + cdef np.uint64_t ind1[3] + cdef np.uint64_t ind2[3] + cdef np.uint32_t count[3] + cdef np.uint32_t origin[3] + cdef np.int64_t adv, maj, rem, adv1 + cdef int i, j, k, ii, ij, ik + for i in range(3): + count[i] = 0 + origin[i] = 0 + # Get indices + decode_morton_64bit(mi1,ind1) + decode_morton_64bit(mi2,ind2) + # Determine which directions are valid + for j,i in enumerate(range(-nn,(nn+1))): + if i == 0: + for k in range(3): + ind1_n[j,k] = ind1[k] + ind2_n[j,k] = ind2[k] + index[count[k],k] = j + origin[k] = count[k] + count[k] += 1 + else: + for k in range(3): + adv = (ind2[k] + i) + maj = adv / (max_index2) + rem = adv % (max_index2) + if adv < 0: + adv1 = (ind1[k] + (maj-1)) + if adv1 < 0: + if periodicity[k]: + while adv1 < 0: + adv1 += max_index1 + ind1_n[j,k] = adv1 + else: + continue + else: + ind1_n[j,k] = adv1 + while adv < 0: + adv += max_index2 + ind2_n[j,k] = adv + elif adv >= max_index2: + adv1 = (ind1[k] + maj) + if adv1 >= max_index1: + if periodicity[k]: + ind1_n[j,k] = (adv1 % max_index1) + else: + continue + else: + ind1_n[j,k] = adv1 + ind2_n[j,k] = rem + else: + ind1_n[j,k] = ind1[k] + ind2_n[j,k] = (adv) + index[count[k],k] = j + count[k] += 1 + # Iterate over ever combinations + for ii in range(count[0]): + i = index[ii,0] + for ij in range(count[1]): + j = index[ij,1] + for ik in range(count[2]): + k = index[ik,2] + if (ii != origin[0]) or (ij != origin[1]) or (ik != origin[2]): + neighbors1[ntot] = encode_morton_64bit(ind1_n[i,0], + ind1_n[j,1], + ind1_n[k,2]) + neighbors2[ntot] = encode_morton_64bit(ind2_n[i,0], + ind2_n[j,1], + ind2_n[k,2]) + ntot += 1 + return ntot + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def morton_neighbor_periodic(np.ndarray[np.uint64_t,ndim=1] p, + list dim_list, list num_list, + np.uint64_t max_index): + cdef np.uint64_t p1[3] + cdef int j, dim, num + for j in range(3): + p1[j] = np.uint64(p[j]) + for dim,num in zip(dim_list,num_list): + p1[dim] = np.uint64((np.int64(p[dim]) + num) % max_index) + return np.int64(point_to_morton(p1)) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def morton_neighbor_bounded(np.ndarray[np.uint64_t,ndim=1] p, + list dim_list, list num_list, + np.uint64_t max_index): + cdef np.int64_t x + cdef np.uint64_t p1[3] + cdef int j, dim, num + for j in range(3): + p1[j] = np.uint64(p[j]) + for dim,num in zip(dim_list,num_list): + x = np.int64(p[dim]) + num + if (x >= 0) and (x < max_index): + p1[dim] = np.uint64(x) + else: + return np.int64(-1) + return np.int64(point_to_morton(p1)) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def morton_neighbor(np.ndarray[np.uint64_t,ndim=1] p, + list dim_list, list num_list, + np.uint64_t max_index, periodic = False): + if periodic: + return morton_neighbor_periodic(p, dim_list, num_list, max_index) + else: + return morton_neighbor_bounded(p, dim_list, num_list, max_index) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def get_morton_neighbors(np.ndarray[np.uint64_t,ndim=1] mi, + int order = ORDER_MAX, periodic = False): + """Returns array of neighboring morton indices""" + # 
Declare + cdef int i, j, k, l, n + cdef np.uint64_t max_index + cdef np.ndarray[np.uint64_t, ndim=2] p + cdef np.int64_t nmi + cdef np.ndarray[np.uint64_t, ndim=1] mi_neighbors + p = get_morton_points(mi) + mi_neighbors = np.zeros(26*mi.shape[0], 'uint64') + n = 0 + max_index = np.int64(1 << order) + # Define function + if periodic: + fneighbor = morton_neighbor_periodic + else: + fneighbor = morton_neighbor_bounded + for i in range(mi.shape[0]): + for j in range(3): + # +1 in dimension j + nmi = fneighbor(p[i,:],[j],[+1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # +/- in dimension k + for k in range(j+1,3): + # +1 in dimension k + nmi = fneighbor(p[i,:],[j,k],[+1,+1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # +/- in dimension l + for l in range(k+1,3): + nmi = fneighbor(p[i,:],[j,k,l],[+1,+1,+1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + nmi = fneighbor(p[i,:],[j,k,l],[+1,+1,-1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # -1 in dimension k + nmi = fneighbor(p[i,:],[j,k],[+1,-1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # +/- in dimension l + for l in range(k+1,3): + nmi = fneighbor(p[i,:],[j,k,l],[+1,-1,+1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + nmi = fneighbor(p[i,:],[j,k,l],[+1,-1,-1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # -1 in dimension j + nmi = fneighbor(p[i,:],[j],[-1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # +/- in dimension k + for k in range(j+1,3): + # +1 in dimension k + nmi = fneighbor(p[i,:],[j,k],[-1,+1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # +/- in dimension l + for l in range(k+1,3): + nmi = fneighbor(p[i,:],[j,k,l],[-1,+1,+1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + nmi = fneighbor(p[i,:],[j,k,l],[-1,+1,-1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # -1 in dimension k + nmi = fneighbor(p[i,:],[j,k],[-1,-1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + # +/- in dimension l + for l in range(k+1,3): + nmi = fneighbor(p[i,:],[j,k,l],[-1,-1,+1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + nmi = fneighbor(p[i,:],[j,k,l],[-1,-1,-1],max_index) + if nmi > 0: + mi_neighbors[n] = np.uint64(nmi) + n+=1 + mi_neighbors = np.resize(mi_neighbors,(n,)) + return np.unique(np.hstack([mi,mi_neighbors])) + +def ifrexp_cy(np.float64_t x): + cdef np.int64_t e, m + m = ifrexp(x, &e) + return m,e + +def msdb_cy(np.int64_t a, np.int64_t b): + return msdb(a,b) + +def msdb_cy(np.int64_t a, np.int64_t b): + return msdb(a,b) + +def xor_msb_cy(np.float64_t a, np.float64_t b): + return xor_msb(a,b) + +def morton_qsort_swap(np.ndarray[np.uint64_t, ndim=1] ind, + np.uint64_t a, np.uint64_t b): + # http://www.geeksforgeeks.org/iterative-quick-sort/ + cdef np.int64_t t = ind[a] + ind[a] = ind[b] + ind[b] = t + +def morton_qsort_partition(np.ndarray[floating, ndim=2] pos, + np.int64_t l, np.int64_t h, + np.ndarray[np.uint64_t, ndim=1] ind, + use_loop = False): + # Initialize + cdef int k + cdef np.int64_t i, j + cdef np.float64_t ppos[3] + cdef np.float64_t ipos[3] + cdef np.uint64_t done, pivot + if use_loop: + # http://www.geeksforgeeks.org/iterative-quick-sort/ + # A bit slower + # Set starting point & pivot + i = (l - 1) + for k in range(3): + ppos[k] = pos[ind[h],k] + # Loop over array moving ind for points smaller than pivot to front + for j in 
range(l, h): + for k in range(3): + ipos[k] = pos[ind[j],k] + if compare_floats_morton(ipos,ppos): + i+=1 + morton_qsort_swap(ind,i,j) + # Swap the pivot to the midpoint in the partition + i+=1 + morton_qsort_swap(ind,i,h) + return i + else: + # Set starting point & pivot + i = l-1 + j = h + done = 0 + pivot = ind[h] + for k in range(3): + ppos[k] = pos[pivot,k] + # Loop until entire array processed + while not done: + # Process bottom + while not done: + i+=1 + if i == j: + done = 1 + break + for k in range(3): + ipos[k] = pos[ind[i],k] + if compare_floats_morton(ppos,ipos): + ind[j] = ind[i] + break + # Process top + while not done: + j-=1 + if j == i: + done = 1 + break + for k in range(3): + ipos[k] = pos[ind[j],k] + if compare_floats_morton(ipos,ppos): + ind[i] = ind[j] + break + ind[j] = pivot + return j + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def morton_qsort_recursive(np.ndarray[floating, ndim=2] pos, + np.int64_t l, np.int64_t h, + np.ndarray[np.uint64_t, ndim=1] ind, + use_loop = False): + # http://www.geeksforgeeks.org/iterative-quick-sort/ + cdef np.int64_t p + if (l < h): + p = morton_qsort_partition(pos, l, h, ind, use_loop=use_loop) + morton_qsort_recursive(pos, l, p-1, ind, use_loop=use_loop) + morton_qsort_recursive(pos, p+1, h, ind, use_loop=use_loop) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def morton_qsort_iterative(np.ndarray[floating, ndim=2] pos, + np.int64_t l, np.int64_t h, + np.ndarray[np.uint64_t, ndim=1] ind, + use_loop = False): + # http://www.geeksforgeeks.org/iterative-quick-sort/ + # Auxillary stack + cdef np.ndarray[np.int64_t, ndim=1] stack = np.zeros(h-l+1, dtype=np.int64) + cdef np.int64_t top = -1 + cdef np.int64_t p + top+=1 + stack[top] = l + top+=1 + stack[top] = h + # Pop from stack until it's empty + while (top >= 0): + # Get next set + h = stack[top] + top-=1 + l = stack[top] + top-=1 + # Partition + p = morton_qsort_partition(pos, l, h, ind, use_loop=use_loop) + # Add left partition to the stack + if (p-1) > l: + top+=1 + stack[top] = l + top+=1 + stack[top] = p - 1 + # Add right partition to the stack + if (p+1) < h: + top+=1 + stack[top] = p + 1 + top+=1 + stack[top] = h + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def morton_qsort(np.ndarray[floating, ndim=2] pos, + np.int64_t l, np.int64_t h, + np.ndarray[np.uint64_t, ndim=1] ind, + recursive = False, + use_loop = False): + #get_morton_argsort1(pos,l,h,ind) + if recursive: + morton_qsort_recursive(pos,l,h,ind,use_loop=use_loop) + else: + morton_qsort_iterative(pos,l,h,ind,use_loop=use_loop) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def get_morton_argsort1(np.ndarray[floating, ndim=2] pos, + np.int64_t start, np.int64_t end, + np.ndarray[np.uint64_t, ndim=1] ind): + # Return if only one position selected + if start >= end: return + # Initialize + cdef np.int64_t top + top = morton_qsort_partition(pos,start,end,ind) + # Do remaining parts on either side of pivot, sort side first + if (top-1-start < end-(top+1)): + get_morton_argsort1(pos,start,top-1,ind) + get_morton_argsort1(pos,top+1,end,ind) + else: + get_morton_argsort1(pos,top+1,end,ind) + get_morton_argsort1(pos,start,top-1,ind) + return + +def compare_morton(np.ndarray[floating, ndim=1] p0, np.ndarray[floating, ndim=1] q0): + cdef np.float64_t p[3] + cdef np.float64_t q[3] + # cdef np.int64_t iep,ieq,imp,imq + cdef int j + for j in range(3): + p[j] = p0[j] + q[j] = q0[j] + # 
imp = ifrexp(p[j],&iep) + # imq = ifrexp(q[j],&ieq) + # print(j,p[j],q[j],xor_msb(p[j],q[j]),'m=',imp,imq,'e=',iep,ieq) + return compare_floats_morton(p,q) + @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) @@ -304,17 +925,12 @@ cdef np.int64_t position_to_morton(np.ndarray[floating, ndim=1] pos_x, if use == 0: ind[i] = FLAG continue - mi = 0 - mi |= spread_bits(ii[2])<<0 - mi |= spread_bits(ii[1])<<1 - mi |= spread_bits(ii[0])<<2 - ind[i] = mi + ind[i] = encode_morton_64bit(ii[0],ii[1],ii[2]) return pos_x.shape[0] -DEF ORDER_MAX=20 - def compute_morton(np.ndarray pos_x, np.ndarray pos_y, np.ndarray pos_z, - domain_left_edge, domain_right_edge, filter_bbox = False): + domain_left_edge, domain_right_edge, filter_bbox = False, + order = ORDER_MAX): cdef int i cdef int filter if filter_bbox: @@ -327,7 +943,7 @@ def compute_morton(np.ndarray pos_x, np.ndarray pos_y, np.ndarray pos_z, for i in range(3): DLE[i] = domain_left_edge[i] DRE[i] = domain_right_edge[i] - dds[i] = (DRE[i] - DLE[i]) / (1 << ORDER_MAX) + dds[i] = (DRE[i] - DLE[i]) / (1 << order) cdef np.ndarray[np.uint64_t, ndim=1] ind ind = np.zeros(pos_x.shape[0], dtype="uint64") cdef np.int64_t rv @@ -340,7 +956,7 @@ def compute_morton(np.ndarray pos_x, np.ndarray pos_y, np.ndarray pos_z, pos_x, pos_y, pos_z, dds, DLE, DRE, ind, filter) else: - print "Could not identify dtype.", pos_x.dtype + print("Could not identify dtype.", pos_x.dtype) raise NotImplementedError if rv < pos_x.shape[0]: mis = (pos_x.min(), pos_y.min(), pos_z.min()) @@ -349,6 +965,276 @@ def compute_morton(np.ndarray pos_x, np.ndarray pos_y, np.ndarray pos_z, domain_left_edge, domain_right_edge) return ind +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def dist(np.ndarray[np.float64_t, ndim=1] p0, np.ndarray[np.float64_t, ndim=1] q0): + cdef int j + cdef np.float64_t p[3] + cdef np.float64_t q[3] + for j in range(3): + p[j] = p0[j] + q[j] = q0[j] + return euclidean_distance(p,q) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def dist_to_box(np.ndarray[np.float64_t, ndim=1] p, + np.ndarray[np.float64_t, ndim=1] cbox, + np.float64_t rbox): + cdef int j + cdef np.float64_t d = 0.0 + for j in range(3): + d+= max((cbox[j]-rbox)-p[j],0.0,p[j]-(cbox[j]+rbox))**2 + return np.sqrt(d) + + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def solution_radius(np.ndarray[np.float64_t, ndim=2] P, int k, np.uint64_t i, + np.ndarray[np.uint64_t, ndim=1] idx, int order, + np.ndarray[np.float64_t, ndim=1] DLE, + np.ndarray[np.float64_t, ndim=1] DRE): + c = np.zeros(3, dtype=np.float64) + return quadtree_box(P[i,:],P[idx[k-1],:],order,DLE,DRE,c) + +@cython.cdivision(True) +@cython.boundscheck(False) +@cython.wraparound(False) +def knn_direct(np.ndarray[np.float64_t, ndim=2] P, np.uint64_t k, np.uint64_t i, + np.ndarray[np.uint64_t, ndim=1] idx, return_dist = False, + return_rad = False): + """Directly compute the k nearest neighbors by sorting on distance. + + Args: + P (np.ndarray): (N,d) array of points to search sorted by Morton order. + k (int): number of nearest neighbors to find. + i (int): index of point that nearest neighbors should be found for. + idx (np.ndarray): indicies of points from P to be considered. + return_dist (Optional[bool]): If True, distances to the k nearest + neighbors are also returned (in order of proximity). + (default = False) + return_rad (Optional[bool]): If True, distance to farthest nearest + neighbor is also returned. 
This is set to False if return_dist is + True. (default = False) + + Returns: + np.ndarray: Indicies of k nearest neighbors to point i. + + """ + cdef int j,m + cdef np.int64_t[:] sort_fwd + cdef np.float64_t[:] ipos + cdef np.float64_t[:] jpos + cdef np.float64_t[:] dist = np.zeros(len(idx), dtype='float64') + ipos = np.zeros(3) + jpos = np.zeros(3) + for m in range(3): + ipos[m] = P[i,m] + for j in range(len(idx)): + for m in range(3): + jpos[m] = P[idx[j],m] + dist[j] = euclidean_distance(ipos, jpos) + sort_fwd = np.argsort(dist, kind='mergesort')[:k] + if return_dist: + return np.array(idx)[sort_fwd], np.array(dist)[sort_fwd] + elif return_rad: + return np.array(idx)[sort_fwd], np.array(dist)[sort_fwd][k-1] + else: + return np.array(idx)[sort_fwd] + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def quadtree_box(np.ndarray[np.float64_t, ndim=1] p, + np.ndarray[np.float64_t, ndim=1] q, int order, + np.ndarray[np.float64_t, ndim=1] DLE, + np.ndarray[np.float64_t, ndim=1] DRE, + np.ndarray[np.float64_t, ndim=1] c): + # Declare & transfer values to ctypes + cdef int j + cdef np.float64_t ppos[3] + cdef np.float64_t qpos[3] + cdef np.float64_t rbox + cdef np.float64_t cbox[3] + cdef np.float64_t DLE1[3] + cdef np.float64_t DRE1[3] + for j in range(3): + ppos[j] = p[j] + qpos[j] = q[j] + DLE1[j] = DLE[j] + DRE1[j] = DRE[j] + # Get smallest box containing p & q + rbox = smallest_quadtree_box(ppos,qpos,order,DLE1,DRE1, + &cbox[0],&cbox[1],&cbox[2]) + # Transfer values to python array + for j in range(3): + c[j] = cbox[j] + return rbox + + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def csearch_morton(np.ndarray[np.float64_t, ndim=2] P, int k, np.uint64_t i, + np.ndarray[np.uint64_t, ndim=1] Ai, + np.uint64_t l, np.uint64_t h, int order, + np.ndarray[np.float64_t, ndim=1] DLE, + np.ndarray[np.float64_t, ndim=1] DRE, int nu = 4): + """Expand search concentrically to determine set of k nearest neighbors for + point i. + + Args: + P (np.ndarray): (N,d) array of points to search sorted by Morton order. + k (int): number of nearest neighbors to find. + i (int): index of point that nearest neighbors should be found for. + Ai (np.ndarray): (N,k) array of partial nearest neighbor indices. + l (int): index of lowest point to consider in addition to Ai. + h (int): index of highest point to consider in addition to Ai. + order (int): Maximum depth that Morton order quadtree should reach. + DLE (np.float64[3]): 3 floats defining domain lower bounds in each dim. + DRE (np.float64[3]): 3 floats defining domain upper bounds in each dim. + nu (int): minimum number of points before a direct knn search is + performed. (default = 4) + + Returns: + np.ndarray: (N,k) array of nearest neighbor indices. 
+ + Raises: + ValueError: If l i): + raise ValueError("Both l and h must be on the same side of i.") + m = np.uint64((h + l)/2) + # New range is small enough to consider directly + if (h-l) < nu: + if m > i: + return knn_direct(P,k,i,np.hstack((Ai,np.arange(l,h+1,dtype=np.uint64)))) + else: + return knn_direct(P,k,i,np.hstack((np.arange(l,h+1,dtype=np.uint64),Ai))) + # Add middle point + if m > i: + Ai,rad_Ai = knn_direct(P,k,i,np.hstack((Ai,m)).astype(np.uint64),return_rad=True) + else: + Ai,rad_Ai = knn_direct(P,k,i,np.hstack((m,Ai)).astype(np.uint64),return_rad=True) + cbox_sol = np.zeros(3,dtype=np.float64) + rbox_sol = quadtree_box(P[i,:],P[Ai[k-1],:],order,DLE,DRE,cbox_sol) + # Return current solution if hl box is outside current solution's box + # Uses actual box + cbox_hl = np.zeros(3,dtype=np.float64) + rbox_hl = quadtree_box(P[l,:],P[h,:],order,DLE,DRE,cbox_hl) + if dist_to_box(cbox_sol,cbox_hl,rbox_hl) >= 1.5*rbox_sol: + print('{} outside: rad = {}, rbox = {}, dist = {}'.format(m,rad_Ai,rbox_sol,dist_to_box(P[i,:],cbox_hl,rbox_hl))) + return Ai + # Expand search to lower/higher indicies as needed + if i < m: # They are already sorted... + Ai = csearch_morton(P,k,i,Ai,l,m-1,order,DLE,DRE,nu=nu) + if compare_morton(P[m,:],P[i,:]+dist(P[i,:],P[Ai[k-1],:])): + Ai = csearch_morton(P,k,i,Ai,m+1,h,order,DLE,DRE,nu=nu) + else: + Ai = csearch_morton(P,k,i,Ai,m+1,h,order,DLE,DRE,nu=nu) + if compare_morton(P[i,:]-dist(P[i,:],P[Ai[k-1],:]),P[m,:]): + Ai = csearch_morton(P,k,i,Ai,l,m-1,order,DLE,DRE,nu=nu) + return Ai + + +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def knn_morton(np.ndarray[np.float64_t, ndim=2] P0, int k, np.uint64_t i0, + float c = 1.0, int nu = 4, issorted = False, int order = ORDER_MAX, + np.ndarray[np.float64_t, ndim=1] DLE = np.zeros(3,dtype=np.float64), + np.ndarray[np.float64_t, ndim=1] DRE = np.zeros(3,dtype=np.float64)): + """Get the indicies of the k nearest neighbors to point i. + + Args: + P (np.ndarray): (N,d) array of points to search. + k (int): number of nearest neighbors to find for each point in P. + i (np.uint64): index of point to find neighbors for. + c (float): factor determining how many indicies before/after i are used + in the initial search (i-c*k to i+c*k, default = 1.0) + nu (int): minimum number of points before a direct knn search is + performed. (default = 4) + issorted (Optional[bool]): if True, P is assumed to be sorted already + according to Morton order. + order (int): Maximum depth that Morton order quadtree should reach. + If not provided, ORDER_MAX is used. + DLE (np.ndarray): (d,) array of domain lower bounds in each dimension. + If not provided, this is determined from the points. + DRE (np.ndarray): (d,) array of domain upper bounds in each dimension. + If not provided, this is determined from the points. + + Returns: + np.ndarray: (N,k) indicies of k nearest neighbors for each point in P. 
+""" + cdef int j + cdef np.uint64_t i + cdef np.int64_t N = P0.shape[0] + cdef np.ndarray[np.float64_t, ndim=2] P + cdef np.ndarray[np.uint64_t, ndim=1] sort_fwd = np.arange(N,dtype=np.uint64) + cdef np.ndarray[np.uint64_t, ndim=1] sort_rev = np.arange(N,dtype=np.uint64) + cdef np.ndarray[np.uint64_t, ndim=1] Ai + cdef np.int64_t idxmin, idxmax, u, l, I + # Sort if necessary + if issorted: + P = P0 + i = i0 + else: + morton_qsort(P0,0,N-1,sort_fwd) + sort_rev = np.argsort(sort_fwd).astype(np.uint64) + P = P0[sort_fwd,:] + i = sort_rev[i0] + # Check domain and set if singular + for j in range(3): + if DLE[j] == DRE[j]: + DLE[j] = min(P[:,j]) + DRE[j] = max(P[:,j]) + # Get initial guess bassed on position in z-order + idxmin = max(i-c*k, 0) + idxmax = min(i+c*k, N-1) + Ai = np.hstack((np.arange(idxmin,i,dtype=np.uint64), + np.arange(i+1,idxmax+1,dtype=np.uint64))) + Ai,rad_Ai = knn_direct(P,k,i,Ai,return_rad=True) + # Get radius of solution + cbox_Ai = np.zeros(3,dtype=np.float64) + rbox_Ai = quadtree_box(P[i,:],P[Ai[k-1],:],order,DLE,DRE,cbox_Ai) + rad_Ai = rbox_Ai + # Extend upper bound to match lower bound + if idxmax < (N-1): + if compare_morton(P[i,:]+rad_Ai,P[idxmax,:]): + u = i + else: + I = 1 + while (idxmax+(2**I) < N) and compare_morton(P[idxmax+(2**I),:],P[i,:]+rad_Ai): + I+=1 + u = min(idxmax+(2**I),N-1) + Ai = csearch_morton(P,k,i,Ai,min(idxmax+1,N-1),u,order,DLE,DRE,nu=nu) + else: + u = idxmax + # Extend lower bound to match upper bound + if idxmin > 0: + if compare_morton(P[idxmin,:],P[i,:]-rad_Ai): + l = i + else: + I = 1 + while (idxmin-(2**I) >= 0) and compare_morton(P[i,:]-rad_Ai,P[idxmin-(2**I),:]): + I+=1 + l = max(idxmin-(2**I),0) + Ai = csearch_morton(P,k,i,Ai,l,max(idxmin-1,0),order,DLE,DRE,nu=nu) + else: + l = idxmin + # Return indices of neighbors in the correct order + if issorted: + return Ai + else: + return sort_fwd[Ai] + cdef struct PointSet cdef struct PointSet: int count diff --git a/yt/utilities/lib/grid_traversal.pxd b/yt/utilities/lib/grid_traversal.pxd index b62d565a1f1..ca403bc96f7 100644 --- a/yt/utilities/lib/grid_traversal.pxd +++ b/yt/utilities/lib/grid_traversal.pxd @@ -6,13 +6,6 @@ Definitions for the traversal code """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/grid_traversal.pyx b/yt/utilities/lib/grid_traversal.pyx index 1b651e6efab..10d8e354fb8 100644 --- a/yt/utilities/lib/grid_traversal.pyx +++ b/yt/utilities/lib/grid_traversal.pyx @@ -5,13 +5,6 @@ Simple integrators for the radiative transfer equation """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np @@ -324,7 +317,7 @@ def healpix_aitoff_proj(np.ndarray[np.float64_t, ndim=1] pix_image, # for l in range(3): # v0[k] += v1[l] * irotation[l,k] # healpix_interface.vec2pix_nest(nside, v0, &ipix) - # #print "Rotated", v0[0], v0[1], v0[2], v1[0], v1[1], v1[2], ipix, pix_image[ipix] + # #print("Rotated", v0[0], v0[1], v0[2], v1[0], v1[1], v1[2], ipix, pix_image[ipix]) # image[j, i] = pix_image[ipix] def arr_fisheye_vectors(int resolution, np.float64_t fov, int nimx=1, int diff --git a/yt/utilities/lib/healpix_interface.pxd b/yt/utilities/lib/healpix_interface.pxd index edf6a092922..298da0bb133 100644 --- a/yt/utilities/lib/healpix_interface.pxd +++ b/yt/utilities/lib/healpix_interface.pxd @@ -5,13 +5,6 @@ A light interface to a few HEALPix routines """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/image_samplers.pxd b/yt/utilities/lib/image_samplers.pxd index 9b1801c9a82..fb404fc4e36 100644 --- a/yt/utilities/lib/image_samplers.pxd +++ b/yt/utilities/lib/image_samplers.pxd @@ -6,13 +6,6 @@ Definitions for image samplers """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 732165a6fd4..214e2cd1d38 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -5,13 +5,6 @@ Image sampler definitions """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/image_utilities.pyx b/yt/utilities/lib/image_utilities.pyx index f51cd44ad1c..a83e6ea9412 100644 --- a/yt/utilities/lib/image_utilities.pyx +++ b/yt/utilities/lib/image_utilities.pyx @@ -2,13 +2,6 @@ Utilities for images """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/interpolators.pyx b/yt/utilities/lib/interpolators.pyx index 169d9eaa3ae..9bf6f2c64b7 100644 --- a/yt/utilities/lib/interpolators.pyx +++ b/yt/utilities/lib/interpolators.pyx @@ -5,13 +5,6 @@ Simple interpolators """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/lenses.pxd b/yt/utilities/lib/lenses.pxd index 9b706c03753..edca8640ffc 100644 --- a/yt/utilities/lib/lenses.pxd +++ b/yt/utilities/lib/lenses.pxd @@ -6,13 +6,6 @@ Definitions for the lens code """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/lenses.pyx b/yt/utilities/lib/lenses.pyx index bc3c029872c..0e36474f2b0 100644 --- a/yt/utilities/lib/lenses.pyx +++ b/yt/utilities/lib/lenses.pyx @@ -5,13 +5,6 @@ Functions for computing the extent of lenses and whatnot """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/line_integral_convolution.pyx b/yt/utilities/lib/line_integral_convolution.pyx index 762c26919ab..09d68abb82c 100644 --- a/yt/utilities/lib/line_integral_convolution.pyx +++ b/yt/utilities/lib/line_integral_convolution.pyx @@ -5,17 +5,6 @@ Utilities for line integral convolution annotation """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Code originally from Scipy Cookbook (http://wiki.scipy.org/Cookbook/LineIntegralConvolution), -# with bug fixed which leads to crash when non equal-size vector field in two -# dimensions is provided. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np diff --git a/yt/utilities/lib/marching_cubes.pyx b/yt/utilities/lib/marching_cubes.pyx index 42254185ea7..ce18bbbe8c6 100644 --- a/yt/utilities/lib/marching_cubes.pyx +++ b/yt/utilities/lib/marching_cubes.pyx @@ -5,13 +5,6 @@ Marching cubes implementation """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- cimport numpy as np cimport cython @@ -342,7 +335,7 @@ def march_cubes_grid_flux( point[m] = (current.p[n][m]-cell_pos[m])*idds[m] # Now we calculate the value at this point temp = offset_interpolate(dims, point, intdata) - #print "something", temp, point[0], point[1], point[2] + #print("something", temp, point[0], point[1], point[2]) wval += temp for m in range(3): center[m] += temp * point[m] diff --git a/yt/utilities/lib/mesh_construction.pyx b/yt/utilities/lib/mesh_construction.pyx index b85b800caf8..47db9f4f6c0 100644 --- a/yt/utilities/lib/mesh_construction.pyx +++ b/yt/utilities/lib/mesh_construction.pyx @@ -8,13 +8,6 @@ Note - this file is only used for the Embree-accelerated ray-tracer. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport cython diff --git a/yt/utilities/lib/mesh_intersection.pyx b/yt/utilities/lib/mesh_intersection.pyx index 8d220c1d943..28d501fcc66 100644 --- a/yt/utilities/lib/mesh_intersection.pyx +++ b/yt/utilities/lib/mesh_intersection.pyx @@ -6,13 +6,6 @@ Note - this file is only used for the Embree-accelerated ray-tracer. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport pyembree.rtcore as rtc cimport pyembree.rtcore_ray as rtcr diff --git a/yt/utilities/lib/mesh_samplers.pyx b/yt/utilities/lib/mesh_samplers.pyx index 716b331ffe0..9381f7d3483 100644 --- a/yt/utilities/lib/mesh_samplers.pyx +++ b/yt/utilities/lib/mesh_samplers.pyx @@ -6,13 +6,6 @@ Note - this file is only used for the Embree-accelerated ray-tracer. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport pyembree.rtcore as rtc cimport pyembree.rtcore_ray as rtcr diff --git a/yt/utilities/lib/mesh_traversal.pyx b/yt/utilities/lib/mesh_traversal.pyx index 86a80dc6ddf..11839be98bc 100644 --- a/yt/utilities/lib/mesh_traversal.pyx +++ b/yt/utilities/lib/mesh_traversal.pyx @@ -5,13 +5,6 @@ mesh source using either pyembree or the cython ray caster. """ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- cimport cython cimport numpy as np @@ -31,9 +24,9 @@ rtc.rtcInit(NULL) rtc.rtcSetErrorFunction(error_printer) cdef void error_printer(const rtc.RTCError code, const char *_str): - print "ERROR CAUGHT IN EMBREE" + print("ERROR CAUGHT IN EMBREE") rtc.print_error(code) - print "ERROR MESSAGE:", _str + print("ERROR MESSAGE:", _str) cdef class YTEmbreeScene: diff --git a/yt/utilities/lib/mesh_utilities.pyx b/yt/utilities/lib/mesh_utilities.pyx index 8d2a5c61108..5a0bddfd0dc 100644 --- a/yt/utilities/lib/mesh_utilities.pyx +++ b/yt/utilities/lib/mesh_utilities.pyx @@ -5,13 +5,6 @@ Utilities for unstructured and semi-structured meshes """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/misc_utilities.pyx b/yt/utilities/lib/misc_utilities.pyx index d8f72f4f011..701dec9bb38 100644 --- a/yt/utilities/lib/misc_utilities.pyx +++ b/yt/utilities/lib/misc_utilities.pyx @@ -5,13 +5,6 @@ Simple utilities that don't fit anywhere else """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from yt.funcs import get_pbar import numpy as np @@ -22,7 +15,6 @@ cimport libc.math as math from libc.math cimport abs, sqrt from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max from yt.geometry.selection_routines cimport _ensure_code -from yt.utilities.exceptions import YTEquivalentDimsError from libc.stdlib cimport malloc, free from libc.string cimport strcmp @@ -469,17 +461,17 @@ def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data, for i in range(n_grids): # Check for disqualification for j in range(2): - #print "Checking against", i,j,dim,data[i,j,dim] + #print("Checking against", i,j,dim,data[i,j,dim]) if not (l_corner[dim] < data[i, j, dim] and data[i, j, dim] < r_corner[dim]): - #print "Skipping ", data[i,j,dim] + #print("Skipping ", data[i,j,dim]) continue skipit = 0 # Add our left ... for k in range(n_unique): if uniques[k] == data[i, j, dim]: skipit = 1 - #print "Identified", uniques[k], data[i,j,dim], n_unique + #print("Identified", uniques[k], data[i,j,dim], n_unique) break if skipit == 0: uniques[n_unique] = data[i, j, dim] @@ -491,7 +483,7 @@ def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data, # I recognize how lame this is. cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64') for i in range(my_max): - #print "Setting tarr: ", i, uniquedims[best_dim][i] + #print("Setting tarr: ", i, uniquedims[best_dim][i]) tarr[i] = uniquedims[best_dim][i] tarr.sort() if my_split < 0: @@ -569,7 +561,8 @@ def get_box_grids_below_level( @cython.cdivision(True) @cython.boundscheck(False) @cython.wraparound(False) -def obtain_position_vector(data): +def obtain_position_vector( + data, field_names = ('x', 'y', 'z')): # This is just to let the pointers exist and whatnot. We can't cdef them # inside conditionals. 
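    # Illustrative usage of the new field_names keyword (an assumed call, not
    # part of this patch): the same routine can build position vectors from
    # particle coordinates, e.g.
    #   obtain_position_vector(data, field_names=("particle_position_x",
    #                                             "particle_position_y",
    #                                             "particle_position_z"))
    # while the default ('x', 'y', 'z') keeps the original behaviour.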
cdef np.ndarray[np.float64_t, ndim=1] xf @@ -583,14 +576,14 @@ def obtain_position_vector(data): cdef np.float64_t c[3] cdef int i, j, k - units = data['x'].units + units = data[field_names[0]].units center = data.get_field_parameter("center").to(units) c[0] = center[0]; c[1] = center[1]; c[2] = center[2] - if len(data['x'].shape) == 1: + if len(data[field_names[0]].shape) == 1: # One dimensional data - xf = data['x'] - yf = data['y'] - zf = data['z'] + xf = data[field_names[0]] + yf = data[field_names[1]] + zf = data[field_names[2]] rf = YTArray(np.empty((3, xf.shape[0]), 'float64'), xf.units) for i in range(xf.shape[0]): rf[0, i] = xf[i] - c[0] @@ -599,9 +592,9 @@ def obtain_position_vector(data): return rf else: # Three dimensional data - xg = data['x'] - yg = data['y'] - zg = data['z'] + xg = data[field_names[0]] + yg = data[field_names[1]] + zg = data[field_names[2]] shape = (3, xg.shape[0], xg.shape[1], xg.shape[2]) rg = YTArray(np.empty(shape, 'float64'), xg.units) #rg = YTArray(rg, xg.units) @@ -666,11 +659,7 @@ def obtain_relative_velocity_vector( if bulk_vector is None: bv[0] = bv[1] = bv[2] = 0.0 else: - if hasattr(bulk_vector, 'in_units'): - try: - bulk_vector = bulk_vector.in_units(vxg.units) - except YTEquivalentDimsError as e: - bulk_vector = bulk_vector.to_equivalent(e.new_units, e.base) + bulk_vector = bulk_vector.in_units(vxg.units) bv[0] = bulk_vector[0] bv[1] = bulk_vector[1] bv[2] = bulk_vector[2] @@ -786,6 +775,61 @@ def fill_region(input_fields, output_fields, tot += 1 return tot +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def flip_bitmask(np.ndarray[np.float64_t, ndim=1] vals, + np.float64_t left_edge, np.float64_t right_edge, + np.uint64_t nbins): + cdef np.uint64_t i, bin_id + cdef np.float64_t idx = nbins / (right_edge - left_edge) + cdef np.ndarray[np.uint8_t, ndim=1, cast=True] bitmask + bitmask = np.zeros(nbins, dtype="uint8") + for i in range(vals.shape[0]): + bin_id = ((vals[i] - left_edge)*idx) + bitmask[bin_id] = 1 + return bitmask + +#@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def flip_morton_bitmask(np.ndarray[np.uint64_t, ndim=1] morton_indices, + int max_order): + # We assume that the morton_indices are fed to us in a setup that allows + # for 20 levels. This means that we shift right by 3*(20-max_order) (or is + # that a fencepost?) + cdef np.uint64_t mi, i + cdef np.ndarray[np.uint8_t, ndim=1, cast=True] bitmask + # Note that this will fail if it's too big, since numpy will check nicely + # the memory availability. I guess. 
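    # Worked instance of the shift described above (illustrative numbers, not
    # from this patch): with ORDER_MAX = 20 and max_order = 7, each key is
    # shifted right by 3 * (20 - 7) = 39 bits, leaving a 21-bit coarse key,
    # so the bitmask below holds 1 << 21 = 2097152 one-byte flags (~2 MB).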
+    bitmask = np.zeros(1 << (3*max_order), dtype="uint8")
+    for i in range(morton_indices.shape[0]):
+        mi = (morton_indices[i] >> (3 * (20-max_order)))
+        bitmask[mi] = 1
+    return bitmask
+
+#@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def count_collisions(np.ndarray[np.uint8_t, ndim=2] masks):
+    cdef int i, j, k
+    cdef np.ndarray[np.uint32_t, ndim=1] counts
+    cdef np.ndarray[np.uint8_t, ndim=1] collides
+    counts = np.zeros(masks.shape[1], dtype="uint32")
+    collides = np.zeros(masks.shape[1], dtype="uint8")
+    for i in range(masks.shape[1]):
+        print(i)
+        for j in range(masks.shape[1]):
+            collides[j] = 0
+        for k in range(masks.shape[0]):
+            if masks[k,i] == 0: continue
+            for j in range(masks.shape[1]):
+                if j == i: continue
+                if masks[k,j] == 1:
+                    collides[j] = 1
+        counts[i] = collides.sum()
+    return counts
+
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
diff --git a/yt/utilities/lib/origami.pyx b/yt/utilities/lib/origami.pyx
index f7a643c10d0..0510eec4846 100644
--- a/yt/utilities/lib/origami.pyx
+++ b/yt/utilities/lib/origami.pyx
@@ -5,13 +5,6 @@ This calls the ORIGAMI routines
 
 
 """
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
 
 import numpy as np
 cimport numpy as np
@@ -31,8 +24,8 @@ def run_origami(np.ndarray[np.float64_t, ndim=1] pos_x,
     # C-contiguous.
     global printed_citation
     if printed_citation == 0:
-        print "ORIGAMI was developed by Bridget Falck and Mark Neyrinck."
-        print "Please cite Falck, Neyrinck, & Szalay 2012, ApJ, 754, 2, 125."
+        print("ORIGAMI was developed by Bridget Falck and Mark Neyrinck.")
+        print("Please cite Falck, Neyrinck, & Szalay 2012, ApJ, 754, 2, 125.")
         printed_citation = 1
     cdef int npart = pos_x.size
     if npart == 1:
diff --git a/yt/utilities/lib/particle_kdtree_tools.pxd b/yt/utilities/lib/particle_kdtree_tools.pxd
new file mode 100644
index 00000000000..93b3e038469
--- /dev/null
+++ b/yt/utilities/lib/particle_kdtree_tools.pxd
@@ -0,0 +1,16 @@
+cimport numpy as np
+cimport cython
+
+from yt.utilities.lib.cykdtree.kdtree cimport KDTree, uint64_t
+from yt.utilities.lib.bounded_priority_queue cimport BoundedPriorityQueue
+
+cdef struct axes_range:
+    int start
+    int stop
+    int step
+
+cdef int set_axes_range(axes_range *axes, int skipaxis)
+
+cdef int find_neighbors(np.float64_t * pos, np.float64_t[:, ::1] tree_positions,
+                        BoundedPriorityQueue queue, KDTree * c_tree,
+                        uint64_t skipidx, axes_range * axes) nogil except -1
diff --git a/yt/utilities/lib/particle_kdtree_tools.pyx b/yt/utilities/lib/particle_kdtree_tools.pyx
new file mode 100644
index 00000000000..96a6fa6a91c
--- /dev/null
+++ b/yt/utilities/lib/particle_kdtree_tools.pyx
@@ -0,0 +1,394 @@
+"""
+Cython tools for working with the PyKDTree particle KDTree.
+
+
+
+"""
+
+
+import numpy as np
+cimport numpy as np
+
+cimport cython
+
+from cpython.exc cimport PyErr_CheckSignals
+from yt.utilities.lib.cykdtree.kdtree cimport (
+    PyKDTree,
+    KDTree,
+    Node,
+    uint64_t,
+    uint32_t,
+)
+
+from libc.math cimport sqrt
+from libcpp.vector cimport vector
+
+from yt.funcs import get_pbar
+from yt.geometry.particle_deposit cimport (
+    get_kernel_func,
+    kernel_func,
+)
+from yt.utilities.lib.bounded_priority_queue cimport (
+    BoundedPriorityQueue,
+    NeighborList
+)
+
+cdef int CHUNKSIZE = 4096
+
+# This structure allows the nearest neighbor finding to consider a subset of
+# spatial dimensions, e.g. the spatial separation in the x and z coordinates
+# can be considered by using set_axes_range(axes, 1); this causes the while
+# loops to skip the y dimension without the performance hit of an if statement
+cdef struct axes_range:
+    int start
+    int stop
+    int step
+
+# skipaxis: x=0, y=1, z=2
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int set_axes_range(axes_range *axes, int skipaxis):
+    axes.start = 0
+    axes.stop = 3
+    axes.step = 1
+    if skipaxis == 0:
+        axes.start = 1
+    if skipaxis == 1:
+        axes.step = 2
+    if skipaxis == 2:
+        axes.stop = 2
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def generate_smoothing_length(np.float64_t[:, ::1] tree_positions,
+                              PyKDTree kdtree, int n_neighbors):
+    """Calculate array of distances to the nth nearest neighbor
+
+    Parameters
+    ----------
+
+    tree_positions: array of floats with shape (n_particles, 3)
+        The positions of particles in kdtree sorted order. Currently assumed
+        to be 3D positions.
+    kdtree: A PyKDTree instance
+        A kdtree to do nearest neighbors searches with
+    n_neighbors: The neighbor number to calculate the distance to
+
+    Returns
+    -------
+
+    smoothing_lengths: array of floats with shape (n_particles,)
+        The calculated smoothing lengths
+
+    """
+    cdef int i
+    cdef KDTree * c_tree = kdtree._tree
+    cdef int n_particles = tree_positions.shape[0]
+    cdef np.float64_t * pos
+    cdef np.float64_t[:] smoothing_length = np.empty(n_particles)
+    cdef BoundedPriorityQueue queue = BoundedPriorityQueue(n_neighbors)
+    cdef np.int64_t skipaxis = -1
+
+    # We are using all spatial dimensions
+    cdef axes_range axes
+    set_axes_range(&axes, -1)
+
+    pbar = get_pbar("Generate smoothing length", n_particles)
+    with nogil:
+        for i in range(n_particles):
+            # Reset queue to "empty" state, doing it this way avoids
+            # needing to reallocate memory
+            queue.size = 0
+
+            if i % CHUNKSIZE == 0:
+                with gil:
+                    pbar.update(i-1)
+                    PyErr_CheckSignals()
+
+            pos = &(tree_positions[i, 0])
+            find_neighbors(pos, tree_positions, queue, c_tree, i, &axes)
+
+            smoothing_length[i] = sqrt(queue.heap_ptr[0])
+
+    pbar.update(n_particles-1)
+    pbar.finish()
+    return np.asarray(smoothing_length)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def estimate_density(np.float64_t[:, ::1] tree_positions, np.float64_t[:] mass,
+                     np.float64_t[:] smoothing_length,
+                     PyKDTree kdtree, kernel_name="cubic"):
+    """Estimate density using SPH gather method.
+
+    Parameters
+    ----------
+
+    tree_positions: array of floats with shape (n_particles, 3)
+        The positions of particles in kdtree sorted order. Currently assumed
+        to be 3D positions.
+    mass: array of floats with shape (n_particles)
+        The masses of particles in kdtree sorted order.
+ smoothing_length: array of floats with shape (n_particles) + The smoothing lengths of particles in kdtree sorted order. + kdtree: A PyKDTree instance + A kdtree to do nearest neighbors searches with. + kernel_name: str + The name of the kernel function to use in density estimation. + + Returns + ------- + + density: array of floats with shape (n_particles) + The calculated density. + + """ + cdef int i, j, k + cdef KDTree * c_tree = kdtree._tree + cdef int n_particles = tree_positions.shape[0] + cdef np.float64_t h_i2, ih_i2, q_ij + cdef np.float64_t * pos + cdef np.float64_t[:] density = np.empty(n_particles) + cdef kernel_func kernel = get_kernel_func(kernel_name) + cdef NeighborList nblist = NeighborList() + + # We are using all spatial dimensions + cdef axes_range axes + set_axes_range(&axes, -1) + + pbar = get_pbar("Estimating density", n_particles) + with nogil: + for i in range(n_particles): + # Reset list to "empty" state, doing it this way avoids + # needing to reallocate memory + nblist.size = 0 + + if i % CHUNKSIZE == 0: + with gil: + pbar.update(i - 1) + PyErr_CheckSignals() + + pos = &(tree_positions[i, 0]) + h_i2 = smoothing_length[i] ** 2 + find_neighbors_ball(pos, h_i2, tree_positions, nblist, c_tree, i, &axes) + ih_i2 = 1.0 / h_i2 + + # See eq. 10 of Price 2012 + density[i] = mass[i] * kernel(0) + for k in range(nblist.size): + j = nblist.pids[k] + q_ij = sqrt(nblist.data[k] * ih_i2) + density[i] += mass[j] * kernel(q_ij) + + pbar.update(n_particles - 1) + pbar.finish() + return np.asarray(density) + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef int find_neighbors(np.float64_t * pos, np.float64_t[:, ::1] tree_positions, + BoundedPriorityQueue queue, KDTree * c_tree, + uint64_t skipidx, axes_range * axes) nogil except -1: + cdef Node* leafnode + + # Make an initial guess based on the closest node + leafnode = c_tree.search(&pos[0]) + process_node_points(leafnode, queue, tree_positions, pos, skipidx, axes) + + # Traverse the rest of the kdtree to finish the neighbor list + find_knn(c_tree.root, queue, tree_positions, pos, leafnode.leafid, skipidx, + axes) + + return 0 + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef int find_knn(Node* node, + BoundedPriorityQueue queue, + np.float64_t[:, ::1] tree_positions, + np.float64_t* pos, + uint32_t skipleaf, + uint64_t skipidx, + axes_range * axes, + ) nogil except -1: + # if we aren't a leaf then we keep traversing until we find a leaf, else we + # we actually begin to check the leaf + if not node.is_leaf: + if not cull_node(node.less, pos, queue, skipleaf, axes): + find_knn(node.less, queue, tree_positions, pos, skipleaf, skipidx, + axes) + if not cull_node(node.greater, pos, queue, skipleaf, axes): + find_knn(node.greater, queue, tree_positions, pos, skipleaf, + skipidx, axes) + else: + if not cull_node(node, pos, queue, skipleaf, axes): + process_node_points(node, queue, tree_positions, pos, skipidx, + axes) + return 0 + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline int cull_node(Node* node, + np.float64_t* pos, + BoundedPriorityQueue queue, + uint32_t skipleaf, + axes_range * axes, + ) nogil except -1: + cdef int k + cdef np.float64_t v + cdef np.float64_t tpos, ndist = 0 + cdef uint32_t leafid + + if node.leafid == skipleaf: + return True + + k = axes.start + while k < axes.stop: + v = pos[k] + if v < node.left_edge[k]: + tpos = node.left_edge[k] - v + elif v > node.right_edge[k]: + tpos = v - node.right_edge[k] + else: + tpos = 0 + ndist += tpos*tpos + k += axes.step + + 
return (ndist > queue.heap[0] and queue.size == queue.max_elements) + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline int process_node_points(Node* node, + BoundedPriorityQueue queue, + np.float64_t[:, ::1] positions, + np.float64_t* pos, + int skipidx, + axes_range * axes, + ) nogil except -1: + cdef uint64_t i, k + cdef np.float64_t tpos, sq_dist + for i in range(node.left_idx, node.left_idx + node.children): + if i == skipidx: + continue + + sq_dist = 0.0 + + k = axes.start + while k < axes.stop: + tpos = positions[i, k] - pos[k] + sq_dist += tpos*tpos + k += axes.step + + queue.add_pid(sq_dist, i) + + return 0 + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef int find_neighbors_ball(np.float64_t * pos, np.float64_t r2, + np.float64_t[:, ::1] tree_positions, + NeighborList nblist, KDTree * c_tree, + uint64_t skipidx, axes_range * axes + ) nogil except -1: + """Find neighbors within a ball.""" + cdef Node* leafnode + + # Make an initial guess based on the closest node + leafnode = c_tree.search(&pos[0]) + process_node_points_ball(leafnode, nblist, tree_positions, pos, r2, skipidx, axes) + + # Traverse the rest of the kdtree to finish the neighbor list + find_ball(c_tree.root, nblist, tree_positions, pos, r2, leafnode.leafid, + skipidx, axes) + + return 0 + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef int find_ball(Node* node, + NeighborList nblist, + np.float64_t[:, ::1] tree_positions, + np.float64_t* pos, + np.float64_t r2, + uint32_t skipleaf, + uint64_t skipidx, + axes_range * axes, + ) nogil except -1: + """Traverse the k-d tree to process leaf nodes.""" + if not node.is_leaf: + if not cull_node_ball(node.less, pos, r2, skipleaf, axes): + find_ball(node.less, nblist, tree_positions, pos, r2, skipleaf, + skipidx, axes) + if not cull_node_ball(node.greater, pos, r2, skipleaf, axes): + find_ball(node.greater, nblist, tree_positions, pos, r2, skipleaf, + skipidx, axes) + else: + if not cull_node_ball(node, pos, r2, skipleaf, axes): + process_node_points_ball(node, nblist, tree_positions, pos, r2, + skipidx, axes) + return 0 + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline int cull_node_ball(Node* node, + np.float64_t* pos, + np.float64_t r2, + uint32_t skipleaf, + axes_range * axes, + ) nogil except -1: + """Check if the node does not intersect with the ball at all.""" + cdef int k + cdef np.float64_t v + cdef np.float64_t tpos, ndist = 0 + cdef uint32_t leafid + + if node.leafid == skipleaf: + return True + + k = axes.start + while k < axes.stop: + v = pos[k] + if v < node.left_edge[k]: + tpos = node.left_edge[k] - v + elif v > node.right_edge[k]: + tpos = v - node.right_edge[k] + else: + tpos = 0 + ndist += tpos*tpos + k += axes.step + + return ndist > r2 + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline int process_node_points_ball(Node* node, + NeighborList nblist, + np.float64_t[:, ::1] positions, + np.float64_t* pos, + np.float64_t r2, + int skipidx, + axes_range * axes, + ) nogil except -1: + """Add points from the leaf node within the ball to the neighbor list.""" + cdef uint64_t i, k, n + cdef np.float64_t tpos, sq_dist + for i in range(node.left_idx, node.left_idx + node.children): + if i == skipidx: + continue + + sq_dist = 0.0 + + k = axes.start + while k < axes.stop: + tpos = positions[i, k] - pos[k] + sq_dist += tpos*tpos + k += axes.step + + if (sq_dist < r2): + nblist.add_pid(sq_dist, i) + + return 0 diff --git a/yt/utilities/lib/particle_mesh_operations.pyx 
b/yt/utilities/lib/particle_mesh_operations.pyx index cfae4e04d73..81c3e6ff99c 100644 --- a/yt/utilities/lib/particle_mesh_operations.pyx +++ b/yt/utilities/lib/particle_mesh_operations.pyx @@ -5,13 +5,6 @@ Simple integrators for the radiative transfer equation """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np cimport cython diff --git a/yt/utilities/lib/partitioned_grid.pxd b/yt/utilities/lib/partitioned_grid.pxd index dfa6c78a8d2..29affd91e15 100644 --- a/yt/utilities/lib/partitioned_grid.pxd +++ b/yt/utilities/lib/partitioned_grid.pxd @@ -6,13 +6,6 @@ Definitions for the partitioned grid """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/partitioned_grid.pyx b/yt/utilities/lib/partitioned_grid.pyx index 02dceffda76..12502b108fb 100644 --- a/yt/utilities/lib/partitioned_grid.pyx +++ b/yt/utilities/lib/partitioned_grid.pyx @@ -5,13 +5,6 @@ Image sampler definitions """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/perftools_wrap.pyx b/yt/utilities/lib/perftools_wrap.pyx deleted file mode 100644 index 110c0aa8cf7..00000000000 --- a/yt/utilities/lib/perftools_wrap.pyx +++ /dev/null @@ -1,29 +0,0 @@ -""" -Turn on and off perftools profiling - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -# For more info: -# https://pygabriel.wordpress.com/2010/04/14/profiling-python-c-extensions/ - -# prof.pyx -cdef extern from "google/profiler.h": - void ProfilerStart( char* fname ) - void ProfilerStop() - -def profiler_start(fname): - ProfilerStart(fname) - -def profiler_stop(): - ProfilerStop() - diff --git a/yt/utilities/lib/pixelization_constants.c b/yt/utilities/lib/pixelization_constants.c index b65e962f0aa..6b2ae57cb57 100644 --- a/yt/utilities/lib/pixelization_constants.c +++ b/yt/utilities/lib/pixelization_constants.c @@ -1,9 +1,4 @@ /******************************************************************************* -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
*******************************************************************************/ // // Some Cython versions don't like module-level constants, so we'll put them diff --git a/yt/utilities/lib/pixelization_constants.h b/yt/utilities/lib/pixelization_constants.h index b97c19aceeb..22d8af12730 100644 --- a/yt/utilities/lib/pixelization_constants.h +++ b/yt/utilities/lib/pixelization_constants.h @@ -1,9 +1,4 @@ /******************************************************************************* -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. *******************************************************************************/ // // Some Cython versions don't like module-level constants, so we'll put them diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index 035962cb82b..147064c28e4 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -5,19 +5,15 @@ Pixelization routines """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np cimport numpy as np cimport cython + +from cython.view cimport array as cvarray cimport libc.math as math -from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max, imin, imax, fabs +from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max, imin, \ + imax, fabs, iclip from yt.utilities.exceptions import \ YTPixelizeError, \ YTElementTypeNotRecognized @@ -35,6 +31,24 @@ from yt.utilities.lib.element_mappings cimport \ W1Sampler3D, \ T2Sampler2D, \ Tet2Sampler3D +from yt.geometry.particle_deposit cimport \ + kernel_func, get_kernel_func +from cython.parallel cimport prange +from cpython.exc cimport PyErr_CheckSignals +from yt.funcs import get_pbar +from yt.utilities.lib.cykdtree.kdtree cimport ( + PyKDTree, + KDTree, + Node, + uint64_t, + uint32_t, +) +from yt.utilities.lib.particle_kdtree_tools cimport find_neighbors, \ + axes_range, \ + set_axes_range +from yt.utilities.lib.bounded_priority_queue cimport BoundedPriorityQueue + +cdef int TABLE_NVALS=512 cdef extern from "pixelization_constants.h": enum: @@ -865,6 +879,529 @@ def pixelize_element_mesh(np.ndarray[np.float64_t, ndim=2] coords, free(field_vals) return img +# used as a cache to avoid repeatedly creating +# instances of SPHKernelInterpolationTable +kernel_tables = {} + +cdef class SPHKernelInterpolationTable: + cdef public object kernel_name + cdef kernel_func kernel + cdef np.float64_t[::1] table + cdef np.float64_t[::1] q2_vals + cdef np.float64_t q2_range, iq2_range + + def __init__(self, kernel_name): + self.kernel_name = kernel_name + self.kernel = get_kernel_func(kernel_name) + self.populate_table() + + @cython.initializedcheck(False) + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef np.float64_t integrate_q2(self, np.float64_t q2) nogil: + # See equation 30 of the SPLASH paper + cdef int i + # Our bounds are -sqrt(R*R - q2) and sqrt(R*R-q2) + # And our R is always 1; note that our smoothing kernel functions + # expect it to run from 0 .. 
1, so we multiply the integrand by 2 + cdef int N = 200 + cdef np.float64_t qz + cdef np.float64_t R = 1 + cdef np.float64_t R0 = -math.sqrt(R*R-q2) + cdef np.float64_t R1 = math.sqrt(R*R-q2) + cdef np.float64_t dR = (R1-R0)/N + # Set to our bounds + cdef np.float64_t integral = 0.0 + integral += self.kernel(math.sqrt(R0*R0 + q2)) + integral += self.kernel(math.sqrt(R1*R1 + q2)) + # We're going to manually conduct a trapezoidal integration + for i in range(1, N): + qz = R0 + i * dR + integral += 2.0*self.kernel(math.sqrt(qz*qz + q2)) + integral *= (R1-R0)/(2*N) + return integral + + def populate_table(self): + cdef int i + self.table = cvarray(format="d", shape=(TABLE_NVALS,), + itemsize=sizeof(np.float64_t)) + self.q2_vals = cvarray(format="d", shape=(TABLE_NVALS,), + itemsize=sizeof(np.float64_t)) + # We run from 0 to 1 here over R + for i in range(TABLE_NVALS): + self.q2_vals[i] = i * 1.0/(TABLE_NVALS-1) + self.table[i] = self.integrate_q2(self.q2_vals[i]) + + self.q2_range = self.q2_vals[TABLE_NVALS-1] - self.q2_vals[0] + self.iq2_range = (TABLE_NVALS-1)/self.q2_range + + @cython.initializedcheck(False) + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef inline np.float64_t interpolate(self, np.float64_t q2) nogil: + cdef int index + cdef np.float64_t F_interpolate + index = ((q2 - self.q2_vals[0])*(self.iq2_range)) + if index >= TABLE_NVALS: + return 0.0 + F_interpolate = self.table[index] + ( + (self.table[index+1] - self.table[index]) + *(q2 - self.q2_vals[index])*self.iq2_range) + return F_interpolate + + def interpolate_array(self, np.float64_t[:] q2_vals): + cdef np.float64_t[:] ret = np.empty(q2_vals.shape[0]) + cdef int i + for i in range(q2_vals.shape[0]): + ret[i] = self.interpolate(q2_vals[i]) + return np.array(ret) + +@cython.initializedcheck(False) +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def pixelize_sph_kernel_projection( + np.float64_t[:, :] buff, + np.float64_t[:] posx, + np.float64_t[:] posy, + np.float64_t[:] hsml, + np.float64_t[:] pmass, + np.float64_t[:] pdens, + np.float64_t[:] quantity_to_smooth, + bounds, + kernel_name="cubic", + weight_field=None): + + cdef np.intp_t xsize, ysize + cdef np.float64_t x_min, x_max, y_min, y_max, prefactor_j + cdef np.int64_t xi, yi, x0, x1, y0, y1 + cdef np.float64_t q_ij2, posx_diff, posy_diff, ih_j2 + cdef np.float64_t x, y, dx, dy, idx, idy, h_j2 + cdef int index, i, j + cdef np.float64_t[:] _weight_field + + if weight_field is not None: + _weight_field = weight_field + + # we find the x and y range over which we have pixels and we find how many + # pixels we have in each dimension + xsize, ysize = buff.shape[0], buff.shape[1] + x_min = bounds[0] + x_max = bounds[1] + y_min = bounds[2] + y_max = bounds[3] + + dx = (x_max - x_min) / xsize + dy = (y_max - y_min) / ysize + + idx = 1.0/dx + idy = 1.0/dy + + if kernel_name not in kernel_tables: + kernel_tables[kernel_name] = SPHKernelInterpolationTable(kernel_name) + cdef SPHKernelInterpolationTable itab = kernel_tables[kernel_name] + + with nogil: + # loop through every particle + for j in range(0, posx.shape[0]): + if j % 100000 == 0: + with gil: + PyErr_CheckSignals() + + # here we find the pixels which this particle contributes to + x0 = ( (posx[j] - hsml[j] - x_min) * idx) + x1 = ( (posx[j] + hsml[j] - x_min) * idx) + x0 = iclip(x0-1, 0, xsize) + x1 = iclip(x1+1, 0, xsize) + + y0 = ( (posy[j] - hsml[j] - y_min) * idy) + y1 = ( (posy[j] + hsml[j] - y_min) * idy) + y0 = iclip(y0-1, 0, ysize) + y1 = 
iclip(y1+1, 0, ysize) + + # we set the smoothing length squared with lower limit of the pixel + h_j2 = fmax(hsml[j]*hsml[j], dx*dy) + ih_j2 = 1.0/h_j2 + + prefactor_j = pmass[j] / pdens[j] / hsml[j]**2 + if weight_field is None: + prefactor_j *= quantity_to_smooth[j] + else: + prefactor_j *= quantity_to_smooth[j] * _weight_field[j] + + # found pixels we deposit on, loop through those pixels + for xi in range(x0, x1): + # we use the centre of the pixel to calculate contribution + x = (xi + 0.5) * dx + x_min + + posx_diff = posx[j] - x + posx_diff = posx_diff * posx_diff + + if posx_diff > h_j2: continue + + for yi in range(y0, y1): + y = (yi + 0.5) * dy + y_min + + posy_diff = posy[j] - y + posy_diff = posy_diff * posy_diff + if posy_diff > h_j2: continue + + q_ij2 = (posx_diff + posy_diff) * ih_j2 + if q_ij2 >= 1: + continue + + # see equation 32 of the SPLASH paper + # now we just use the kernel projection + buff[xi, yi] += prefactor_j * itab.interpolate(q_ij2) + +@cython.boundscheck(False) +@cython.wraparound(False) +def interpolate_sph_positions_gather(np.float64_t[:] buff, + np.float64_t[:, ::1] tree_positions, np.float64_t[:, ::1] field_positions, + np.float64_t[:] hsml, np.float64_t[:] pmass, np.float64_t[:] pdens, + np.float64_t[:] quantity_to_smooth, PyKDTree kdtree, + int use_normalization=1, kernel_name="cubic", pbar=None, + int num_neigh=32): + + """ + This function takes in arbitrary positions, field_positions, at which to + perform a nearest neighbor search and perform SPH interpolation. + + The results are stored in the buffer, buff, which is in the same order as + the field_positions are put in. + """ + + cdef np.float64_t q_ij, h_j2, ih_j2, prefactor_j, smoothed_quantity_j + cdef np.float64_t * pos_ptr + cdef int i, particle, index + cdef BoundedPriorityQueue queue = BoundedPriorityQueue(num_neigh, True) + cdef np.float64_t[:] buff_den + cdef KDTree * ctree = kdtree._tree + + # Which dimensions shall we use for spatial distances? 
+ cdef axes_range axes + set_axes_range(&axes, -1) + + # Only allocate memory if we are using normalization + if use_normalization: + buff_den = np.zeros(buff.shape[0], dtype="float64") + + kernel_func = get_kernel_func(kernel_name) + + # Loop through all the positions we want to interpolate the SPH field onto + with nogil: + for i in range(0, buff.shape[0]): + queue.size = 0 + + # Update the current position + pos_ptr = &field_positions[i, 0] + + # Use the KDTree to find the nearest neighbors + find_neighbors(pos_ptr, tree_positions, queue, ctree, -1, &axes) + + # Set the smoothing length squared to the square of the distance + # of the furthest nearest neighbor + h_j2 = queue.heap[0] + ih_j2 = 1.0/h_j2 + + # Loop through each nearest neighbor and add contribution to the + # buffer + for index in range(queue.max_elements): + particle = queue.pids[index] + + # Calculate contribution of this particle + prefactor_j = (pmass[particle] / pdens[particle] / + hsml[particle]**3) + q_ij = math.sqrt(queue.heap[index]*ih_j2) + smoothed_quantity_j = (prefactor_j * + quantity_to_smooth[particle] * + kernel_func(q_ij)) + + # See equations 6, 9, and 11 of the SPLASH paper + buff[i] += smoothed_quantity_j + + if use_normalization: + buff_den[i] += prefactor_j * kernel_func(q_ij) + + if use_normalization: + normalization_1d_utility(buff, buff_den) + +@cython.boundscheck(False) +@cython.wraparound(False) +def interpolate_sph_grid_gather(np.float64_t[:, :, :] buff, + np.float64_t[:, ::1] tree_positions, np.float64_t[:] bounds, + np.float64_t[:] hsml, np.float64_t[:] pmass, np.float64_t[:] pdens, + np.float64_t[:] quantity_to_smooth, PyKDTree kdtree, + int use_normalization=1, kernel_name="cubic", pbar=None, + int num_neigh=32): + """ + This function takes in the bounds and number of cells in a grid (well, + actually we implicity calculate this from the size of buff). Then we can + perform nearest neighbor search and SPH interpolation at the centre of each + cell in the grid. + """ + + cdef np.float64_t q_ij, h_j2, ih_j2, prefactor_j, smoothed_quantity_j + cdef np.float64_t dx, dy, dz + cdef np.float64_t[::1] pos = np.zeros(3, dtype="float64") + cdef np.float64_t * pos_ptr = &pos[0] + cdef int i, j, k, particle, index + cdef BoundedPriorityQueue queue = BoundedPriorityQueue(num_neigh, True) + cdef np.float64_t[:, :, :] buff_den + cdef KDTree * ctree = kdtree._tree + cdef int prog + + # Which dimensions shall we use for spatial distances? 
+ cdef axes_range axes + set_axes_range(&axes, -1) + + # Only allocate memory if we are using normalization + if use_normalization: + buff_den = np.zeros([buff.shape[0], buff.shape[1], + buff.shape[2]], dtype="float64") + + kernel_func = get_kernel_func(kernel_name) + dx = (bounds[1] - bounds[0]) / buff.shape[0] + dy = (bounds[3] - bounds[2]) / buff.shape[1] + dz = (bounds[5] - bounds[4]) / buff.shape[2] + + # Loop through all the positions we want to interpolate the SPH field onto + pbar = get_pbar(title="Interpolating (gather) SPH field", + maxval=(buff.shape[0]*buff.shape[1]*buff.shape[2] // + 10000)*10000) + + prog = 0 + with nogil: + for i in range(0, buff.shape[0]): + for j in range(0, buff.shape[1]): + for k in range(0, buff.shape[2]): + prog += 1 + if prog % 10000 == 0: + with gil: + PyErr_CheckSignals() + pbar.update(prog) + + queue.size = 0 + + # Update the current position + pos[0] = bounds[0] + (i + 0.5) * dx + pos[1] = bounds[2] + (j + 0.5) * dy + pos[2] = bounds[4] + (k + 0.5) * dz + + # Use the KDTree to find the nearest neighbors + find_neighbors(pos_ptr, tree_positions, queue, ctree, -1, &axes) + + # Set the smoothing length squared to the square of the distance + # of the furthest nearest neighbor + h_j2 = queue.heap[0] + ih_j2 = 1.0/h_j2 + + # Loop through each nearest neighbor and add contribution to the + # buffer + for index in range(queue.max_elements): + particle = queue.pids[index] + + # Calculate contribution of this particle + prefactor_j = (pmass[particle] / pdens[particle] / + hsml[particle]**3) + q_ij = math.sqrt(queue.heap[index]*ih_j2) + smoothed_quantity_j = (prefactor_j * + quantity_to_smooth[particle] * + kernel_func(q_ij)) + + # See equations 6, 9, and 11 of the SPLASH paper + buff[i, j, k] += smoothed_quantity_j + + if use_normalization: + buff_den[i, j, k] += prefactor_j * kernel_func(q_ij) + + if use_normalization: + normalization_3d_utility(buff, buff_den) + +@cython.initializedcheck(False) +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def pixelize_sph_kernel_slice( + np.float64_t[:, :] buff, + np.float64_t[:] posx, np.float64_t[:] posy, + np.float64_t[:] hsml, np.float64_t[:] pmass, + np.float64_t[:] pdens, + np.float64_t[:] quantity_to_smooth, + bounds, kernel_name="cubic"): + + # similar method to pixelize_sph_kernel_projection + cdef np.intp_t xsize, ysize + cdef np.float64_t x_min, x_max, y_min, y_max, prefactor_j + cdef np.int64_t xi, yi, x0, x1, y0, y1 + cdef np.float64_t q_ij, posx_diff, posy_diff, ih_j + cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, h_j + cdef int index, i, j + + xsize, ysize = buff.shape[0], buff.shape[1] + + x_min = bounds[0] + x_max = bounds[1] + y_min = bounds[2] + y_max = bounds[3] + + dx = (x_max - x_min) / xsize + dy = (y_max - y_min) / ysize + idx = 1.0/dx + idy = 1.0/dy + + kernel_func = get_kernel_func(kernel_name) + + with nogil: + for j in range(0, posx.shape[0]): + if j % 100000 == 0: + with gil: + PyErr_CheckSignals() + + x0 = ( (posx[j] - hsml[j] - x_min) * idx) + x1 = ( (posx[j] + hsml[j] - x_min) * idx) + x0 = iclip(x0-1, 0, xsize) + x1 = iclip(x1+1, 0, xsize) + + y0 = ( (posy[j] - hsml[j] - y_min) * idy) + y1 = ( (posy[j] + hsml[j] - y_min) * idy) + y0 = iclip(y0-1, 0, ysize) + y1 = iclip(y1+1, 0, ysize) + + h_j2 = fmax(hsml[j]*hsml[j], dx*dy) + h_j = math.sqrt(h_j2) + ih_j = 1.0/h_j + + prefactor_j = pmass[j] / pdens[j] / hsml[j]**3 + prefactor_j *= quantity_to_smooth[j] + + # Now we know which pixels to deposit onto for this particle, + # so loop over them and add this 
particle's contribution + for xi in range(x0, x1): + x = (xi + 0.5) * dx + x_min + + posx_diff = posx[j] - x + posx_diff = posx_diff * posx_diff + if posx_diff > h_j2: + continue + + for yi in range(y0, y1): + y = (yi + 0.5) * dy + y_min + + posy_diff = posy[j] - y + posy_diff = posy_diff * posy_diff + if posy_diff > h_j2: + continue + + # see equation 4 of the SPLASH paper + q_ij = math.sqrt(posx_diff + posy_diff) * ih_j + if q_ij >= 1: + continue + + # see equations 6, 9, and 11 of the SPLASH paper + buff[xi, yi] += prefactor_j * kernel_func(q_ij) + +@cython.initializedcheck(False) +@cython.boundscheck(False) +@cython.wraparound(False) +@cython.cdivision(True) +def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff, + np.float64_t[:] posx, np.float64_t[:] posy, np.float64_t[:] posz, + np.float64_t[:] hsml, np.float64_t[:] pmass, + np.float64_t[:] pdens, + np.float64_t[:] quantity_to_smooth, + bounds, pbar=None, kernel_name="cubic"): + + cdef np.intp_t xsize, ysize, zsize + cdef np.float64_t x_min, x_max, y_min, y_max, z_min, z_max, prefactor_j + cdef np.int64_t xi, yi, zi, x0, x1, y0, y1, z0, z1 + cdef np.float64_t q_ij, posx_diff, posy_diff, posz_diff + cdef np.float64_t x, y, z, dx, dy, dz, idx, idy, idz, h_j3, h_j2, h_j, ih_j + cdef int index, i, j, k + + xsize, ysize, zsize = buff.shape[0], buff.shape[1], buff.shape[2] + x_min = bounds[0] + x_max = bounds[1] + y_min = bounds[2] + y_max = bounds[3] + z_min = bounds[4] + z_max = bounds[5] + + dx = (x_max - x_min) / xsize + dy = (y_max - y_min) / ysize + dz = (z_max - z_min) / zsize + idx = 1.0/dx + idy = 1.0/dy + idz = 1.0/dz + + kernel_func = get_kernel_func(kernel_name) + + with nogil: + for j in range(0, posx.shape[0]): + if j % 50000 == 0: + with gil: + if(pbar is not None): + pbar.update(50000) + PyErr_CheckSignals() + + x0 = ( (posx[j] - hsml[j] - x_min) * idx) + x1 = ( (posx[j] + hsml[j] - x_min) * idx) + x0 = iclip(x0-1, 0, xsize) + x1 = iclip(x1+1, 0, xsize) + + y0 = ( (posy[j] - hsml[j] - y_min) * idy) + y1 = ( (posy[j] + hsml[j] - y_min) * idy) + y0 = iclip(y0-1, 0, ysize) + y1 = iclip(y1+1, 0, ysize) + + z0 = ( (posz[j] - hsml[j] - z_min) * idz) + z1 = ( (posz[j] + hsml[j] - z_min) * idz) + z0 = iclip(z0-1, 0, zsize) + z1 = iclip(z1+1, 0, zsize) + + h_j3 = fmax(hsml[j]*hsml[j]*hsml[j], dx*dy*dz) + h_j = math.cbrt(h_j3) + h_j2 = h_j*h_j + ih_j = 1/h_j + + prefactor_j = pmass[j] / pdens[j] / hsml[j]**3 + prefactor_j *= quantity_to_smooth[j] + + # Now we know which voxels to deposit onto for this particle, + # so loop over them and add this particle's contribution + for xi in range(x0, x1): + x = (xi + 0.5) * dx + x_min + + posx_diff = posx[j] - x + posx_diff = posx_diff * posx_diff + if posx_diff > h_j2: + continue + + for yi in range(y0, y1): + y = (yi + 0.5) * dy + y_min + + posy_diff = posy[j] - y + posy_diff = posy_diff * posy_diff + if posy_diff > h_j2: + continue + + for zi in range(z0, z1): + z = (zi + 0.5) * dz + z_min + + posz_diff = posz[j] - z + posz_diff = posz_diff * posz_diff + if posz_diff > h_j2: + continue + + # see equation 4 of the SPLASH paper + q_ij = math.sqrt(posx_diff + posy_diff + posz_diff) * ih_j + if q_ij >= 1: + continue + + buff[xi, yi, zi] += prefactor_j * kernel_func(q_ij) + def pixelize_element_mesh_line(np.ndarray[np.float64_t, ndim=2] coords, np.ndarray[np.int64_t, ndim=2] conn, np.ndarray[np.float64_t, ndim=1] start_point, @@ -968,3 +1505,147 @@ def pixelize_element_mesh_line(np.ndarray[np.float64_t, ndim=2] coords, free(vertices) free(field_vals) return arc_length, plot_values + 
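(For context on the SPH pixelization added above: `SPHKernelInterpolationTable` pre-integrates the 3D smoothing kernel along the line of sight, in the spirit of eq. 30 of the SPLASH paper, and `pixelize_sph_kernel_projection` then looks the result up once per pixel. Below is a minimal pure-NumPy sketch of that idea, not the yt implementation itself; the cubic-spline normalisation and the helper names are illustrative assumptions.)

    import numpy as np

    def cubic_kernel(q):
        # M4 cubic spline with compact support on [0, 1]; the 8/pi factor is the
        # usual 3D normalisation (assumed here purely for illustration).
        q = np.asarray(q, dtype="float64")
        w = np.zeros_like(q)
        inner = q < 0.5
        outer = (q >= 0.5) & (q < 1.0)
        w[inner] = 1.0 - 6.0 * q[inner]**2 + 6.0 * q[inner]**3
        w[outer] = 2.0 * (1.0 - q[outer])**3
        return (8.0 / np.pi) * w

    def build_projection_table(nvals=512, nquad=200):
        # F(q^2) = integral of W(sqrt(qz^2 + q^2)) dqz over the kernel support,
        # evaluated once per q^2 bin with the trapezoidal rule.
        q2_vals = np.linspace(0.0, 1.0, nvals)
        table = np.empty(nvals)
        for i, q2 in enumerate(q2_vals):
            half = np.sqrt(1.0 - q2)
            qz = np.linspace(-half, half, nquad + 1)
            vals = cubic_kernel(np.sqrt(qz * qz + q2))
            # trapezoidal rule: the two endpoints carry half weight
            table[i] = (2.0 * half / nquad) * (vals.sum() - 0.5 * (vals[0] + vals[-1]))
        return q2_vals, table

    # Per pixel, q2 is the squared pixel-particle separation in units of the
    # smoothing length, and the pixel accumulates prefactor_j * F(q2), where
    # prefactor_j = pmass[j] / pdens[j] / hsml[j]**2 * quantity_to_smooth[j].
    q2_vals, table = build_projection_table()
    value = np.interp(0.25, q2_vals, table)

The slice and arbitrary-grid routines skip the table and evaluate the kernel directly (see the references to eqs. 4, 6, 9, and 11 of SPLASH in the code above), since no line-of-sight integral is needed there.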
+ +@cython.boundscheck(False) +@cython.wraparound(False) +def off_axis_projection_SPH(np.float64_t[:] px, + np.float64_t[:] py, + np.float64_t[:] pz, + np.float64_t[:] particle_masses, + np.float64_t[:] particle_densities, + np.float64_t[:] smoothing_lengths, + bounds, + center, + width, + np.float64_t[:] quantity_to_smooth, + np.float64_t[:, :] projection_array, + normal_vector, + north_vector, + weight_field=None): + # Do nothing in event of a 0 normal vector + if np.allclose(normal_vector, np.array([0., 0., 0.]), rtol=1e-09): + return + + # We want to do two rotations, one to first rotate our coordinates to have + # the normal vector be the z-axis (i.e., the viewer's perspective), and then + # another rotation to make the north-vector be the y-axis (i.e., north). + # Fortunately, total_rotation_matrix = rotation_matrix_1 x rotation_matrix_2 + cdef int num_particles = np.size(px) + cdef np.float64_t[:] z_axis = np.array([0., 0., 1.], dtype='float_') + cdef np.float64_t[:] y_axis = np.array([0., 1., 0.], dtype='float_') + cdef np.float64_t[:, :] normal_rotation_matrix + cdef np.float64_t[:] transformed_north_vector + cdef np.float64_t[:, :] north_rotation_matrix + cdef np.float64_t[:, :] rotation_matrix + + normal_rotation_matrix = get_rotation_matrix(normal_vector, z_axis) + transformed_north_vector = np.matmul(normal_rotation_matrix, north_vector) + north_rotation_matrix = get_rotation_matrix(transformed_north_vector, y_axis) + rotation_matrix = np.matmul(north_rotation_matrix, normal_rotation_matrix) + + cdef np.float64_t[:] px_rotated = np.empty(num_particles, dtype='float_') + cdef np.float64_t[:] py_rotated = np.empty(num_particles, dtype='float_') + cdef np.float64_t[:] coordinate_matrix = np.empty(3, dtype='float_') + cdef np.float64_t[:] rotated_coordinates + cdef np.float64_t[:] rotated_center + rotated_center = rotation_matmul( + rotation_matrix, np.array([center[0], center[1], center[2]])) + + # set up the rotated bounds + cdef np.float64_t rot_bounds_x0 = rotated_center[0] - width[0] / 2 + cdef np.float64_t rot_bounds_x1 = rotated_center[0] + width[0] / 2 + cdef np.float64_t rot_bounds_y0 = rotated_center[1] - width[1] / 2 + cdef np.float64_t rot_bounds_y1 = rotated_center[1] + width[1] / 2 + + for i in range(num_particles): + coordinate_matrix[0] = px[i] + coordinate_matrix[1] = py[i] + coordinate_matrix[2] = pz[i] + rotated_coordinates = rotation_matmul( + rotation_matrix, coordinate_matrix) + px_rotated[i] = rotated_coordinates[0] + py_rotated[i] = rotated_coordinates[1] + + pixelize_sph_kernel_projection(projection_array, + px_rotated, + py_rotated, + smoothing_lengths, + particle_masses, + particle_densities, + quantity_to_smooth, + [rot_bounds_x0, rot_bounds_x1, + rot_bounds_y0, rot_bounds_y1], + weight_field=weight_field) + + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef np.float64_t[:] rotation_matmul(np.float64_t[:, :] rotation_matrix, + np.float64_t[:] coordinate_matrix): + cdef np.float64_t[:] out = np.zeros(3) + for i in range(3): + for j in range(3): + out[i] += rotation_matrix[i, j] * coordinate_matrix[j] + return out + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef np.float64_t[:, :] get_rotation_matrix(np.float64_t[:] normal_vector, + np.float64_t[:] final_vector): + """ Returns a numpy rotation matrix corresponding to the + rotation of the given normal vector to the specified final_vector. + See https://math.stackexchange.com/a/476311 although note we return the + inverse of what's specified there. 
+ """ + cdef np.float64_t[:] normal_unit_vector = normal_vector / np.linalg.norm(normal_vector) + cdef np.float64_t[:] final_unit_vector = final_vector / np.linalg.norm(final_vector) + cdef np.float64_t[:] v = np.cross(final_unit_vector, normal_unit_vector) + cdef np.float64_t s = np.linalg.norm(v) + cdef np.float64_t c = np.dot(final_unit_vector, normal_unit_vector) + # if the normal vector is identical to the final vector, just return the + # identity matrix + if np.isclose(c, 1, rtol=1e-09): + return np.identity(3, dtype='float_') + # if the normal vector is the negative final vector, return the appropriate + # rotation matrix for flipping your coordinate system. + if np.isclose(s, 0, rtol=1e-09): + return np.array([[0, -1, 0],[-1, 0, 0],[0, 0, -1]], dtype='float_') + + cdef np.float64_t[:, :] cross_product_matrix = np.array([[0, -1 * v[2], v[1]], + [v[2], 0, -1 * v[0]], + [-1 * v[1], v[0], 0]], + dtype='float_') + return np.linalg.inv(np.identity(3, dtype='float_') + cross_product_matrix + + np.matmul(cross_product_matrix, cross_product_matrix) + * 1/(1+c)) + +@cython.boundscheck(False) +@cython.wraparound(False) +def normalization_3d_utility(np.float64_t[:, :, :] num, + np.float64_t[:, :, :] den): + cdef int i, j, k + for i in range(num.shape[0]): + for j in range(num.shape[1]): + for k in range(num.shape[2]): + if den[i, j, k] != 0.0: + num[i, j, k] = num[i, j, k] / den[i, j, k] + +@cython.boundscheck(False) +@cython.wraparound(False) +def normalization_2d_utility(np.float64_t[:, :] num, + np.float64_t[:, :] den): + cdef int i, j + for i in range(num.shape[0]): + for j in range(num.shape[1]): + if den[i, j] != 0.0: + num[i, j] = num[i, j] / den[i, j] + +@cython.boundscheck(False) +@cython.wraparound(False) +def normalization_1d_utility(np.float64_t[:] num, + np.float64_t[:] den): + cdef int i + for i in range(num.shape[0]): + if den[i] != 0.0: + num[i] = num[i] / den[i] diff --git a/yt/utilities/lib/points_in_volume.pyx b/yt/utilities/lib/points_in_volume.pyx index d9bf938169d..041e1775f29 100644 --- a/yt/utilities/lib/points_in_volume.pyx +++ b/yt/utilities/lib/points_in_volume.pyx @@ -5,13 +5,6 @@ Checks for points contained in a volume """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np @@ -134,7 +127,7 @@ cdef void get_cross_product(np.float64_t v1[3], cp[0] = v1[1]*v2[2] - v1[2]*v2[1] cp[1] = v1[3]*v2[0] - v1[0]*v2[3] cp[2] = v1[0]*v2[1] - v1[1]*v2[0] - #print cp[0], cp[1], cp[2] + #print(cp[0], cp[1], cp[2]) cdef int check_projected_overlap( np.float64_t sep_ax[3], np.float64_t sep_vec[3], int gi, @@ -151,8 +144,8 @@ cdef int check_projected_overlap( ba += fabs(tba) ga += fabs(tga) sep_dot += sep_vec[g_ax] * sep_ax[g_ax] - #print sep_vec[0], sep_vec[1], sep_vec[2], - #print sep_ax[0], sep_ax[1], sep_ax[2] + #print(sep_vec[0], sep_vec[1], sep_vec[2],) + #print(sep_ax[0], sep_ax[1], sep_ax[2]) return (fabs(sep_dot) > ba+ga) # Now we do @@ -204,7 +197,7 @@ def find_grids_in_inclined_box( g_vec[g_ax][g_ax] = 0.5 * (grid_right_edges[gi, g_ax] - grid_left_edges[gi, g_ax]) for b_ax in range(15): - #print b_ax, + #print(b_ax,) if check_projected_overlap( sep_ax[b_ax], sep_vec, gi, b_vec, g_vec): diff --git a/yt/utilities/lib/quad_tree.pyx b/yt/utilities/lib/quad_tree.pyx index dbf7b69653e..07e99375664 100644 --- a/yt/utilities/lib/quad_tree.pyx +++ b/yt/utilities/lib/quad_tree.pyx @@ -5,13 +5,6 @@ A refine-by-two AMR-specific quadtree """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np diff --git a/yt/utilities/lib/ragged_arrays.pyx b/yt/utilities/lib/ragged_arrays.pyx index 77779347e83..17541d55a68 100644 --- a/yt/utilities/lib/ragged_arrays.pyx +++ b/yt/utilities/lib/ragged_arrays.pyx @@ -5,13 +5,6 @@ Some simple operations for operating on ragged arrays """ -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/lib/tests/test_bitarray.py b/yt/utilities/lib/tests/test_bitarray.py index 20aee9349e5..067ebeb1c7b 100644 --- a/yt/utilities/lib/tests/test_bitarray.py +++ b/yt/utilities/lib/tests/test_bitarray.py @@ -1,7 +1,7 @@ import numpy as np import yt.utilities.lib.bitarray as ba -from yt.testing import assert_equal +from yt.testing import assert_equal, assert_array_equal def test_inout_bitarray(): # Check that we can do it for bitarrays that are funny-shaped @@ -40,3 +40,13 @@ def test_inout_bitarray(): arr = b.as_bool_array() assert_equal(arr[:i+1].all(), True) assert_equal(arr[i+1:].any(), False) + for i in range(10): + b.set_value(i, 0) + arr = b.as_bool_array() + assert_equal(arr.any(), False) + b.set_value(7, 1) + arr = b.as_bool_array() + assert_array_equal(arr, [0, 0, 0, 0, 0, 0, 0, 1, 0, 0]) + b.set_value(2, 1) + arr = b.as_bool_array() + assert_array_equal(arr, [0, 0, 1, 0, 0, 0, 0, 1, 0, 0]) diff --git a/yt/utilities/lib/tests/test_element_mappings.py b/yt/utilities/lib/tests/test_element_mappings.py index 9fb9b2cad0c..55b583e7d44 100644 --- a/yt/utilities/lib/tests/test_element_mappings.py +++ b/yt/utilities/lib/tests/test_element_mappings.py @@ -1,18 +1,3 @@ -""" -This file contains tests of the intracell interpolation code contained is -yt/utilities/lib/element_mappings.pyx. - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.testing import assert_almost_equal diff --git a/yt/utilities/lib/tests/test_geometry_utils.py b/yt/utilities/lib/tests/test_geometry_utils.py index 9b1d7dad5a8..e9cec61f348 100644 --- a/yt/utilities/lib/tests/test_geometry_utils.py +++ b/yt/utilities/lib/tests/test_geometry_utils.py @@ -2,14 +2,383 @@ from yt.testing import \ fake_random_ds, \ + assert_array_equal, \ assert_array_less, \ - assert_array_equal + assert_equal, \ + assert_raises from yt.utilities.lib.misc_utilities import \ obtain_position_vector, \ obtain_relative_velocity_vector _fields = ("density", "velocity_x", "velocity_y", "velocity_z") +# TODO: error compact/spread bits for incorrect size +# TODO: test msdb for [0,0], [1,1], [2,2] etc. 
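(The bit-twiddling tests that follow exercise the Morton/Z-order helpers in `yt.utilities.lib.geometry_utils`. As a rough pure-Python reference for what they check, illustrative only and not the Cython implementation:)

    def spread_bits_py(x):
        # Insert two zero bits after every bit of a 21-bit integer, e.g.
        # 0b111 -> 0b1001001; test_spread_bits checks the full 21-bit pattern.
        out = 0
        for b in range(21):
            out |= ((x >> b) & 1) << (3 * b)
        return out

    def compact_bits_py(x):
        # Inverse operation: keep every third bit.
        out = 0
        for b in range(21):
            out |= ((x >> (3 * b)) & 1) << b
        return out

    def morton_index_py(x, y, z):
        # Interleave the three coordinates with x most significant in each bit
        # triplet; this reproduces the expected indices 10 and 229 for the
        # points (0, 1, 2) and (3, 4, 5) in test_get_morton_indices below.
        return (spread_bits_py(x) << 2) | (spread_bits_py(y) << 1) | spread_bits_py(z)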
+ +def test_spread_bits(): + from yt.utilities.lib.geometry_utils import spread_bits + li = [(np.uint64(0b111111111111111111111), np.uint64(0b1001001001001001001001001001001001001001001001001001001001001))] + for i,ans in li: + out = spread_bits(i) + assert_equal(out,ans) + +def test_compact_bits(): + from yt.utilities.lib.geometry_utils import compact_bits + li = [(np.uint64(0b111111111111111111111), np.uint64(0b1001001001001001001001001001001001001001001001001001001001001))] + for ans,i in li: + out = compact_bits(i) + assert_equal(out,ans) + +def test_spread_and_compact_bits(): + from yt.utilities.lib.geometry_utils import spread_bits,compact_bits + li = [np.uint64(0b111111111111111111111)] + for ans in li: + mi = spread_bits(ans) + out = compact_bits(mi) + assert_equal(out,ans) + +def test_lsz(): + from yt.utilities.lib.geometry_utils import lsz + li = [(np.uint64(0b1001001001001001001001001001001001001001001001001001001001001) ,3*21, 3, 0), + (np.uint64(0b1001001001001001001001001001001001001001001001001001001001000) , 3*0, 3, 0), + (np.uint64(0b1001001001001001001001001001001001001001001001001001001000001) , 3*1, 3, 0), + (np.uint64(0b1001001001001001001001001001001001001001001001001001000001001) , 3*2, 3, 0), + (np.uint64(0b10010010010010010010010010010010010010010010010010010010010010) , 3*0, 3, 0), + (np.uint64(0b100100100100100100100100100100100100100100100100100100100100100), 3*0, 3, 0), + (np.uint64(0b100), 0, 1, 0), + (np.uint64(0b100), 1, 1, 1), + (np.uint64(0b100), 3, 1, 2), + (np.uint64(0b100), 3, 1, 3)] + for i,ans,stride,start in li: + out = lsz(i,stride=stride,start=start) + assert_equal(out,ans) + +def test_lsb(): + from yt.utilities.lib.geometry_utils import lsb + li = [(np.uint64(0b1001001001001001001001001001001001001001001001001001001001001) , 3*0), + (np.uint64(0b1001001001001001001001001001001001001001001001001001001001000) , 3*1), + (np.uint64(0b1001001001001001001001001001001001001001001001001001001000000) , 3*2), + (np.uint64(0b1001001001001001001001001001001001001001001001001001000000000) , 3*3), + (np.uint64(0b10010010010010010010010010010010010010010010010010010010010010) ,3*21), + (np.uint64(0b100100100100100100100100100100100100100100100100100100100100100),3*21)] + for i,ans in li: + out = lsb(i,stride=3) + assert_equal(out,ans) + +def test_bitwise_addition(): + from yt.utilities.lib.geometry_utils import bitwise_addition + # TODO: Handle negative & periodic boundaries + lz = [(0,1), +# (0,-1), + (1,1), + (1,2), + (1,4), + (1,-1), + (2,1), + (2,2), + (2,-1), + (2,-2), + (3,1), + (3,5), + (3,-1)] + for i,a in lz: + i = np.uint64(i) + a = np.int64(a) + out = bitwise_addition(i,a,stride=1,start=0) + assert_equal(out,i+a) + +#def test_add_to_morton_coord(): +# from yt.utilities.lib.geometry_utils import add_to_morton_coord + + +def test_get_morton_indices(): + from yt.utilities.lib.geometry_utils import get_morton_indices,get_morton_indices_unravel + INDEX_MAX_64 = np.uint64(2097151) + li = np.arange(6,dtype=np.uint64).reshape((2,3)) + mi_ans = np.array([10,229],dtype=np.uint64) + mi_out = get_morton_indices(li) + mi_out2 = get_morton_indices_unravel(li[:,0],li[:,1],li[:,2]) + assert_array_equal(mi_out,mi_ans) + assert_array_equal(mi_out2,mi_ans) + li[0,:] = INDEX_MAX_64*np.ones(3,dtype=np.uint64) + assert_raises(ValueError,get_morton_indices,li) + assert_raises(ValueError,get_morton_indices_unravel,li[:,0],li[:,1],li[:,2]) + + +def test_get_morton_points(): + from yt.utilities.lib.geometry_utils import get_morton_points + mi = 
np.array([10,229],dtype=np.uint64) + li_ans = np.arange(6,dtype=np.uint64).reshape((2,3)) + li_out = get_morton_points(mi) + assert_array_equal(li_out,li_ans) + + +def test_compare_morton(): + # TODO: Add error messages to assertions + from yt.utilities.lib.geometry_utils import compare_morton + # Diagonal + p = np.array([0.0,0.0,0.0],dtype=np.float64) + q = np.array([1.0,1.0,1.0],dtype=np.float64) + assert_equal(compare_morton(p,q),1) + assert_equal(compare_morton(q,p),0) + assert_equal(compare_morton(p,p),0) + # 1-1 vs 0-1 + p = np.array([1.0,1.0,0.0],dtype=np.float64) + q = np.array([1.0,1.0,1.0],dtype=np.float64) + assert_equal(compare_morton(p,q),1) + assert_equal(compare_morton(q,p),0) + assert_equal(compare_morton(p,p),0) + # x advance, y decrease + p = np.array([0.0,1.0,0.0],dtype=np.float64) + q = np.array([1.0,0.0,0.0],dtype=np.float64) + assert_equal(compare_morton(p,q),1) + assert_equal(compare_morton(q,p),0) + assert_equal(compare_morton(p,p),0) + # x&y advance, z decrease + p = np.array([0.0,0.0,1.0],dtype=np.float64) + q = np.array([1.0,1.0,0.0],dtype=np.float64) + assert_equal(compare_morton(p,q),1) + assert_equal(compare_morton(q,p),0) + assert_equal(compare_morton(p,p),0) + + +def test_get_morton_neighbors_coarse(): + from yt.utilities.lib.geometry_utils import get_morton_neighbors_coarse + imax = 5 + ngz = 1 + tests = {(7,1):np.array([35, 49,56,48, 33,40,32, 42,34, 3, 17,24,16, + 1,8,0, 10,2, 21, 28,20, 5, 12,4, 14, 6], dtype='uint64'), + (7,0):np.array([35, 49,56,48, 33,40,32, 42,34, 3, 17,24,16, + 1,8,0, 10,2, 21, 28,20, 5, 12,4, 14, 6], dtype='uint64'), + (0,1):np.array([4, 6,7,70, 132,133,196, 5,68, 256, 258,259,322, + 384,385,448, 257,320, 2, 3,66, 128, 129,192, 1, 64], dtype='uint64'), + (0,0):np.array([4, 6,7, 5, 2, 3, 1], dtype='uint64'), + (448,1):np.array([192, 64,0,9, 82,18,27, 128,137, 228, 100,36,45, + 118,54,63, 164,173, 320, 256,265, 338, 274,283, 384, 393], dtype='uint64'), + (448,0):np.array([228, 118,63, 173, 338, 283, 393], dtype='uint64')} + for (mi1, periodic), ans in tests.items(): + n1 = get_morton_neighbors_coarse(mi1, imax, periodic, ngz) + assert_equal(np.sort(n1),np.sort(ans)) + + +def test_get_morton_neighbors_refined(): + from yt.utilities.lib.geometry_utils import get_morton_neighbors_refined + imax1 = 5 + imax2 = 5 + ngz = 1 + tests = {( 7, 7,1):(np.array([ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,7,7, 7,7, 7, 7, 7, 7, 7,7, 7, 7], dtype='uint64'), + np.array([ 35, 49,56,48, 33,40,32, 42,34, 3, 17,24,16, 1,8,0, 10,2, 21, 28,20, 5, 12,4, 14, 6], dtype='uint64')), + ( 7, 7,0):(np.array([ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,7,7, 7,7, 7, 7, 7, 7, 7,7, 7, 7], dtype='uint64'), + np.array([ 35, 49,56,48, 33,40,32, 42,34, 3, 17,24,16, 1,8,0, 10,2, 21, 28,20, 5, 12,4, 14, 6], dtype='uint64')), + ( 0, 0,1):(np.array([0, 0,0,64, 128,128,192, 0,64, 256, 256,256,320, 384,384,448, 256,320, 0, 0,64, 128, 128,192, 0, 64], dtype='uint64'), + np.array([4, 6,7,70, 132,133,196, 5,68, 256, 258,259,322, 384,385,448, 257,320, 2, 3,66, 128, 129,192, 1, 64], dtype='uint64')), + ( 0, 0,0):(np.array([0, 0,0, 0, 0, 0, 0], dtype='uint64'), + np.array([4, 6,7, 5, 2, 3, 1], dtype='uint64')), + (448,448,1):(np.array([192, 64,0,64, 192,128,192, 128,192, 448, 320,256,320, 448,384,448, 384,448, 320, 256,320, 448, 384,448, 384, 448], dtype='uint64'), + np.array([192, 64,0, 9, 82, 18, 27, 128,137, 228, 100, 36, 45, 118, 54, 63, 164,173, 320, 256,265, 338, 274,283, 384, 393], dtype='uint64')), + (448,448,0):(np.array([448, 448,448, 448, 448, 448, 448], dtype='uint64'), + 
np.array([228, 118, 63, 173, 338, 283, 393], dtype='uint64'))} + for (mi1, mi2, periodic), (ans1, ans2) in tests.items(): + n1, n2 = get_morton_neighbors_refined(mi1, mi2, imax1, imax2, periodic, ngz) + assert_equal(np.sort(n1),np.sort(ans1)) + assert_equal(np.sort(n2),np.sort(ans2)) + + +def test_morton_neighbor(): + from yt.utilities.lib.geometry_utils import \ + morton_neighbor, \ + get_morton_indices + order = 20 + imax = np.uint64(1 << order) + p = np.array([[imax/2,imax/2,imax/2], + [imax/2,imax/2,0 ], + [imax/2,imax/2,imax ]],dtype=np.uint64) + p_ans = np.array([[imax/2,imax/2,imax/2+1], + [imax/2,imax/2,imax/2-1], + [imax/2,imax/2,imax-1 ], + [imax/2,imax/2,1 ], + [imax/2,imax/2+1,imax/2+1], + [imax/2-1,imax/2-1,imax/2], + [imax/2-1,imax/2,imax/2+1], + [imax/2,imax/2-1,imax-1 ], + [imax/2,imax/2+1,1 ]],dtype=np.uint64) + mi_ans = get_morton_indices(p_ans) + assert_equal(morton_neighbor(p[0,:],[2],[+1],imax),mi_ans[0]) + assert_equal(morton_neighbor(p[0,:],[2],[-1],imax),mi_ans[1]) + assert_equal(morton_neighbor(p[1,:],[2],[-1],imax,periodic=False),-1) + assert_equal(morton_neighbor(p[2,:],[2],[+1],imax,periodic=False),-1) + assert_equal(morton_neighbor(p[1,:],[2],[-1],imax,periodic=True ),mi_ans[2]) + assert_equal(morton_neighbor(p[2,:],[2],[+1],imax,periodic=True ),mi_ans[3]) + assert_equal(morton_neighbor(p[0,:],[1,2],[+1,+1],imax),mi_ans[4]) + assert_equal(morton_neighbor(p[0,:],[0,1],[-1,-1],imax),mi_ans[5]) + assert_equal(morton_neighbor(p[0,:],[0,2],[-1,+1],imax),mi_ans[6]) + assert_equal(morton_neighbor(p[1,:],[1,2],[-1,-1],imax,periodic=False),-1) + assert_equal(morton_neighbor(p[2,:],[1,2],[+1,+1],imax,periodic=False),-1) + assert_equal(morton_neighbor(p[1,:],[1,2],[-1,-1],imax,periodic=True ),mi_ans[7]) + assert_equal(morton_neighbor(p[2,:],[1,2],[+1,+1],imax,periodic=True ),mi_ans[8]) + + +def test_get_morton_neighbors(): + from yt.utilities.lib.geometry_utils import get_morton_neighbors, get_morton_indices + order = 20 + imax = 1 << order + p = np.array([[imax/2,imax/2,imax/2], + [imax/2,imax/2,0 ], + [imax/2,imax/2,imax ]],dtype=np.uint64) + pn_non = [ + np.array([ + # x +/- 1 + [imax/2+1,imax/2,imax/2], + [imax/2+1,imax/2+1,imax/2],[imax/2+1,imax/2+1,imax/2+1],[imax/2+1,imax/2+1,imax/2-1], + [imax/2+1,imax/2-1,imax/2],[imax/2+1,imax/2-1,imax/2+1],[imax/2+1,imax/2-1,imax/2-1], + [imax/2+1,imax/2,imax/2+1],[imax/2+1,imax/2,imax/2-1], + [imax/2-1,imax/2,imax/2], + [imax/2-1,imax/2+1,imax/2],[imax/2-1,imax/2+1,imax/2+1],[imax/2-1,imax/2+1,imax/2-1], + [imax/2-1,imax/2-1,imax/2],[imax/2-1,imax/2-1,imax/2+1],[imax/2-1,imax/2-1,imax/2-1], + [imax/2-1,imax/2,imax/2+1],[imax/2-1,imax/2,imax/2-1], + # y +/- 1 + [imax/2,imax/2+1,imax/2], + [imax/2,imax/2+1,imax/2+1],[imax/2,imax/2+1,imax/2-1], + [imax/2,imax/2-1,imax/2], + [imax/2,imax/2-1,imax/2+1],[imax/2,imax/2-1,imax/2-1], + # x +/- 1 + [imax/2,imax/2,imax/2+1], + [imax/2,imax/2,imax/2-1]],dtype=np.uint64), + np.array([ + # x +/- 1 + [imax/2+1,imax/2,0], + [imax/2+1,imax/2+1,0],[imax/2+1,imax/2+1,1], + [imax/2+1,imax/2-1,0],[imax/2+1,imax/2-1,1], + [imax/2+1,imax/2,1], + [imax/2-1,imax/2,0], + [imax/2-1,imax/2+1,0],[imax/2-1,imax/2+1,1], + [imax/2-1,imax/2-1,0],[imax/2-1,imax/2-1,1], + [imax/2-1,imax/2,1], + # y +/- 1 + [imax/2,imax/2+1,0], + [imax/2,imax/2+1,1], + [imax/2,imax/2-1,0], + [imax/2,imax/2-1,1], + # z +/- 1 + [imax/2,imax/2,0+1]],dtype=np.uint64), + np.array([ + # x +/- 1 + [imax/2+1,imax/2,imax], + [imax/2+1,imax/2+1,imax],[imax/2+1,imax/2+1,imax-1], + [imax/2+1,imax/2-1,imax],[imax/2+1,imax/2-1,imax-1], + 
[imax/2+1,imax/2,imax-1], + [imax/2-1,imax/2,imax], + [imax/2-1,imax/2+1,imax],[imax/2-1,imax/2+1,imax-1], + [imax/2-1,imax/2-1,imax],[imax/2-1,imax/2-1,imax-1], + [imax/2-1,imax/2,imax-1], + # y +/- 1 + [imax/2,imax/2+1,imax], + [imax/2,imax/2+1,imax-1], + [imax/2,imax/2-1,imax], + [imax/2,imax/2-1,imax-1], + # z +/- 1 + [imax/2,imax/2,imax-1]],dtype=np.uint64)] + pn_per = [ + np.array([ + # x +/- 1 + [imax/2+1,imax/2,imax/2], + [imax/2+1,imax/2+1,imax/2],[imax/2+1,imax/2+1,imax/2+1],[imax/2+1,imax/2+1,imax/2-1], + [imax/2+1,imax/2-1,imax/2],[imax/2+1,imax/2-1,imax/2+1],[imax/2+1,imax/2-1,imax/2-1], + [imax/2+1,imax/2,imax/2+1],[imax/2+1,imax/2,imax/2-1], + [imax/2-1,imax/2,imax/2], + [imax/2-1,imax/2+1,imax/2],[imax/2-1,imax/2+1,imax/2+1],[imax/2-1,imax/2+1,imax/2-1], + [imax/2-1,imax/2-1,imax/2],[imax/2-1,imax/2-1,imax/2+1],[imax/2-1,imax/2-1,imax/2-1], + [imax/2-1,imax/2,imax/2+1],[imax/2-1,imax/2,imax/2-1], + # y +/- 1 + [imax/2,imax/2+1,imax/2], + [imax/2,imax/2+1,imax/2+1],[imax/2,imax/2+1,imax/2-1], + [imax/2,imax/2-1,imax/2], + [imax/2,imax/2-1,imax/2+1],[imax/2,imax/2-1,imax/2-1], + # z +/- 1 + [imax/2,imax/2,imax/2+1], + [imax/2,imax/2,imax/2-1]],dtype=np.uint64), + np.array([ + # x +/- 1 + [imax/2+1,imax/2,0], + [imax/2+1,imax/2+1,0],[imax/2+1,imax/2+1,1],[imax/2+1,imax/2+1,imax-1], + [imax/2+1,imax/2-1,0],[imax/2+1,imax/2-1,1],[imax/2+1,imax/2-1,imax-1], + [imax/2+1,imax/2,1],[imax/2+1,imax/2,imax-1], + [imax/2-1,imax/2,0], + [imax/2-1,imax/2+1,0],[imax/2-1,imax/2+1,1],[imax/2-1,imax/2+1,imax-1], + [imax/2-1,imax/2-1,0],[imax/2-1,imax/2-1,1],[imax/2-1,imax/2-1,imax-1], + [imax/2-1,imax/2,1],[imax/2-1,imax/2,imax-1], + # y +/- 1 + [imax/2,imax/2+1,0], + [imax/2,imax/2+1,1],[imax/2,imax/2+1,imax-1], + [imax/2,imax/2-1,0], + [imax/2,imax/2-1,1],[imax/2,imax/2-1,imax-1], + # z +/- 1 + [imax/2,imax/2,0+1], + [imax/2,imax/2,imax-1]],dtype=np.uint64), + np.array([ + # x +/- 1 + [imax/2+1,imax/2,imax], + [imax/2+1,imax/2+1,imax],[imax/2+1,imax/2+1,1],[imax/2+1,imax/2+1,imax-1], + [imax/2+1,imax/2-1,imax],[imax/2+1,imax/2-1,1],[imax/2+1,imax/2-1,imax-1], + [imax/2+1,imax/2,1],[imax/2+1,imax/2,imax-1], + [imax/2-1,imax/2,imax], + [imax/2-1,imax/2+1,imax],[imax/2-1,imax/2+1,1],[imax/2-1,imax/2+1,imax-1], + [imax/2-1,imax/2-1,imax],[imax/2-1,imax/2-1,1],[imax/2-1,imax/2-1,imax-1], + [imax/2-1,imax/2,1],[imax/2-1,imax/2,imax-1], + # y +/- 1 + [imax/2,imax/2+1,imax], + [imax/2,imax/2+1,1],[imax/2,imax/2+1,imax-1], + [imax/2,imax/2-1,imax], + [imax/2,imax/2-1,1],[imax/2,imax/2-1,imax-1], + # z +/- 1 + [imax/2,imax/2,1], + [imax/2,imax/2,imax-1]],dtype=np.uint64)] + mi = get_morton_indices(p) + N = mi.shape[0] + # Non-periodic + for i in range(N): + out = get_morton_neighbors(np.array([mi[i]],dtype=np.uint64),order=order,periodic=False) + ans = get_morton_indices(np.vstack([p[i,:],pn_non[i]])) + assert_array_equal(np.unique(out),np.unique(ans),err_msg="Non-periodic: {}".format(i)) + # Periodic + for i in range(N): + out = get_morton_neighbors(np.array([mi[i]],dtype=np.uint64),order=order,periodic=True) + ans = get_morton_indices(np.vstack([p[i,:],pn_per[i]])) + assert_array_equal(np.unique(out),np.unique(ans),err_msg="Periodic: {}".format(i)) + +def test_dist(): + from yt.utilities.lib.geometry_utils import dist + p = np.array([0.0,0.0,0.0],dtype=np.float64) + q = np.array([0.0,0.0,0.0],dtype=np.float64) + assert_equal(dist(p,q),0.0) + p = np.array([0.0,0.0,0.0],dtype=np.float64) + q = np.array([1.0,0.0,0.0],dtype=np.float64) + assert_equal(dist(p,q),1.0) + p = 
np.array([0.0,0.0,0.0],dtype=np.float64) + q = np.array([1.0,1.0,0.0],dtype=np.float64) + assert_equal(dist(p,q),np.sqrt(2.0)) + p = np.array([0.0,0.0,0.0],dtype=np.float64) + q = np.array([1.0,1.0,1.0],dtype=np.float64) + assert_equal(dist(p,q),np.sqrt(3.0)) + + +def test_knn_direct(seed=1): + from yt.utilities.lib.geometry_utils import knn_direct + np.random.seed(seed) + k = 64 + N = 1e5 + idx = np.arange(N,dtype=np.uint64) + rad = np.arange(N,dtype=np.float64) + pos = np.vstack(3*[rad**2/3.0]).T + sort_shf = np.arange(N,dtype=np.uint64) + for i in range(20): + np.random.shuffle(sort_shf) + sort_ans = np.argsort(sort_shf)[:k] + sort_out = knn_direct(pos[sort_shf,:], k, sort_ans[0], idx) + assert_array_equal(sort_out,sort_ans) + +# TODO: test of quadtree (.pxd) + def test_obtain_position_vector(): ds = fake_random_ds(64, nprocs=8, fields=_fields, negative = [False, True, True, True]) diff --git a/yt/utilities/lib/tests/test_nn.py b/yt/utilities/lib/tests/test_nn.py new file mode 100644 index 00000000000..5c0b0833e0a --- /dev/null +++ b/yt/utilities/lib/tests/test_nn.py @@ -0,0 +1,30 @@ +import numpy as np + +from yt.utilities.lib.bounded_priority_queue import \ + validate, \ + validate_pid, \ + validate_nblist + +from yt.testing import assert_array_equal + +# These test functions use utility functions in +# yt.utilities.lib.bounded_priority_queue +# to test functions which are not exposed at a python level +def test_bounded_priority_queue(): + dists = validate() + answers = np.array([0.1, 0.001, -1., -1., -1.]) + assert_array_equal(answers, dists) + +def test_bounded_priority_queue_pid(): + dists, pids = validate_pid() + answers = np.array([0.1, 0.001, -1., -1., -1.]) + answers_pids = np.array([ 1, 10, -1, -1, -1]) + assert_array_equal(answers, dists) + assert_array_equal(answers_pids, pids) + +def test_neighbor_list(): + data, pids = validate_nblist() + answers_data = np.array([1.0, 1.0, 1.0, 1.0]) + answers_pids = np.array([0, 1, 2, 3]) + assert_array_equal(answers_data, data) + assert_array_equal(answers_pids, pids) diff --git a/yt/utilities/lib/volume_container.pxd b/yt/utilities/lib/volume_container.pxd index ada65aec97e..3905518f473 100644 --- a/yt/utilities/lib/volume_container.pxd +++ b/yt/utilities/lib/volume_container.pxd @@ -6,13 +6,6 @@ A volume container """ -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- cimport numpy as np diff --git a/yt/utilities/lib/write_array.pyx b/yt/utilities/lib/write_array.pyx index 98fe42d1e3f..291bbbbbb72 100644 --- a/yt/utilities/lib/write_array.pyx +++ b/yt/utilities/lib/write_array.pyx @@ -5,13 +5,6 @@ Faster, cythonized file IO """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
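
# --- Editor's illustrative sketch (not part of the patch) -------------------
# The tests in test_nn.py above drive yt's Cython bounded priority queue, which
# keeps only the k smallest squared distances seen so far -- the core of a
# k-nearest-neighbour search.  A minimal pure-Python analogue using heapq
# (a max-heap of negated distances) is sketched below; it is an illustration
# only, not the API of yt/utilities/lib/bounded_priority_queue.pyx.
import heapq

class ToyBoundedPriorityQueue:
    def __init__(self, k):
        self.k = k
        self._heap = []  # stores (-distance, pid); heap[0] holds the current largest distance

    def add(self, dist, pid):
        if len(self._heap) < self.k:
            heapq.heappush(self._heap, (-dist, pid))
        elif dist < -self._heap[0][0]:
            heapq.heapreplace(self._heap, (-dist, pid))  # evict the current worst neighbour

    def items(self):
        # largest distance first, mirroring the ordering of the padded arrays
        # returned by validate_pid() above
        return sorted(((-d, p) for d, p in self._heap), reverse=True)

q = ToyBoundedPriorityQueue(2)
for d, p in [(0.5, 3), (0.1, 1), (0.001, 10), (0.2, 7)]:
    q.add(d, p)
assert q.items() == [(0.1, 1), (0.001, 10)]
# -----------------------------------------------------------------------------
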
-#----------------------------------------------------------------------------- import numpy as np cimport numpy as np diff --git a/yt/utilities/linear_interpolators.py b/yt/utilities/linear_interpolators.py index 7adca0b086b..edc54faeead 100644 --- a/yt/utilities/linear_interpolators.py +++ b/yt/utilities/linear_interpolators.py @@ -1,19 +1,3 @@ -""" -A collection of helper functions, most generally for things -that SciPy doesn't have that I expected it to - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import mylog diff --git a/yt/utilities/load_sample.py b/yt/utilities/load_sample.py new file mode 100644 index 00000000000..a5d0537945d --- /dev/null +++ b/yt/utilities/load_sample.py @@ -0,0 +1,143 @@ +""" +sample data manager for yt + +This utility will check to see if sample data exists on disc. +If not, it will download it. + +""" +import os +import yt.utilities.sample_data as sd +from yt.funcs import mylog +from yt.convenience import load +from yt.utilities.on_demand_imports import _pooch as pch + +def load_sample(name=None, specific_file=None): + """ + Load sample data with yt. Simple wrapper around yt.load to include fetching + data with pooch. + + Parameters + ---------- + name : str or None + The name of the sample data to load. This is generally the name of the + folder of the dataset. For IsolatedGalaxy, the name would be + `IsolatedGalaxy`. If `None` is supplied, the return value + will be a list of all known datasets (by name). + + specific_file : str, optional + optional argument -- the name of the file to load that is located + within sample dataset of `name`. For the dataset `enzo_cosmology_plus`, + which has a number of timesteps available, one may wish to choose + DD0003. The file specifically would be + `enzo_cosmology_plus/DD0003/DD0003`, and the argument passed to this + variable would be `DD0003/DD0003` + + """ + fido = sd.Fido() + if name is None: + keys = [] + for key in fido._registry: + for ext in sd._extensions_to_strip: + if key.endswith(ext): key = key[:-len(ext)] + keys.append(key) + return keys + + base_path = fido.fido.path + fileext, name, extension = _validate_sampledata_name(name) + + if extension == "h5": + fname = fetch_noncompressed_file(fileext, fido) + else: + # we are going to assume most files that exist on the hub are + # compressed in .tar folders. Some may not. + fname = fetch_compressed_file(fileext, fido) + + # The `folder_path` variable is used here to notify the user where the + # files have been unpacked to. However, we can't assume this is reliable + # because in some cases the common path will overlap with the `load_name` + # variable of the file. + folder_path = os.path.commonprefix(fname) + mylog.info("Files located at %s", folder_path) + + # Location of the file to load automatically, registered in the Fido class + info = fido[fileext] + file_lookup = info['load_name'] + optional_args = info['load_kwargs'] + + if specific_file is None: + # right now work on loading only untarred files. 
build out h5 later + mylog.info("Default to loading %s for %s dataset", file_lookup, name) + loaded_file = os.path.join(base_path, "%s.untar" %fileext, + name, file_lookup) + else: + mylog.info("Loading %s for %s dataset", specific_file, name) + loaded_file = os.path.join(base_path, "%s.untar" %fileext, name, + specific_file) + + return load(loaded_file, **optional_args) + +def _validate_sampledata_name(name): + """ + format name of sample data passed to function, accepts a named string + argument and parses it to determine the sample data name, what type of + extension it has, or other relevant information. + + returns + ------- + fileext : str + The name of the sample data, with the file extension + example: "IsolatedGalaxy.tar.gz" + basename : str + The name of the sample data, without the file extension + example: "IsolatedGalaxy" + extension : str + name of extension of remote sample data + example: "h5" or "tar" + """ + + if not isinstance(name, str): + mylog.error("The argument {} passed to ".format(name) + \ + "load_sample() is not a string.") + + # now get the extension if it exists + base, ext = os.path.splitext(name) + if ext == '': + # Right now we are assuming that any name passed without an explicit + # extension is packed in a tarball. This logic can be modified later to + # be more flexible. + fileext = "%s.tar.gz" %name + basename = name + extension = "tar" + elif ext == ".gz": + fileext = name + basename = os.path.splitext(base)[0] + extension = "tar" + elif ext in [".h5", ".hdf5"]: + fileext = name + basename = base + extension = "h5" + else: + mylog.info( + """extension of %s for dataset %s is unexpected. the `load_data` + function may not work as expected""", + ext, name ) + extension = ext + fileext = name + basename = base + return fileext, basename, extension + + +def fetch_compressed_file(name, fido): + """ + Load a large compressed file from the data registry + """ + fname = fido.fido.fetch(name, processor=pch.pooch.Untar()) + return fname + +def fetch_noncompressed_file(name, fido): + """ + Load an uncompressed file from the data registry + """ + fname = fido.fido.fetch(name) + return fname + diff --git a/yt/utilities/lodgeit.py b/yt/utilities/lodgeit.py index 18fabc494a2..682dd2b7250 100644 --- a/yt/utilities/lodgeit.py +++ b/yt/utilities/lodgeit.py @@ -110,10 +110,10 @@ def make_utf8(text, encoding): def get_xmlrpc_service(): """Create the XMLRPC server proxy and cache it.""" global _xmlrpc_service - from yt.extern.six.moves import xmlrpc_client + import xmlrpc.client if _xmlrpc_service is None: try: - _xmlrpc_service = xmlrpc_client.ServerProxy(SERVICE_URL + 'xmlrpc/', + _xmlrpc_service = xmlrpc.client.ServerProxy(SERVICE_URL + 'xmlrpc/', allow_none=True) except Exception as err: fail('Could not connect to Pastebin: %s' % err, -1) diff --git a/yt/utilities/logger.py b/yt/utilities/logger.py index 32228299fff..ad2aaf3395f 100644 --- a/yt/utilities/logger.py +++ b/yt/utilities/logger.py @@ -1,19 +1,3 @@ -""" -Logging facility for yt -Will initialize everything, and associate one with each module - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
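
# --- Editor's illustrative sketch (not part of the patch) -------------------
# A minimal usage sketch for the new load_sample() helper defined above,
# assuming the shipped registry knows about "IsolatedGalaxy" and
# "enzo_cosmology_plus" (the names used in the docstring) and that network
# access is available; data is fetched and cached through pooch on first use.
from yt.utilities.load_sample import load_sample

available = load_sample()                      # name=None lists known dataset names
ds = load_sample("IsolatedGalaxy")             # download, untar and yt.load the default file
ds_dd3 = load_sample("enzo_cosmology_plus",    # pick a specific output of a time series
                     specific_file="DD0003/DD0003")
# -----------------------------------------------------------------------------
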
-#-----------------------------------------------------------------------------
-
 import logging
 import sys
 from yt.config import ytcfg
diff --git a/yt/utilities/lru_cache.py b/yt/utilities/lru_cache.py
index 3076cce1b47..a2bb1e599e6 100644
--- a/yt/utilities/lru_cache.py
+++ b/yt/utilities/lru_cache.py
@@ -1,21 +1,3 @@
-"""
-
-lru_cache compatible with py2.6->py3.2 copied directly from
-http://code.activestate.com/
-recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/
-
-adapted from sympy by Nathan Goldbaum
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2016, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
 import sys
 from collections import namedtuple
diff --git a/yt/utilities/math_utils.py b/yt/utilities/math_utils.py
index a40f49e9d2a..8aad1760131 100644
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -1,18 +1,3 @@
-"""
-Commonly used mathematical functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
 import numpy as np
 import math
 from yt.units.yt_array import \
@@ -142,6 +127,95 @@ def periodic_dist(a, b, period, periodicity=(True, True, True)):
         return np.sqrt(r2[0,0])
     return np.sqrt(r2)
 
+def periodic_ray(start, end, left=None, right=None):
+    """
+    periodic_ray(start, end, left=None, right=None)
+
+    Break up periodic ray into non-periodic segments.
+    Accepts start and end points of periodic ray as YTArrays.
+    Accepts optional left and right edges of periodic volume as YTArrays.
+    Returns a list of lists of coordinates, where each element of the
+    top-most list is a 2-list of start coords and end coords of the
+    non-periodic ray:
+
+    [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
+     [[x1start,y1start,z1start], [x1end, y1end, z1end]],
+     ...,]
+
+    Parameters
+    ----------
+    start : array
+        The starting coordinate of the ray.
+    end : array
+        The ending coordinate of the ray.
+    left : optional, array
+        The left corner of the periodic domain. If not given, an array
+        of zeros with same size as the starting coordinate is used.
+    right : optional, array
+        The right corner of the periodic domain. If not given, an array
+        of ones with same size as the starting coordinate is used.
+ + Examples + -------- + >>> import yt + >>> start = yt.YTArray([0.5, 0.5, 0.5]) + >>> end = yt.YTArray([1.25, 1.25, 1.25]) + >>> periodic_ray(start, end) + [[YTArray([0.5, 0.5, 0.5]) (dimensionless), YTArray([1., 1., 1.]) (dimensionless)], + [YTArray([0., 0., 0.]) (dimensionless), YTArray([0.25, 0.25, 0.25]) (dimensionless)]] + + """ + + if left is None: + left = np.zeros(start.shape) + if right is None: + right = np.ones(start.shape) + dim = right - left + + vector = end - start + wall = np.zeros_like(start) + close = np.zeros(start.shape, dtype=object) + + left_bound = vector < 0 + right_bound = vector > 0 + no_bound = vector == 0.0 + bound = vector != 0.0 + + wall[left_bound] = left[left_bound] + close[left_bound] = np.max + wall[right_bound] = right[right_bound] + close[right_bound] = np.min + wall[no_bound] = np.inf + close[no_bound] = np.min + + segments = [] + this_start = start.copy() + this_end = end.copy() + t = 0.0 + tolerance = 1e-6 + while t < 1.0 - tolerance: + hit_left = (this_start <= left) & (vector < 0) + if (hit_left).any(): + this_start[hit_left] += dim[hit_left] + this_end[hit_left] += dim[hit_left] + hit_right = (this_start >= right) & (vector > 0) + if (hit_right).any(): + this_start[hit_right] -= dim[hit_right] + this_end[hit_right] -= dim[hit_right] + + nearest = vector.unit_array * \ + np.array([close[q]([this_end[q], wall[q]]) \ + for q in range(start.size)]) + dt = ((nearest - this_start) / vector)[bound].min() + now = this_start + vector * dt + close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10 + now[close_enough] = nearest[close_enough] + segments.append([this_start.copy(), now.copy()]) + this_start = now.copy() + t += dt + + return segments + def euclidean_dist(a, b): r"""Find the Euclidean distance between two points. diff --git a/yt/utilities/mesh_code_generation.py b/yt/utilities/mesh_code_generation.py index 07a3b2ec2b1..e51fdc99356 100644 --- a/yt/utilities/mesh_code_generation.py +++ b/yt/utilities/mesh_code_generation.py @@ -1,26 +1,3 @@ -""" -This file contains code for automatically generating the functions and jacobians -used when sampling inside the supported finite element mesh types. The supported -mesh types are defined in yt/utilities/mesh_types.yaml. - -Usage (from the yt/utilities directory): - -python mesh_code_generation.py - -This will generate the necessary functions and write them to -yt/utilities/lib/autogenerated_element_samplers.pyx. - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from sympy import \ symarray, \ diff, \ diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index 337afaa0d28..04a35df2683 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -1,18 +1,3 @@ -""" -Skeleton objects that represent a few fundamental yt data types. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
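
# --- Editor's illustrative check (not part of the patch) --------------------
# The segments returned by periodic_ray() should tile the original ray, so
# their lengths add up to the unwrapped ray length.  This re-uses the
# docstring's own example (a sketch; assumes a dimensionless unit-cube domain).
import numpy as np
import yt
from yt.utilities.math_utils import periodic_ray

start = yt.YTArray([0.5, 0.5, 0.5])
end = yt.YTArray([1.25, 1.25, 1.25])
segments = periodic_ray(start, end)
total = sum(float(np.linalg.norm(b - a)) for a, b in segments)
assert np.isclose(total, float(np.linalg.norm(end - start)))
# -----------------------------------------------------------------------------
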
-#----------------------------------------------------------------------------- - import numpy as np import abc import json @@ -20,13 +5,12 @@ from yt.utilities.on_demand_imports import _h5py as h5 import os from uuid import uuid4 -from yt.extern.six.moves import urllib -from yt.extern.six.moves import cPickle as pickle +import urllib +import pickle from tempfile import TemporaryFile from yt.config import ytcfg from yt.funcs import \ iterable, get_pbar, compare_dicts -from yt.extern.six import add_metaclass, string_types, b from yt.utilities.exceptions import \ YTHubRegisterError from yt.utilities.logger import ytLogger as mylog @@ -47,10 +31,10 @@ def _sanitize_list(flist): temp = [] for item in flist: - if isinstance(item, string_types): - temp.append(b(item)) + if isinstance(item, str): + temp.append(item.encode("latin-1")) elif isinstance(item, tuple) and \ - all(isinstance(i, string_types) for i in item): + all(isinstance(i, str) for i in item): temp.append(tuple(_sanitize_list(list(item)))) else: temp.append(item) @@ -69,7 +53,7 @@ def _serialize_to_h5(g, cdict): elif isinstance(cdict[item], list): g[item] = _sanitize_list(cdict[item]) elif isinstance(cdict[item], tuple) and \ - all(isinstance(i, string_types) for i in cdict[item]): + all(isinstance(i, str) for i in cdict[item]): g[item] = tuple(_sanitize_list(cdict[item])) else: g[item] = cdict[item] @@ -116,9 +100,7 @@ class ContainerClass(object): pass -@add_metaclass(abc.ABCMeta) -class MinimalRepresentation(object): - +class MinimalRepresentation(metaclass = abc.ABCMeta): def _update_attrs(self, obj, attr_list): for attr in attr_list: setattr(self, attr, getattr(obj, attr, None)) diff --git a/yt/utilities/on_demand_imports.py b/yt/utilities/on_demand_imports.py index 3c9f2c73ddf..50f6afe9d11 100644 --- a/yt/utilities/on_demand_imports.py +++ b/yt/utilities/on_demand_imports.py @@ -1,15 +1,3 @@ -""" -A set of convenient on-demand imports -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from pkg_resources import parse_version import sys @@ -202,6 +190,22 @@ def __version__(self): _cartopy = cartopy_imports() +class pooch_imports(object): + _name = "pooch" + + _pooch = None + @property + def pooch(self): + if self._pooch is None: + try: + import pooch as pooch + except ImportError: + pooch = NotAModule(self._name) + self._pooch = pooch + return self._pooch + +_pooch = pooch_imports() + class scipy_imports(object): _name = "scipy" _integrate = None @@ -281,6 +285,17 @@ def spatial(self): self._spatial = spatial return self._spatial + _ndimage = None + @property + def ndimage(self): + if self._ndimage is None: + try: + import scipy.ndimage as ndimage + except ImportError: + ndimage = NotAModule(self._name) + self._ndimage = ndimage + return self._ndimage + _scipy = scipy_imports() class h5py_imports(object): @@ -488,6 +503,30 @@ def FullLoader(self): _yaml = yaml_imports() +class NotMiniball(NotAModule): + def __init__(self, pkg_name): + super(NotMiniball, self).__init__(pkg_name) + str = ("This functionality requires the %s package to be installed. 
" + "Installation instructions can be found at " + "https://github.com/weddige/miniball or alternatively you can " + "install via `pip install MiniballCpp`.") + self.error = ImportError(str % self.pkg_name) + +class miniball_imports(object): + _name = 'miniball' + _Miniball = None + + @property + def Miniball(self): + if self._Miniball is None: + try: + from miniball import Miniball + except ImportError: + Miniball = NotMiniball(self._name) + self._Miniball = Miniball + return self._Miniball + +_miniball = miniball_imports() class f90nml_imports(object): _name = "f90nml" diff --git a/yt/utilities/operator_registry.py b/yt/utilities/operator_registry.py index 02f9696d815..c262a7eb835 100644 --- a/yt/utilities/operator_registry.py +++ b/yt/utilities/operator_registry.py @@ -1,24 +1,8 @@ -""" -Operation registry class - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import copy -from yt.extern.six import string_types class OperatorRegistry(dict): def find(self, op, *args, **kwargs): - if isinstance(op, string_types): + if isinstance(op, str): # Lookup, assuming string or hashable object op = copy.deepcopy(self[op]) op.args = args diff --git a/yt/utilities/orientation.py b/yt/utilities/orientation.py index 9a5bedb3199..cffc30b199c 100644 --- a/yt/utilities/orientation.py +++ b/yt/utilities/orientation.py @@ -1,19 +1,3 @@ -""" -A class that manages the coordinate system for orientable data -containers and cameras. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import mylog diff --git a/yt/utilities/parallel_tools/__init__.py b/yt/utilities/parallel_tools/__init__.py index 22fb07ed3e8..ad451d69e99 100644 --- a/yt/utilities/parallel_tools/__init__.py +++ b/yt/utilities/parallel_tools/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/utilities/parallel_tools/controller_system.py b/yt/utilities/parallel_tools/controller_system.py index b0538adaaff..115b3f93ccd 100644 --- a/yt/utilities/parallel_tools/controller_system.py +++ b/yt/utilities/parallel_tools/controller_system.py @@ -1,18 +1,3 @@ -""" -A queueing system based on MPI - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .parallel_analysis_interface import \ ProcessorPool from abc import abstractmethod diff --git a/yt/utilities/parallel_tools/io_runner.py b/yt/utilities/parallel_tools/io_runner.py index f5e0aa3dfb0..7debee4895f 100644 --- a/yt/utilities/parallel_tools/io_runner.py +++ b/yt/utilities/parallel_tools/io_runner.py @@ -1,18 +1,3 @@ -""" -A simple IO staging mechanism - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.utilities.logger import ytLogger as mylog from .parallel_analysis_interface import \ @@ -55,9 +40,9 @@ def __init__(self, ds, wg, pool): def initialize_data(self): ds = self.ds fields = [f for f in ds.field_list - if not ds.field_info[f].particle_type] + if not ds.field_info[f].sampling_type == "particle"] pfields = [f for f in ds.field_list - if ds.field_info[f].particle_type] + if ds.field_info[f].sampling_type == "particle"] # Preload is only defined for Enzo ... if ds.index.io._dataset_type == "enzo_packed_3d": self.queue = ds.index.io.queue @@ -77,7 +62,7 @@ def initialize_data(self): def _read(self, g, f): fi = self.ds.field_info[f] - if fi.particle_type and g.NumberOfParticles == 0: + if fi.sampling_type == "particle" and g.NumberOfParticles == 0: # because this gets upcast to float return np.array([],dtype='float64') try: @@ -128,7 +113,7 @@ def _read_data_set(self, grid, field): dest = self.proc_map[grid.id] msg = dict(grid_id = grid.id, field = field, op="read") mylog.debug("Requesting %s for %s from %s", field, grid, dest) - if self.ds.field_info[field].particle_type: + if self.ds.field_info[field].sampling_type == "particle": data = np.empty(grid.NumberOfParticles, 'float64') else: data = np.empty(grid.ActiveDimensions, 'float64') diff --git a/yt/utilities/parallel_tools/parallel_analysis_interface.py b/yt/utilities/parallel_tools/parallel_analysis_interface.py index f21ad33eb6e..37921389994 100644 --- a/yt/utilities/parallel_tools/parallel_analysis_interface.py +++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py @@ -1,20 +1,4 @@ -""" -Parallel data mapping techniques for yt - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.extern.six.moves import cStringIO +from io import StringIO import itertools import logging import numpy as np @@ -471,7 +455,7 @@ def parallel_objects(objects, njobs = 0, storage = None, barrier = True, ... sto.result = sp.quantities["AngularMomentumVector"]() ... >>> for sphere_id, L in sorted(storage.items()): - ... print centers[sphere_id], L + ... print(centers[sphere_id], L) ... """ @@ -571,7 +555,7 @@ def parallel_ring(objects, generator_func, mutable = False): ... >>> obj = range(8) >>> for obj, arr in parallel_ring(obj, gfunc): - ... print arr['x'].sum(), arr['y'].sum(), arr['z'].sum() + ... print(arr['x'].sum(), arr['y'].sum(), arr['z'].sum()) ... 
""" @@ -809,7 +793,7 @@ def mpi_bcast(self, data, root = 0): registry = UnitRegistry(lut=info[3], add_default_symbols=False) if info[-1] == "ImageArray": data = ImageArray(np.empty(info[0], dtype=info[1]), - input_units=info[2], + units=info[2], registry=registry) else: data = YTArray(np.empty(info[0], dtype=info[1]), @@ -924,7 +908,7 @@ def write_on_root(self, fn): if self.comm.rank == 0: return open(fn, "w") else: - return cStringIO() + return StringIO() def get_filename(self, prefix, rank=None): if not self._distributed: return prefix @@ -976,14 +960,14 @@ def merge_quadtree_buffers(self, qt, merge_style): while mask < size: if (mask & rank) != 0: target = (rank & ~mask) % size - #print "SENDING FROM %02i to %02i" % (rank, target) + #print("SENDING FROM %02i to %02i" % (rank, target)) buf = qt.tobuffer() self.send_quadtree(target, buf, tgd, args) #qt = self.recv_quadtree(target, tgd, args) else: target = (rank | mask) if target < size: - #print "RECEIVING FROM %02i on %02i" % (target, rank) + #print("RECEIVING FROM %02i on %02i" % (target, rank)) buf = self.recv_quadtree(target, tgd, args) qto = QuadTree(tgd, args[2], qt.bounds) qto.frombuffer(buf[0], buf[1], buf[2], merge_style) @@ -1038,7 +1022,7 @@ def recv_array(self, source, tag = 0): if len(metadata) == 5: registry = UnitRegistry(lut=metadata[3], add_default_symbols=False) if metadata[-1] == "ImageArray": - arr = ImageArray(arr, input_units=metadata[2], + arr = ImageArray(arr, units=metadata[2], registry=registry) else: arr = YTArray(arr, metadata[2], registry=registry) @@ -1061,7 +1045,7 @@ def alltoallv_array(self, send, total_size, offsets, sizes): # We assume send.units is consistent with the units # on the receiving end. if isinstance(send, ImageArray): - recv = ImageArray(recv, input_units=send.units) + recv = ImageArray(recv, units=send.units) else: recv = YTArray(recv, send.units) recv[offset:offset+send.size] = send[:] diff --git a/yt/utilities/parallel_tools/task_queue.py b/yt/utilities/parallel_tools/task_queue.py index 85203692008..5e014972368 100644 --- a/yt/utilities/parallel_tools/task_queue.py +++ b/yt/utilities/parallel_tools/task_queue.py @@ -1,18 +1,3 @@ -""" -Task queue in yt - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.funcs import mylog diff --git a/yt/utilities/parameter_file_storage.py b/yt/utilities/parameter_file_storage.py index e9b149c085b..efa258083ce 100644 --- a/yt/utilities/parameter_file_storage.py +++ b/yt/utilities/parameter_file_storage.py @@ -1,18 +1,3 @@ -""" -A simple CSV database for grabbing and storing datasets - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import csv import os.path from itertools import islice diff --git a/yt/utilities/particle_generator.py b/yt/utilities/particle_generator.py index 870c1e8ed9d..9cf0c13da49 100644 --- a/yt/utilities/particle_generator.py +++ b/yt/utilities/particle_generator.py @@ -3,7 +3,6 @@ CICSample_3 from yt.funcs import get_pbar, issue_deprecation_warning from yt.units.yt_array import uconcatenate -from yt.extern.six import string_types class ParticleGenerator(object): @@ -18,7 +17,7 @@ def __init__(self, ds, num_particles, field_list, ptype="io"): """ self.ds = ds self.num_particles = num_particles - self.field_list = [(ptype, fd) if isinstance(fd, string_types) else fd + self.field_list = [(ptype, fd) if isinstance(fd, str) else fd for fd in field_list] self.field_list.append((ptype, "particle_index")) self.field_units = dict( @@ -120,7 +119,7 @@ def _setup_particles(self, x, y, z, setup_fields=None): self.ParticleGridIndices[1] = self.NumberOfParticles.squeeze() if setup_fields is not None: for key, value in setup_fields.items(): - field = (self.ptype, key) if isinstance(key, string_types) else key + field = (self.ptype, key) if isinstance(key, str) else key if field not in self.default_fields: self.particles[:,self.field_list.index(field)] = value[idxs] diff --git a/yt/utilities/performance_counters.py b/yt/utilities/performance_counters.py index eb3daad64bd..45025f9e268 100644 --- a/yt/utilities/performance_counters.py +++ b/yt/utilities/performance_counters.py @@ -1,18 +1,3 @@ -""" -Minimalist performance counting for yt - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import atexit import time diff --git a/yt/utilities/periodic_table.py b/yt/utilities/periodic_table.py index e3092ee695a..b25a6395cc0 100644 --- a/yt/utilities/periodic_table.py +++ b/yt/utilities/periodic_table.py @@ -1,21 +1,5 @@ -""" -A simple periodic table. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import numbers -from yt.extern.six import string_types _elements = ( (1, 1.0079400000, "Hydrogen", "H"), @@ -164,7 +148,7 @@ def __init__(self): def __getitem__(self, key): if isinstance(key, (np.number, numbers.Number)): d = self.elements_by_number - elif isinstance(key, string_types): + elif isinstance(key, str): if len(key) <= 2: d = self.elements_by_symbol elif len(key) == 3 and key[0] == "U": diff --git a/yt/utilities/physical_ratios.py b/yt/utilities/physical_ratios.py index 51d6618ee2c..ee446c07fd9 100644 --- a/yt/utilities/physical_ratios.py +++ b/yt/utilities/physical_ratios.py @@ -122,6 +122,10 @@ rho_crit_g_cm3_h2 = 1.8784710838431654e-29 primordial_H_mass_fraction = 0.76 +_primordial_mass_fraction = \ + {"H": primordial_H_mass_fraction, + "He": (1 - primordial_H_mass_fraction)} + # Misc. 
Approximations mass_mean_atomic_cosmology = 1.22 mass_mean_atomic_galactic = 2.3 diff --git a/yt/utilities/png_writer.py b/yt/utilities/png_writer.py index bafa2f2f025..8513a28e35f 100644 --- a/yt/utilities/png_writer.py +++ b/yt/utilities/png_writer.py @@ -1,22 +1,6 @@ -""" -Writing PNGs -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import matplotlib._png as _png -from yt.extern.six import PY2 -if PY2: - from cStringIO import StringIO -else: - from io import BytesIO as StringIO +from io import BytesIO def call_png_write_png(buffer, width, height, fileobj, dpi): _png.write_png(buffer, fileobj, dpi) @@ -30,7 +14,7 @@ def write_png(buffer, filename, dpi=100): def write_png_to_string(buffer, dpi=100, gray=0): width = buffer.shape[1] height = buffer.shape[0] - fileobj = StringIO() + fileobj = BytesIO() call_png_write_png(buffer, width, height, fileobj, dpi) png_str = fileobj.getvalue() fileobj.close() diff --git a/yt/utilities/poster/encode.py b/yt/utilities/poster/encode.py index a038ccfe99f..bcc655eca5c 100644 --- a/yt/utilities/poster/encode.py +++ b/yt/utilities/poster/encode.py @@ -22,7 +22,7 @@ def gen_boundary(): return sha.new(str(bits)).hexdigest() import re, os, mimetypes -from yt.extern.six.moves import urllib +import urllib try: from email.header import Header except ImportError: diff --git a/yt/utilities/poster/streaminghttp.py b/yt/utilities/poster/streaminghttp.py index 595b6f6862b..d8c0538e748 100644 --- a/yt/utilities/poster/streaminghttp.py +++ b/yt/utilities/poster/streaminghttp.py @@ -27,8 +27,8 @@ """ from __future__ import print_function -import yt.extern.six.moves.http_client as http_client -import yt.extern.six.moves.urllib as urllib +import http.client as http_client +import urllib import socket import sys diff --git a/yt/utilities/rpdb.py b/yt/utilities/rpdb.py index 60f488a5a71..169b4620c9f 100644 --- a/yt/utilities/rpdb.py +++ b/yt/utilities/rpdb.py @@ -1,28 +1,12 @@ -""" -Some simple localhost-only remote pdb hacks - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import cmd import pdb import socket import sys -from yt.extern.six.moves import StringIO +from io import StringIO import traceback import signal -from yt.extern.six.moves.xmlrpc_server import SimpleXMLRPCServer -from yt.extern.six.moves.xmlrpc_client import ServerProxy +from xmlrpc.server import SimpleXMLRPCServer +from xmlrpc.client import ServerProxy from yt.config import ytcfg class PdbXMLRPCServer(SimpleXMLRPCServer): diff --git a/yt/utilities/sample_data.py b/yt/utilities/sample_data.py new file mode 100644 index 00000000000..ac6da707669 --- /dev/null +++ b/yt/utilities/sample_data.py @@ -0,0 +1,56 @@ +""" +Title: sample_data.py +Purpose: Contains functions used for automatic downloading and loading + of sample data that is not already present locally. 
+""" +import pkg_resources +import json +import os + +from yt.utilities.on_demand_imports import _pooch as pch + +from yt.config import ytcfg + +## The format of the data registry json: +## +## { +## 'dataset_archive_name.tar.gz': { +## 'hash': '...', +## 'url': '...', +## 'load_kwargs': {}, +## 'load_name': 'supplied_to_load' +## } +## } + +_extensions_to_strip = (".tgz", ".tar.gz", ".gz") + +class Fido: + r""" + Container for a pooch object used to fetch remote data that isn't + already stored locally. + """ + def __init__(self, filename="sample_data_registry.json", cache_dir=None): + self.filename = filename + self._registry = json.load(pkg_resources.resource_stream("yt", self.filename)) + if cache_dir is None: + if os.path.isdir(ytcfg.get("yt", "test_data_dir")): + cache_dir = ytcfg.get("yt", "test_data_dir") + else: + cache_dir = pch.pooch.os_cache("yt") + self.fido = pch.pooch.create( + path=cache_dir, + registry={_: self._registry[_]['hash'] for _ in self._registry}, + urls={_: self._registry[_]['url'] for _ in self._registry}, + env="YT_DATA_DIR", + base_url = "https://yt-project.org/data/" + ) + # Load the external registry file. It contains data file names, + # hashes used for validation, and the url for the data file + + def __getitem__(self, item): + if item in self._registry: + return self._registry[item] + for ext in _extensions_to_strip: + if item + ext in self._registry: + return self._registry[item + ext] + raise KeyError(item) diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index c223d192b35..c4a99373347 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -1,5 +1,5 @@ from __future__ import print_function -from yt.extern.six.moves import cStringIO +from io import StringIO import os import numpy as np @@ -280,8 +280,8 @@ def __init__(self, filename = None, header=None): -------- >>> sdf = SDFRead("data.sdf", header="data.hdr") - >>> print sdf.parameters - >>> print sdf['x'] + >>> print(sdf.parameters) + >>> print(sdf['x']) """ self.filename = filename @@ -453,8 +453,8 @@ class HTTPSDFRead(SDFRead): -------- >>> sdf = SDFRead("data.sdf", header="data.hdr") - >>> print sdf.parameters - >>> print sdf['x'] + >>> print(sdf.parameters) + >>> print(sdf['x']) """ @@ -470,7 +470,7 @@ def parse_header(self): # Pre-process ascfile = self.HTTPArray(self.header) max_header_size = 1024*1024 - lines = cStringIO(ascfile[:max_header_size].data[:]) + lines = StringIO(ascfile[:max_header_size].data[:]) while True: l = lines.readline() if self._eof in l: break @@ -515,8 +515,8 @@ def load_sdf(filename, header=None): -------- >>> sdf = SDFRead("data.sdf", header="data.hdr") - >>> print sdf.parameters - >>> print sdf['x'] + >>> print(sdf.parameters) + >>> print(sdf['x']) """ if 'http' in filename: @@ -752,7 +752,7 @@ def get_ibbox(self, ileft, iright): Given left and right indicies, return a mask and set of offsets+lengths into the sdf data. 
""" - #print 'Getting data from ileft to iright:', ileft, iright + #print('Getting data from ileft to iright:', ileft, iright) ix, iy, iz = (iright-ileft)*1j mylog.debug('MIDX IBBOX: %s %s %s %s %s' % (ileft, iright, ix, iy, iz)) @@ -779,7 +779,7 @@ def get_ibbox(self, ileft, iright): dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]]) dinds = dinds[dinds < self._max_key] dinds = dinds[self.indexdata['len'][dinds] > 0] - #print 'Getting boundary layers for wanderers, cells: %i' % dinds.size + #print('Getting boundary layers for wanderers, cells: %i' % dinds.size) # Correct For periodicity X[X < self.domain_buffer] += self.domain_active_dims @@ -789,7 +789,7 @@ def get_ibbox(self, ileft, iright): Y[Y >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims Z[Z >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims - #print 'periodic:', X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max() + #print('periodic:', X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()) indices = self.get_keyv([X, Y, Z]) # Only mask out if we are actually getting data rather than getting indices into @@ -851,7 +851,7 @@ def get_next_nonzero_chunk(self, key, stop=None): stop = self._max_key while key < stop: if self.indexdata['len'][key] == 0: - #print 'Squeezing keys, incrementing' + #print('Squeezing keys, incrementing') key += 1 else: break @@ -864,7 +864,7 @@ def get_previous_nonzero_chunk(self, key, stop=None): stop = self.indexdata['index'][0] while key > stop: if self.indexdata['len'][key] == 0: - #print 'Squeezing keys, decrementing' + #print('Squeezing keys, decrementing') key -= 1 else: break @@ -884,7 +884,7 @@ def iter_data(self, inds, fields): combined = 0 while nexti < num_inds: nextind = inds[nexti] - # print 'b: %i l: %i end: %i next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] ) + # print('b: %i l: %i end: %i next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )) if combined < 1024 and base + length == self.indexdata['base'][nextind]: length += self.indexdata['len'][nextind] i += 1 @@ -933,7 +933,7 @@ def filter_bbox(self, left, right, myiter): # Now get all particles that are within the bbox mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1) - #print 'Mask shape, sum:', mask.shape, mask.sum() + #print('Mask shape, sum:', mask.shape, mask.sum()) mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0])) @@ -947,7 +947,7 @@ def filter_bbox(self, left, right, myiter): filtered[f] = data[f][mask] #for i, ax in enumerate('xyz'): - # #print left, right + # #print(left, right) # assert np.all(filtered[ax] >= left[i]) # assert np.all(filtered[ax] < right[i]) @@ -1020,7 +1020,7 @@ def iter_filtered_bbox_fields(self, left, right, data, for f in fields: if f in pos_fields: continue - # print 'yielding nonpos field', f + # print('yielding nonpos field', f) yield f, data[f][mask] def iter_bbox_data(self, left, right, fields): @@ -1121,8 +1121,8 @@ def get_key_bounds(self, level, cell_iarr): level_rk = self.get_key(cell_iarr + level_buff) + 1 lmax_lk = (level_lk << shift*3) lmax_rk = (((level_rk) << shift*3) -1) - #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3) - #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3) + #print("Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)) + #print("Level ", 
self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)) return lmax_lk, lmax_rk def find_max_cell(self): @@ -1189,7 +1189,7 @@ def iter_padded_bbox_data(self, level, cell_iarr, pad, fields): for chunk in midx.iter_padded_bbox_data( 6, np.array([128]*3), 8.0, ['x','y','z','ident']): - print chunk['x'].max() + print(chunk['x'].max()) """ diff --git a/yt/utilities/tests/test_amr_kdtree.py b/yt/utilities/tests/test_amr_kdtree.py index a7c4ec3b04e..88d3ed45fda 100644 --- a/yt/utilities/tests/test_amr_kdtree.py +++ b/yt/utilities/tests/test_amr_kdtree.py @@ -1,18 +1,3 @@ -""" -Unit test the ARMKDTree in yt. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.utilities.amr_kdtree.api import AMRKDTree import yt.utilities.initial_conditions as ic import yt.utilities.flagging_methods as fm diff --git a/yt/utilities/tests/test_chemical_formulas.py b/yt/utilities/tests/test_chemical_formulas.py index c4c7f102497..8dd00526d22 100644 --- a/yt/utilities/tests/test_chemical_formulas.py +++ b/yt/utilities/tests/test_chemical_formulas.py @@ -1,5 +1,5 @@ -from yt.testing import assert_equal -from yt.utilities.chemical_formulas import ChemicalFormula +from yt.testing import assert_equal, assert_allclose +from yt.utilities.chemical_formulas import ChemicalFormula, default_mu from yt.utilities.periodic_table import periodic_table _molecules = ( @@ -21,3 +21,7 @@ def test_formulas(): for (n, c1), (e, c2) in zip(components, f.elements): assert_equal(n, e.symbol) assert_equal(c1, c2) + + +def test_default_mu(): + assert_allclose(default_mu, 0.5924489101195808) diff --git a/yt/utilities/tests/test_config.py b/yt/utilities/tests/test_config.py index 477d7db1ee0..5caecd684b2 100644 --- a/yt/utilities/tests/test_config.py +++ b/yt/utilities/tests/test_config.py @@ -1,12 +1,3 @@ -# -*- coding: UTF-8 -*- -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import contextlib import os import sys @@ -22,8 +13,8 @@ import yt.config from yt.config import \ CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE, CONFIG_DIR, YTConfigParser -from yt.extern.six import StringIO -from yt.extern.six.moves.configparser import NoOptionError +from io import StringIO +from configparser import NoOptionError from yt.fields.tests.test_fields_plugins import TEST_PLUGIN_FILE _TEST_PLUGIN = '_test_plugin.py' diff --git a/yt/utilities/tests/test_cosmology.py b/yt/utilities/tests/test_cosmology.py index c8bb386db12..224d7500485 100644 --- a/yt/utilities/tests/test_cosmology.py +++ b/yt/utilities/tests/test_cosmology.py @@ -1,19 +1,3 @@ -""" -Test cosmology calculator. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
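
# --- Editor's illustrative check (not part of the patch) --------------------
# The value asserted in test_default_mu above is consistent with the mean
# molecular weight of a fully ionized H/He gas at the primordial mass
# fractions added to physical_ratios.py (X = 0.76, Y = 1 - X).  This is only
# a plausibility check, not a statement of how chemical_formulas.default_mu
# is actually computed internally.
X, Y = 0.76, 0.24                 # primordial hydrogen / helium mass fractions
A_H, A_He = 1.00794, 4.002602     # standard atomic weights
mu = 1.0 / (2 * X / A_H + 3 * Y / A_He)   # 2 particles per H, 3 per He when fully ionized
print(mu)                         # ~0.59245, close to the asserted 0.5924489101195808
# -----------------------------------------------------------------------------
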
-#----------------------------------------------------------------------------- - import numpy as np import os from yt.utilities.on_demand_imports import \ diff --git a/yt/utilities/tests/test_decompose.py b/yt/utilities/tests/test_decompose.py index 1711865165b..2ad83192442 100644 --- a/yt/utilities/tests/test_decompose.py +++ b/yt/utilities/tests/test_decompose.py @@ -1,18 +1,3 @@ -""" -Test suite for cartesian domain decomposition. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.testing import assert_array_equal, assert_almost_equal import numpy as np import yt.utilities.decompose as dec diff --git a/yt/utilities/tests/test_load_sample.py b/yt/utilities/tests/test_load_sample.py new file mode 100644 index 00000000000..66de0ad946f --- /dev/null +++ b/yt/utilities/tests/test_load_sample.py @@ -0,0 +1,33 @@ +from yt.testing import assert_equal +from yt.utilities.load_sample import _validate_sampledata_name + +names = { "t1": {"load_name" : "IsolatedGalaxy.tar.gz", + "answers": { "fileext" : "IsolatedGalaxy.tar.gz", + "basename" : "IsolatedGalaxy", + "extension" : "tar"}}, + "t2": {"load_name" : "IsolatedGalaxy", + "answers": { "fileext" : "IsolatedGalaxy.tar.gz", + "basename" : "IsolatedGalaxy", + "extension" : "tar"}}, + "t3": {"load_name" : "apec_emissivity_v3.h5", + "answers": { "fileext" : "apec_emissivity_v3.h5", + "basename" : "apec_emissivity_v3", + "extension" : "h5"}}, + "t4": {"load_name" : "apec_emissivity_v3.hdf5", + "answers": { "fileext" : "apec_emissivity_v3.hdf5", + "basename" : "apec_emissivity_v3", + "extension" : "h5"}}, + "t5": {"load_name" : "solution-00027.0000.vtu", + "answers": { "fileext" : "solution-00027.0000.vtu", + "basename" : "solution-00027.0000", + "extension" : ".vtu"}} + } + +def test_name_validator(): + for test in names: + fileext, bname, ext = _validate_sampledata_name(names[test]["load_name"]) + expected_answers = names[test]["answers"] + assert_equal(fileext, expected_answers["fileext"]) + assert_equal(bname, expected_answers["basename"]) + assert_equal(ext, expected_answers["extension"]) + diff --git a/yt/utilities/tree_container.py b/yt/utilities/tree_container.py index 463e269bb6f..80865a22dfc 100644 --- a/yt/utilities/tree_container.py +++ b/yt/utilities/tree_container.py @@ -1,18 +1,3 @@ -""" -TreeContainer class and member functions - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - class TreeContainer(object): r"""A recursive data container for things like merger trees and clump-finder trees. diff --git a/yt/utilities/voropp.pyx b/yt/utilities/voropp.pyx index 86e6b49028c..13625774a4a 100644 --- a/yt/utilities/voropp.pyx +++ b/yt/utilities/voropp.pyx @@ -5,13 +5,6 @@ Wrapping code for voro++ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from cython.operator cimport dereference as deref, preincrement as inc from libc.stdlib cimport malloc, free, abs, calloc, labs diff --git a/yt/visualization/__init__.py b/yt/visualization/__init__.py index ccbbe15194c..938c0f5ff08 100644 --- a/yt/visualization/__init__.py +++ b/yt/visualization/__init__.py @@ -10,10 +10,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/visualization/api.py b/yt/visualization/api.py index 21fd8fc50a0..442d71181aa 100644 --- a/yt/visualization/api.py +++ b/yt/visualization/api.py @@ -1,18 +1,3 @@ -""" -API for yt.visualization - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .color_maps import \ add_colormap, \ show_colormaps, \ diff --git a/yt/visualization/base_plot_types.py b/yt/visualization/base_plot_types.py index cecbfe4b31d..1f69baee667 100644 --- a/yt/visualization/base_plot_types.py +++ b/yt/visualization/base_plot_types.py @@ -1,18 +1,3 @@ -""" -This is a place for base classes of the various plot types. - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import matplotlib import numpy as np diff --git a/yt/visualization/color_maps.py b/yt/visualization/color_maps.py index 9263e72e727..1f47bb8fe48 100644 --- a/yt/visualization/color_maps.py +++ b/yt/visualization/color_maps.py @@ -1,21 +1,8 @@ -""" - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np import matplotlib.colors as cc import matplotlib.cm as mcm from . import _colormap_data as _cm -from yt.extern.six import string_types try: import cmocean @@ -541,7 +528,7 @@ def make_colormap(ctuple_list, name=None, interpolate=True): # Figure out how many intervals there are total. 
rolling_index = 0 for i, (color, interval) in enumerate(ctuple_list): - if isinstance(color, string_types): + if isinstance(color, str): ctuple_list[i] = (color_dict[color], interval) rolling_index += interval scale = 256./rolling_index diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index 6a95b685499..e15f53437ec 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -1,18 +1,3 @@ -""" -DualEPS: A class to combine bitmap compression and vector graphics - - - -""" -from __future__ import absolute_import, print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import pyx import numpy as np from matplotlib import cm diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index f1e32d39780..643ce0062e0 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -1,29 +1,23 @@ -""" -FITSImageData Class -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- -from yt.extern.six import string_types import numpy as np +from itertools import count from yt.fields.derived_field import DerivedField from yt.funcs import mylog, iterable, fix_axis, ensure_list, \ issue_deprecation_warning from yt.visualization.fixed_resolution import FixedResolutionBuffer +from yt.data_objects.image_array import ImageArray from yt.data_objects.construction_data_containers import YTCoveringGrid from yt.utilities.on_demand_imports import _astropy from yt.units.yt_array import YTQuantity, YTArray +from yt.units.unit_object import Unit from yt.units import dimensions from yt.utilities.parallel_tools.parallel_analysis_interface import \ parallel_root_only -from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection +from yt.visualization.volume_rendering.off_axis_projection import \ + off_axis_projection import re import sys +from numbers import Number as numeric_type + class UnitfulHDU(object): def __init__(self, hdu): @@ -41,9 +35,13 @@ def __repr__(self): im_shape = " x ".join([str(s) for s in self.shape]) return "FITSImage: %s (%s, %s)" % (self.name, im_shape, self.units) + class FITSImageData(object): - def __init__(self, data, fields=None, units=None, width=None, wcs=None): + def __init__(self, data, fields=None, length_unit=None, width=None, + img_ctr=None, wcs=None, current_time=None, time_unit=None, + mass_unit=None, velocity_unit=None, magnetic_unit=None, + ds=None, unit_header=None, **kwargs): r""" Initialize a FITSImageData object. FITSImageData contains a collection of FITS ImageHDU instances and @@ -62,15 +60,37 @@ def __init__(self, data, fields=None, units=None, width=None, wcs=None): The field names for the data. If *fields* is none and *data* has keys, it will use these for the fields. If *data* is just a single array one field name must be specified. - units : string - The units of the WCS coordinates. Defaults to "cm". 
+ length_unit : string + The units of the WCS coordinates and the length unit of the file. + Defaults to the length unit of the dataset, if there is one, or + "cm" if there is not. width : float or YTQuantity The width of the image. Either a single value or iterable of values. If a float, assumed to be in *units*. Only used if this information is not already provided by *data*. - wcs : `astropy.wcs.WCS` instance, optional + img_ctr : array_like or YTArray + The center coordinates of the image. If a list or NumPy array, + it is assumed to be in units of *length_unit*. Only used if this information + is not already provided by *data*. + wcs : `~astropy.wcs.WCS` instance, optional Supply an AstroPy WCS instance. Will override automatic WCS creation from FixedResolutionBuffers and YTCoveringGrids. + current_time : float, tuple, or YTQuantity, optional + The current time of the image(s). If not specified, one will + be set from the dataset if there is one. If a float, it will + be assumed to be in *time_unit* units. + time_unit : string + The default time units of the file. Defaults to "s". + mass_unit : string + The default mass units of the file. Defaults to "g". + velocity_unit : string + The default velocity units of the file. Defaults to "cm/s". + magnetic_unit : string + The default magnetic units of the file. Defaults to "gauss". + ds : `~yt.static_output.Dataset` instance, optional + The dataset associated with the image(s), typically used + to transfer metadata to the header(s). Does not need to be + specified if *data* has a dataset as an attribute. Examples -------- @@ -80,7 +100,8 @@ def __init__(self, data, fields=None, units=None, width=None, wcs=None): >>> prj = ds.proj(2, "kT", weight_field="density") >>> frb = prj.to_frb((0.5, "Mpc"), 800) >>> # This example just uses the FRB and puts the coords in kpc. - >>> f_kpc = FITSImageData(frb, fields="kT", units="kpc") + >>> f_kpc = FITSImageData(frb, fields="kT", length_unit="kpc", + ... time_unit=(1.0, "Gyr")) >>> # This example specifies a specific WCS. >>> from astropy.wcs import WCS >>> w = WCS(naxis=self.dimensionality) @@ -95,16 +116,47 @@ def __init__(self, data, fields=None, units=None, width=None, wcs=None): >>> f_deg.writeto("temp.fits") """ + if fields is not None: + fields = ensure_list(fields) + + if "units" in kwargs: + issue_deprecation_warning("The 'units' keyword argument has been replaced " + "by the 'length_unit' keyword argument and the " + "former has been deprecated. 
Setting 'length_unit' " + "to 'units'.") + length_unit = kwargs.pop("units") + + if ds is None: + ds = getattr(data, "ds", None) + self.fields = [] self.field_units = {} - if units is None: - units = "cm" + if unit_header is None: + self._set_units(ds, [length_unit, mass_unit, time_unit, + velocity_unit, magnetic_unit]) + else: + self._set_units_from_header(unit_header) + + wcs_unit = str(self.length_unit.units) + + self._fix_current_time(ds, current_time) + if width is None: width = 1.0 + if isinstance(width, tuple): + if ds is None: + width = YTQuantity(width[0], width[1]) + else: + width = ds.quan(width[0], width[1]) + if img_ctr is None: + img_ctr = np.zeros(3) + + exclude_fields = ['x', 'y', 'z', 'px', 'py', 'pz', + 'pdx', 'pdy', 'pdz', 'weight_field'] - exclude_fields = ['x','y','z','px','py','pz', - 'pdx','pdy','pdz','weight_field'] + if isinstance(data, _astropy.pyfits.PrimaryHDU): + data = _astropy.pyfits.HDUList([data]) if isinstance(data, _astropy.pyfits.HDUList): self.hdulist = data @@ -129,9 +181,6 @@ def __init__(self, data, fields=None, units=None, width=None, wcs=None): self.hdulist = _astropy.pyfits.HDUList() - if isinstance(fields, string_types): - fields = [fields] - if hasattr(data, 'keys'): img_data = data if fields is None: @@ -154,64 +203,175 @@ def __init__(self, data, fields=None, units=None, width=None, wcs=None): else: self.fields.append(fd) + # Sanity checking names + s = set() + duplicates = set(f for f in self.fields if f in s or s.add(f)) + if len(duplicates) > 0: + for i, fd in enumerate(self.fields): + if fd in duplicates: + if isinstance(fields[i], tuple): + ftype, fname = fields[i] + elif isinstance(fields[i], DerivedField): + ftype, fname = fields[i].name + else: + raise RuntimeError("Cannot distinguish between fields " + "with same name %s!" % fd) + self.fields[i] = "%s_%s" % (ftype, fname) + first = True - for name, field in zip(self.fields, fields): + for i, name, field in zip(count(), self.fields, fields): if name not in exclude_fields: + this_img = img_data[field] if hasattr(img_data[field], "units"): - self.field_units[name] = str(img_data[field].units) + if this_img.units.is_code_unit: + mylog.warning("Cannot generate an image with code " + "units. 
Converting to units in CGS.") + funits = this_img.units.get_base_equivalent("cgs") + else: + funits = this_img.units + self.field_units[name] = str(funits) else: self.field_units[name] = "dimensionless" mylog.info("Making a FITS image of field %s" % name) + if isinstance(this_img, ImageArray): + if i == 0: + self.shape = this_img.shape[::-1] + this_img = np.asarray(this_img) + else: + if i == 0: + self.shape = this_img.shape + this_img = np.asarray(this_img.T) if first: - hdu = _astropy.pyfits.PrimaryHDU(np.array(img_data[field])) + hdu = _astropy.pyfits.PrimaryHDU(this_img) first = False else: - hdu = _astropy.pyfits.ImageHDU(np.array(img_data[field])) + hdu = _astropy.pyfits.ImageHDU(this_img) hdu.name = name hdu.header["btype"] = name hdu.header["bunit"] = re.sub('()', '', self.field_units[name]) + for unit in ("length", "time", "mass", "velocity", "magnetic"): + if unit == "magnetic": + short_unit = "bf" + else: + short_unit = unit[0] + key = "{}unit".format(short_unit) + value = getattr(self, "{}_unit".format(unit)) + if value is not None: + hdu.header[key] = float(value.value) + hdu.header.comments[key] = "[%s]" % value.units + hdu.header["time"] = float(self.current_time.value) self.hdulist.append(hdu) - self.shape = self.hdulist[0].shape self.dimensionality = len(self.shape) if wcs is None: w = _astropy.pywcs.WCS(header=self.hdulist[0].header, naxis=self.dimensionality) + # FRBs and covering grids are special cases where + # we have coordinate information, so we take advantage + # of this and construct the WCS object if isinstance(img_data, FixedResolutionBuffer): - # FRBs are a special case where we have coordinate - # information, so we take advantage of this and - # construct the WCS object - dx = (img_data.bounds[1]-img_data.bounds[0]).to(units).v - dy = (img_data.bounds[3]-img_data.bounds[2]).to(units).v + dx = (img_data.bounds[1]-img_data.bounds[0]).to_value(wcs_unit) + dy = (img_data.bounds[3]-img_data.bounds[2]).to_value(wcs_unit) dx /= self.shape[0] dy /= self.shape[1] - xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).to(units).v - yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).to(units).v + xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).to_value(wcs_unit) + yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).to_value(wcs_unit) center = [xctr, yctr] cdelt = [dx, dy] elif isinstance(img_data, YTCoveringGrid): - cdelt = img_data.dds.to(units).v - center = 0.5*(img_data.left_edge+img_data.right_edge).to(units).v + cdelt = img_data.dds.to_value(wcs_unit) + center = 0.5*(img_data.left_edge+img_data.right_edge).to_value(wcs_unit) else: - # If img_data is just an array, we assume the center is the - # origin and use the image width to determine the cell widths + # If img_data is just an array we use the width and img_ctr + # parameters to determine the cell widths if not iterable(width): width = [width]*self.dimensionality if isinstance(width[0], YTQuantity): - cdelt = [wh.to(units).v/n for wh, n in zip(width, self.shape)] + cdelt = [wh.to_value(wcs_unit)/n for wh, n in zip(width, self.shape)] else: cdelt = [float(wh)/n for wh, n in zip(width, self.shape)] - center = [0.0]*self.dimensionality + center = img_ctr[:self.dimensionality] w.wcs.crpix = 0.5*(np.array(self.shape)+1) w.wcs.crval = center w.wcs.cdelt = cdelt w.wcs.ctype = ["linear"]*self.dimensionality - w.wcs.cunit = [units]*self.dimensionality + w.wcs.cunit = [wcs_unit]*self.dimensionality self.set_wcs(w) else: self.set_wcs(wcs) + def _fix_current_time(self, ds, current_time): + if ds is None: + registry = None 
+ else: + registry = ds.unit_registry + tunit = Unit(self.time_unit, registry=registry) + if current_time is None: + if ds is not None: + current_time = ds.current_time + else: + self.current_time = YTQuantity(0.0, 's') + return + elif isinstance(current_time, numeric_type): + current_time = YTQuantity(current_time, tunit) + elif isinstance(current_time, tuple): + current_time = YTQuantity(current_time[0], current_time[1]) + self.current_time = current_time.to(tunit) + + def _set_units(self, ds, base_units): + attrs = ('length_unit', 'mass_unit', 'time_unit', + 'velocity_unit', 'magnetic_unit') + cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss') + for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units): + if unit is None: + if ds is not None: + u = getattr(ds, attr, None) + elif attr == "velocity_unit": + u = self.length_unit / self.time_unit + elif attr == "magnetic_unit": + u = np.sqrt(4.0*np.pi * self.mass_unit / + (self.time_unit**2 * self.length_unit)) + else: + u = cgs_unit + else: + u = unit + + if isinstance(u, str): + uq = YTQuantity(1.0, u) + elif isinstance(u, numeric_type): + uq = YTQuantity(u, cgs_unit) + elif isinstance(u, YTQuantity): + uq = u.copy() + elif isinstance(u, tuple): + uq = YTQuantity(u[0], u[1]) + else: + uq = None + + if uq is not None and uq.units.is_code_unit: + mylog.warning("Cannot use code units of '%s' " % uq.units + + "when creating a FITSImageData instance! " + "Converting to a cgs equivalent.") + uq.convert_to_cgs() + + if attr == "length_unit" and uq.value != 1.0: + mylog.warning("Converting length units " + "from %s to %s." % (uq, uq.units)) + uq = YTQuantity(1.0, uq.units) + + setattr(self, attr, uq) + + def _set_units_from_header(self, header): + for unit in ["length", "time", "mass", "velocity", "magnetic"]: + if unit == "magnetic": + key = "BFUNIT" + else: + key = unit[0].upper()+"UNIT" + if key not in header: + continue + u = YTQuantity(header[key], header.comments[key].strip("[]")) + setattr(self, unit+"_unit", u) + def set_wcs(self, wcs, wcsname=None, suffix=None): """ Set the WCS coordinate information for all images @@ -233,6 +393,68 @@ def set_wcs(self, wcs, wcsname=None, suffix=None): kk += suffix img.header[kk] = v + def change_image_name(self, old_name, new_name): + """ + Change the name of a FITS image. + + Parameters + ---------- + old_name : string + The old name of the image. + new_name : string + The new name of the image. + """ + idx = self.fields.index(old_name) + self.hdulist[idx].name = new_name + self.hdulist[idx].header['BTYPE'] = new_name + self.field_units[new_name] = self.field_units.pop(old_name) + self.fields[idx] = new_name + + def convolve(self, field, kernel, **kwargs): + """ + Convolve an image with a kernel, either a simple + Gaussian kernel or one provided by AstroPy. Currently, + this only works for 2D images. + + All keyword arguments are passed to + :meth:`~astropy.convolution.convolve`. + + Parameters + ---------- + field : string + The name of the field to convolve. + kernel : float, YTQuantity, (value, unit) tuple, or AstroPy Kernel object + The kernel to convolve the image with. If this is an AstroPy Kernel + object, the image will be convolved with it. Otherwise, it is + assumed that the kernel is a Gaussian and that this value is + the standard deviation. If a float, it is assumed that the units + are pixels, but a (value, unit) tuple or YTQuantity can be supplied + to specify the standard deviation in physical units. 
+ + Examples + -------- + >>> fid = FITSSlice(ds, "z", "density") + >>> fid.convolve("density", (3.0, "kpc")) + """ + if self.dimensionality == 3: + raise RuntimeError("Convolution currently only works for 2D FITSImageData!") + conv = _astropy.conv + if field not in self.keys(): + raise KeyError("%s not an image!" % field) + idx = self.fields.index(field) + if not isinstance(kernel, conv.Kernel): + if not isinstance(kernel, numeric_type): + unit = str(self.wcs.wcs.cunit[0]) + pix_scale = YTQuantity(self.wcs.wcs.cdelt[0], unit) + if isinstance(kernel, tuple): + stddev = YTQuantity(kernel[0], kernel[1]).to(unit) + else: + stddev = kernel.to(unit) + kernel = stddev/pix_scale + kernel = conv.Gaussian2DKernel(x_stddev=kernel) + self.hdulist[idx].data = conv.convolve(self.hdulist[idx].data, + kernel, **kwargs) + def update_header(self, field, key, value): """ Update the FITS header for *field* with a @@ -415,9 +637,8 @@ def pop(self, key): im = self.hdulist.pop(idx) self.field_units.pop(key) self.fields.remove(key) - data = _astropy.pyfits.HDUList([im]) - return FITSImageData(data) - + return FITSImageData(_astropy.pyfits.PrimaryHDU(im.data, header=im.header)) + def close(self): self.hdulist.close() @@ -433,7 +654,7 @@ def from_file(cls, filename): The name of the file to open. """ f = _astropy.pyfits.open(filename, lazy_load_hdus=False) - return cls(f) + return cls(f, current_time=f[0].header["TIME"], unit_header=f[0].header) @classmethod def from_images(cls, image_list): @@ -446,6 +667,7 @@ def from_images(cls, image_list): image_list : list of FITSImageData instances The images to be combined. """ + image_list = ensure_list(image_list) w = image_list[0].wcs img_shape = image_list[0].shape data = [] @@ -456,21 +678,20 @@ def from_images(cls, image_list): raise RuntimeError("Images do not have the same shape!") for hdu in fid.hdulist: if first: - data.append(hdu) + data.append(_astropy.pyfits.PrimaryHDU(hdu.data, header=hdu.header)) first = False else: data.append(_astropy.pyfits.ImageHDU(hdu.data, header=hdu.header)) data = _astropy.pyfits.HDUList(data) - return cls(data) + return cls(data, current_time=image_list[0].current_time) def create_sky_wcs(self, sky_center, sky_scale, - ctype=["RA---TAN","DEC--TAN"], - crota=None, cd=None, pc=None, - wcsname="celestial", + ctype=None, crota=None, cd=None, + pc=None, wcsname="celestial", replace_old_wcs=True): """ Takes a Cartesian WCS and converts it to one in a - celestial coordinate system. + sky-based coordinate system. Parameters ---------- @@ -480,7 +701,8 @@ def create_sky_wcs(self, sky_center, sky_scale, Conversion between an angle unit and a length unit, e.g. (3.0, "arcsec/kpc") ctype : list of strings, optional - The type of the coordinate system to create. + The type of the coordinate system to create. Default: + A "tangential" projection. crota : 2-element ndarray, optional Rotation angles between cartesian coordinates and the celestial coordinates. @@ -488,11 +710,15 @@ def create_sky_wcs(self, sky_center, sky_scale, Dimensioned coordinate transformation matrix. pc : 2x2-element ndarray, optional Coordinate transformation matrix. + wcsname : string, optional + The name of the WCS to be stored in the FITS header. replace_old_wcs : boolean, optional Whether or not to overwrite the default WCS of the FITSImageData instance. If false, a second WCS will be added to the header. Default: True. 
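To make the reworked create_sky_wcs defaults above concrete (ctype now falls back to a tangential RA/Dec projection), here is a minimal sketch; the dataset path, sky center, and plate scale are illustrative assumptions, not part of the patch:

    import yt
    from yt.visualization.fits_image import FITSSlice

    # Placeholder dataset; any grid dataset with a "density" field would do.
    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
    fid = FITSSlice(ds, "z", "density", image_res=512, length_unit="kpc")
    # Swap the Cartesian WCS for a sky-based one; the sky center (assumed to be
    # RA/Dec in degrees) and the arcsec-per-kpc scale are illustrative values.
    fid.create_sky_wcs((30.0, 45.0), (1.0, "arcsec/kpc"))
    fid.writeto("sloshing_slice_sky.fits", overwrite=True)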
""" + if ctype is None: + ctype = ["RA---TAN", "DEC--TAN"] old_wcs = self.wcs naxis = old_wcs.naxis crval = [sky_center[0], sky_center[1]] @@ -531,20 +757,25 @@ def create_sky_wcs(self, sky_center, sky_scale, else: self.set_wcs(new_wcs, wcsname=wcsname, suffix="a") + class FITSImageBuffer(FITSImageData): pass + def sanitize_fits_unit(unit): if unit == "Mpc": - mylog.info("Changing FITS file unit to kpc.") + mylog.info("Changing FITS file length unit to kpc.") unit = "kpc" elif unit == "au": unit = "AU" return unit + axis_wcs = [[1,2],[0,2],[0,1]] -def construct_image(ds, axis, data_source, center, width=None, image_res=None): + +def construct_image(ds, axis, data_source, center, image_res, width, + length_unit): if width is None: width = ds.domain_width[axis_wcs[axis]] unit = ds.get_smallest_appropriate_unit(width[0]) @@ -553,18 +784,10 @@ def construct_image(ds, axis, data_source, center, width=None, image_res=None): else: width = ds.coordinates.sanitize_width(axis, width, None) unit = str(width[0].units) - if image_res is None: - ddims = ds.domain_dimensions*ds.refine_by**ds.index.max_level - if iterable(axis): - nx = ddims.max() - ny = ddims.max() - else: - nx, ny = [ddims[idx] for idx in axis_wcs[axis]] + if iterable(image_res): + nx, ny = image_res else: - if iterable(image_res): - nx, ny = image_res - else: - nx, ny = image_res, image_res + nx, ny = image_res, image_res dx = width[0]/nx dy = width[1]/ny crpix = [0.5*(nx+1), 0.5*(ny+1)] @@ -573,13 +796,17 @@ def construct_image(ds, axis, data_source, center, width=None, image_res=None): elif unit == "code_length": unit = ds.get_smallest_appropriate_unit(ds.quan(1.0, "code_length")) unit = sanitize_fits_unit(unit) - cunit = [unit]*2 + if length_unit is None: + length_unit = unit + if any(char.isdigit() for char in length_unit) and "*" in length_unit: + length_unit = length_unit.split("*")[-1] + cunit = [length_unit]*2 ctype = ["LINEAR"]*2 - cdelt = [dx.in_units(unit), dy.in_units(unit)] + cdelt = [dx.in_units(length_unit), dy.in_units(length_unit)] if iterable(axis): - crval = center.in_units(unit) + crval = center.in_units(length_unit) else: - crval = [center[idx].in_units(unit) for idx in axis_wcs[axis]] + crval = [center[idx].in_units(length_unit) for idx in axis_wcs[axis]] if hasattr(data_source, 'to_frb'): if iterable(axis): frb = data_source.to_frb(width[0], (nx, ny), height=width[1]) @@ -594,7 +821,8 @@ def construct_image(ds, axis, data_source, center, width=None, image_res=None): w.wcs.crval = crval w.wcs.cunit = cunit w.wcs.ctype = ctype - return w, frb + return w, frb, length_unit + def assert_same_wcs(wcs1, wcs2): from numpy.testing import assert_allclose @@ -624,6 +852,7 @@ def assert_same_wcs(wcs1, wcs2): else: assert_allclose(wcs1.wcs.pc, wcs2.wcs.pc) + class FITSSlice(FITSImageData): r""" Generate a FITSImageData of an on-axis slice. @@ -636,49 +865,53 @@ class FITSSlice(FITSImageData): The axis of the slice. One of "x","y","z", or 0,1,2. fields : string or list of strings The fields to slice + image_res : an int or 2-tuple of ints + Specify the resolution of the resulting image. A single value will be + used for both axes, whereas a tuple of values will be used for the + individual axes. Default: 512 center : A sequence of floats, a string, or a tuple. - The coordinate of the center of the image. If set to 'c', 'center' or - left blank, the plot is centered on the middle of the domain. If set - to 'max' or 'm', the center will be located at the maximum of the - ('gas', 'density') field. 
Centering on the max or min of a specific - field is supported by providing a tuple such as ("min","temperature") - or ("max","dark_matter_density"). Units can be specified by passing in - *center* as a tuple containing a coordinate and string unit name or by - passing in a YTArray. If a list or unitless array is supplied, code - units are assumed. + The coordinate of the center of the image. If set to 'c', 'center' or + left blank, the plot is centered on the middle of the domain. If set + to 'max' or 'm', the center will be located at the maximum of the + ('gas', 'density') field. Centering on the max or min of a specific + field is supported by providing a tuple such as ("min","temperature") + or ("max","dark_matter_density"). Units can be specified by passing in + *center* as a tuple containing a coordinate and string unit name or by + passing in a YTArray. If a list or unitless array is supplied, code + units are assumed. width : tuple or a float. - Width can have four different formats to support windows with variable - x and y widths. They are: - - ================================== ======================= - format example - ================================== ======================= - (float, string) (10,'kpc') - ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) - float 0.2 - (float, float) (0.2, 0.3) - ================================== ======================= - - For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs - wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a - window that is 10 kiloparsecs wide along the x axis and 15 - kiloparsecs wide along the y axis. In the other two examples, code - units are assumed, for example (0.2, 0.3) requests a plot that has an - x width of 0.2 and a y width of 0.3 in code units. If units are - provided the resulting plot axis labels will use the supplied units. - image_res : an int or 2-tuple of ints - Specify the resolution of the resulting image. If not provided, it will - be determined based on the minimum cell size of the dataset. + Width can have four different formats to support variable + x and y widths. They are: + + ================================== ======================= + format example + ================================== ======================= + (float, string) (10,'kpc') + ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) + float 0.2 + (float, float) (0.2, 0.3) + ================================== ======================= + + For example, (10, 'kpc') specifies a width that is 10 kiloparsecs + wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a + width that is 10 kiloparsecs wide along the x axis and 15 + kiloparsecs wide along the y axis. In the other two examples, code + units are assumed, for example (0.2, 0.3) specifies a width that has an + x width of 0.2 and a y width of 0.3 in code units. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. 
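A minimal usage sketch for the FITSSlice constructor that follows, showing the new image_res default and the optional length_unit; the dataset, fields, and widths are placeholders (the FITSProjection call mirrors the signature shown a bit further down):

    import yt
    from yt.visualization.fits_image import FITSSlice, FITSProjection

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder dataset
    # Anisotropic buffer, a 20 x 10 kpc window, coordinates written in kpc
    slc_fits = FITSSlice(ds, "z", "density", image_res=(1024, 512),
                         width=((20.0, "kpc"), (10.0, "kpc")), length_unit="kpc")
    # The FITSProjection counterpart further below follows the same pattern
    prj_fits = FITSProjection(ds, "x", "temperature", image_res=800,
                              weight_field="density", length_unit="kpc")
    slc_fits.writeto("galaxy_slice.fits", overwrite=True)
    prj_fits.writeto("galaxy_proj.fits", overwrite=True)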
""" - def __init__(self, ds, axis, fields, center="c", width=None, - image_res=None, **kwargs): + def __init__(self, ds, axis, fields, image_res=512, center="c", + width=None, length_unit=None, **kwargs): fields = ensure_list(fields) axis = fix_axis(axis, ds) center, dcenter = ds.coordinates.sanitize_center(center, axis) slc = ds.slice(axis, center[axis], **kwargs) - w, frb = construct_image(ds, axis, slc, dcenter, width=width, - image_res=image_res) - super(FITSSlice, self).__init__(frb, fields=fields, wcs=w) + w, frb, lunit = construct_image(ds, axis, slc, dcenter, image_res, + width, length_unit) + super(FITSSlice, self).__init__(frb, fields=fields, + length_unit=lunit, wcs=w) class FITSProjection(FITSImageData): @@ -693,51 +926,57 @@ class FITSProjection(FITSImageData): The axis along which to project. One of "x","y","z", or 0,1,2. fields : string or list of strings The fields to project - weight_field : string - The field used to weight the projection. + image_res : an int or 2-tuple of ints + Specify the resolution of the resulting image. A single value will be + used for both axes, whereas a tuple of values will be used for the + individual axes. Default: 512 center : A sequence of floats, a string, or a tuple. - The coordinate of the center of the image. If set to 'c', 'center' or - left blank, the plot is centered on the middle of the domain. If set - to 'max' or 'm', the center will be located at the maximum of the - ('gas', 'density') field. Centering on the max or min of a specific - field is supported by providing a tuple such as ("min","temperature") - or ("max","dark_matter_density"). Units can be specified by passing in - *center* as a tuple containing a coordinate and string unit name or by - passing in a YTArray. If a list or unitless array is supplied, code - units are assumed. + The coordinate of the center of the image. If set to 'c', 'center' or + left blank, the plot is centered on the middle of the domain. If set + to 'max' or 'm', the center will be located at the maximum of the + ('gas', 'density') field. Centering on the max or min of a specific + field is supported by providing a tuple such as ("min","temperature") + or ("max","dark_matter_density"). Units can be specified by passing in + *center* as a tuple containing a coordinate and string unit name or by + passing in a YTArray. If a list or unitless array is supplied, code + units are assumed. width : tuple or a float. - Width can have four different formats to support windows with variable - x and y widths. They are: - - ================================== ======================= - format example - ================================== ======================= - (float, string) (10,'kpc') - ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) - float 0.2 - (float, float) (0.2, 0.3) - ================================== ======================= - - For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs - wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a - window that is 10 kiloparsecs wide along the x axis and 15 - kiloparsecs wide along the y axis. In the other two examples, code - units are assumed, for example (0.2, 0.3) requests a plot that has an - x width of 0.2 and a y width of 0.3 in code units. If units are - provided the resulting plot axis labels will use the supplied units. - image_res : an int or 2-tuple of ints - Specify the resolution of the resulting image. If not provided, it will - be determined based on the minimum cell size of the dataset. 
+ Width can have four different formats to support variable + x and y widths. They are: + + ================================== ======================= + format example + ================================== ======================= + (float, string) (10,'kpc') + ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) + float 0.2 + (float, float) (0.2, 0.3) + ================================== ======================= + + For example, (10, 'kpc') specifies a width that is 10 kiloparsecs + wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a + width that is 10 kiloparsecs wide along the x axis and 15 + kiloparsecs wide along the y axis. In the other two examples, code + units are assumed, for example (0.2, 0.3) specifies a width that has an + x width of 0.2 and a y width of 0.3 in code units. + weight_field : string + The field used to weight the projection. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. """ - def __init__(self, ds, axis, fields, center="c", width=None, - weight_field=None, image_res=None, **kwargs): + def __init__(self, ds, axis, fields, image_res=512, + center="c", width=None, weight_field=None, + length_unit=None, **kwargs): fields = ensure_list(fields) axis = fix_axis(axis, ds) center, dcenter = ds.coordinates.sanitize_center(center, axis) prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs) - w, frb = construct_image(ds, axis, prj, dcenter, width=width, - image_res=image_res) - super(FITSProjection, self).__init__(frb, fields=fields, wcs=w) + w, frb, lunit = construct_image(ds, axis, prj, dcenter, image_res, + width, length_unit) + super(FITSProjection, self).__init__(frb, fields=fields, + length_unit=lunit, wcs=w) + class FITSOffAxisSlice(FITSImageData): r""" @@ -751,18 +990,22 @@ class FITSOffAxisSlice(FITSImageData): The vector normal to the projection plane. fields : string or list of strings The fields to slice + image_res : an int or 2-tuple of ints + Specify the resolution of the resulting image. A single value will be + used for both axes, whereas a tuple of values will be used for the + individual axes. Default: 512 center : A sequence of floats, a string, or a tuple. The coordinate of the center of the image. If set to 'c', 'center' or - left blank, the plot is centered on the middle of the domain. If set to - 'max' or 'm', the center will be located at the maximum of the + left blank, the plot is centered on the middle of the domain. If set + to 'max' or 'm', the center will be located at the maximum of the ('gas', 'density') field. Centering on the max or min of a specific - field is supported by providing a tuple such as ("min","temperature") - or ("max","dark_matter_density"). Units can be specified by passing in - *center* as a tuple containing a coordinate and string unit name or by + field is supported by providing a tuple such as ("min","temperature") + or ("max","dark_matter_density"). Units can be specified by passing in + *center* as a tuple containing a coordinate and string unit name or by passing in a YTArray. If a list or unitless array is supplied, code units are assumed. width : tuple or a float. - Width can have four different formats to support windows with variable + Width can have four different formats to support variable x and y widths. 
They are: ================================== ======================= @@ -774,29 +1017,31 @@ class FITSOffAxisSlice(FITSImageData): (float, float) (0.2, 0.3) ================================== ======================= - For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs - wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a - window that is 10 kiloparsecs wide along the x axis and 15 + For example, (10, 'kpc') specifies a width that is 10 kiloparsecs + wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a + width that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along the y axis. In the other two examples, code - units are assumed, for example (0.2, 0.3) requests a plot that has an - x width of 0.2 and a y width of 0.3 in code units. If units are - provided the resulting plot axis labels will use the supplied units. - image_res : an int or 2-tuple of ints - Specify the resolution of the resulting image. + units are assumed, for example (0.2, 0.3) specifies a width that has an + x width of 0.2 and a y width of 0.3 in code units. north_vector : a sequence of floats A vector defining the 'up' direction in the plot. This option sets the orientation of the slicing plane. If not set, an arbitrary grid-aligned north-vector is chosen. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. """ - def __init__(self, ds, normal, fields, center='c', width=None, - image_res=512, north_vector=None): + def __init__(self, ds, normal, fields, image_res=512, + center='c', width=None, north_vector=None, + length_unit=None): fields = ensure_list(fields) center, dcenter = ds.coordinates.sanitize_center(center, 4) cut = ds.cutting(normal, center, north_vector=north_vector) - center = ds.arr([0.0] * 2, 'code_length') - w, frb = construct_image(ds, normal, cut, center, width=width, - image_res=image_res) - super(FITSOffAxisSlice, self).__init__(frb, fields=fields, wcs=w) + center = ds.arr([0.0]*2, 'code_length') + w, frb, lunit = construct_image(ds, normal, cut, center, + image_res, width, length_unit) + super(FITSOffAxisSlice, self).__init__(frb, fields=fields, + length_unit=lunit, wcs=w) class FITSOffAxisProjection(FITSImageData): @@ -812,44 +1057,45 @@ class FITSOffAxisProjection(FITSImageData): The vector normal to the projection plane. fields : string, list of strings The name of the field(s) to be plotted. + image_res : an int or 2-tuple of ints + Specify the resolution of the resulting image. A single value will be + used for both axes, whereas a tuple of values will be used for the + individual axes. Default: 512 center : A sequence of floats, a string, or a tuple. - The coordinate of the center of the image. If set to 'c', 'center' or - left blank, the plot is centered on the middle of the domain. If set - to 'max' or 'm', the center will be located at the maximum of the - ('gas', 'density') field. Centering on the max or min of a specific - field is supported by providing a tuple such as ("min","temperature") - or ("max","dark_matter_density"). Units can be specified by passing in - *center* as a tuple containing a coordinate and string unit name or by - passing in a YTArray. If a list or unitless array is supplied, code - units are assumed. + The coordinate of the center of the image. If set to 'c', 'center' or + left blank, the plot is centered on the middle of the domain. 
If set + to 'max' or 'm', the center will be located at the maximum of the + ('gas', 'density') field. Centering on the max or min of a specific + field is supported by providing a tuple such as ("min","temperature") + or ("max","dark_matter_density"). Units can be specified by passing in + *center* as a tuple containing a coordinate and string unit name or by + passing in a YTArray. If a list or unitless array is supplied, code + units are assumed. width : tuple or a float. - Width can have four different formats to support windows with variable - x and y widths. They are: - - ================================== ======================= - format example - ================================== ======================= - (float, string) (10,'kpc') - ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) - float 0.2 - (float, float) (0.2, 0.3) - ================================== ======================= - - For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs - wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a - window that is 10 kiloparsecs wide along the x axis and 15 - kiloparsecs wide along the y axis. In the other two examples, code - units are assumed, for example (0.2, 0.3) requests a plot that has an - x width of 0.2 and a y width of 0.3 in code units. If units are - provided the resulting plot axis labels will use the supplied units. + Width can have four different formats to support variable + x and y widths. They are: + + ================================== ======================= + format example + ================================== ======================= + (float, string) (10,'kpc') + ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) + float 0.2 + (float, float) (0.2, 0.3) + ================================== ======================= + + For example, (10, 'kpc') specifies a width that is 10 kiloparsecs + wide in the x and y directions, ((10,'kpc'),(15,'kpc')) specifies a + width that is 10 kiloparsecs wide along the x axis and 15 + kiloparsecs wide along the y axis. In the other two examples, code + units are assumed, for example (0.2, 0.3) specifies a width that has an + x width of 0.2 and a y width of 0.3 in code units. depth : A tuple or a float - A tuple containing the depth to project through and the string - key of the unit: (width, 'unit'). If set to a float, code units - are assumed + A tuple containing the depth to project through and the string + key of the unit: (width, 'unit'). If set to a float, code units + are assumed weight_field : string The name of the weighting field. Set to None for no weight. - image_res : an int or 2-tuple of ints - Specify the resolution of the resulting image. north_vector : a sequence of floats A vector defining the 'up' direction in the plot. This option sets the orientation of the slicing plane. If not @@ -871,10 +1117,14 @@ class FITSOffAxisProjection(FITSImageData): data_source : yt.data_objects.data_containers.YTSelectionContainer, optional If specified, this will be the data source used for selecting regions to project. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. 
""" def __init__(self, ds, normal, fields, center='c', width=(1.0, 'unitary'), weight_field=None, image_res=512, data_source=None, - north_vector=None, depth=(1.0, "unitary"), method='integrate'): + north_vector=None, depth=(1.0, "unitary"), + method='integrate', length_unit=None): fields = ensure_list(fields) center, dcenter = ds.coordinates.sanitize_center(center, 4) buf = {} @@ -891,6 +1141,8 @@ def __init__(self, ds, normal, fields, center='c', width=(1.0, 'unitary'), buf[field] = off_axis_projection(source, center, normal, wd, res, field, north_vector=north_vector, method=method, weight=weight_field).swapaxes(0,1) - center = ds.arr([0.0] * 2, 'code_length') - w, not_an_frb = construct_image(ds, normal, buf, center, width=width, image_res=image_res) - super(FITSOffAxisProjection, self).__init__(buf, fields=fields, wcs=w) + center = ds.arr([0.0]*2, 'code_length') + w, not_an_frb, lunit = construct_image(ds, normal, buf, center, + image_res, width, length_unit) + super(FITSOffAxisProjection, self).__init__(buf, fields=fields, wcs=w, + length_unit=lunit, ds=ds) diff --git a/yt/visualization/fixed_resolution.py b/yt/visualization/fixed_resolution.py index b1384389b52..e03abd79e98 100644 --- a/yt/visualization/fixed_resolution.py +++ b/yt/visualization/fixed_resolution.py @@ -1,25 +1,11 @@ -""" -Fixed resolution buffer support, along with a primitive image analysis tool. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from yt.frontends.ytdata.utilities import \ save_as_dataset from yt.funcs import \ get_output_filename, \ mylog, \ ensure_list, \ - deprecate + deprecate, \ + issue_deprecation_warning from .volume_rendering.api import off_axis_projection from .fixed_resolution_filters import apply_filter, filter_registry from yt.data_objects.image_array import ImageArray @@ -75,9 +61,9 @@ class FixedResolutionBuffer(object): >>> proj = ds.proj(0, "density") >>> frb1 = FixedResolutionBuffer(proj, (0.2, 0.3, 0.4, 0.5), ... 
(1024, 1024)) - >>> print frb1["density"].max() + >>> print(frb1["density"].max()) 1.0914e-9 g/cm**3 - >>> print frb1["temperature"].max() + >>> print(frb1["temperature"].max()) 104923.1 K """ _exclude_fields = ('pz','pdz','dx','x','y','z', @@ -86,8 +72,8 @@ class FixedResolutionBuffer(object): ('index', 'r'), ('index', 'dr'), ('index', 'phi'), ('index', 'dphi'), ('index', 'theta'), ('index', 'dtheta')) - def __init__(self, data_source, bounds, buff_size, antialias = True, - periodic = False): + def __init__(self, data_source, bounds, buff_size, antialias=True, + periodic=False): self.data_source = data_source self.ds = data_source.ds self.bounds = bounds @@ -130,6 +116,7 @@ def __getitem__(self, item): if hasattr(b, "in_units"): b = float(b.in_units("code_length")) bounds.append(b) + buff = self.ds.coordinates.pixelize(self.data_source.axis, self.data_source, item, bounds, self.buff_size, int(self.antialias)) @@ -137,10 +124,19 @@ def __getitem__(self, item): for name, (args, kwargs) in self._filters: buff = filter_registry[name](*args[1:], **kwargs).apply(buff) - # Need to add _period and self.periodic - # self._period, int(self.periodic) - ia = ImageArray(buff, input_units=self.data_source[item].units, - info=self._get_info(item)) + # FIXME FIXME FIXME we shouldn't need to do this for projections + # but that will require fixing data object access for particle + # projections + try: + if hasattr(item, 'name'): + it = item.name + else: + it = item + units = self.data_source._projected_units[it] + except (KeyError, AttributeError): + units = self.data_source[item].units + + ia = ImageArray(buff, units=units, info=self._get_info(item)) self.data[item] = ia return self.data[item] @@ -276,7 +272,6 @@ def set_unit(self, field, unit, equivalency=None, equivalency_kwargs=None): equiv_array, equiv_array.units, equiv_array.units.registry, self[field].info) - def export_hdf5(self, filename, fields = None): r"""Export a set of fields to a set of HDF5 datasets. @@ -296,30 +291,38 @@ def export_hdf5(self, filename, fields = None): output.create_dataset(field,data=self[field]) output.close() - def export_fits(self, filename, fields=None, overwrite=False, - other_keys=None, units="cm", **kwargs): - r"""Export a set of pixelized fields to a FITS file. + def to_fits_data(self, fields=None, other_keys=None, length_unit=None, + **kwargs): + r"""Export the fields in this FixedResolutionBuffer instance + to a FITSImageData instance. This will export a set of FITS images of either the fields specified or all the fields already in the object. Parameters ---------- - filename : string - The name of the FITS file to be written. fields : list of strings - These fields will be pixelized and output. If "None", the keys of the - FRB will be used. - overwrite : boolean - If the file exists, this governs whether we will overwrite. + These fields will be pixelized and output. If "None", the keys of + the FRB will be used. other_keys : dictionary, optional A set of header keys and values to write into the FITS header. - units : string, optional - the length units that the coordinates are written in, default 'cm'. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. 
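A hedged sketch of the new to_fits_data path on a FixedResolutionBuffer, which replaces the now-deprecated export_fits shown below; the dataset, field, and header values are illustrative:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder dataset
    prj = ds.proj("density", "z")
    frb = prj.to_frb((500.0, "kpc"), 800)
    # Build a FITSImageData instance rather than writing a file directly
    fid = frb.to_fits_data(fields=["density"], length_unit="kpc")
    fid.update_all_headers("exposure", 300.0)  # illustrative extra header keyword
    fid.writeto("projected_density.fits", overwrite=True)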
""" - from yt.visualization.fits_image import FITSImageData + if length_unit is None: + length_unit = self.ds.length_unit + + if "units" in kwargs: + issue_deprecation_warning("The 'units' keyword argument has been " + "replaced by the 'length_unit' keyword " + "argument and the former has been " + "deprecated. Setting 'length_unit' " + "to 'units'.") + length_unit = kwargs.pop("units") + if fields is None: fields = list(self.data.keys()) else: @@ -328,15 +331,46 @@ def export_fits(self, filename, fields=None, overwrite=False, if len(fields) == 0: raise RuntimeError( "No fields to export. Either pass a field or list of fields to " - "export_fits or access a field from the fixed resolution buffer " + "to_fits_data or access a field from the FixedResolutionBuffer " "object." ) - fib = FITSImageData(self, fields=fields, units=units) + fid = FITSImageData(self, fields=fields, length_unit=length_unit) if other_keys is not None: for k,v in other_keys.items(): - fib.update_all_headers(k,v) - fib.writeto(filename, overwrite=overwrite, **kwargs) + fid.update_all_headers(k, v) + return fid + + def export_fits(self, filename, fields=None, overwrite=False, + other_keys=None, length_unit=None, **kwargs): + r"""Export a set of pixelized fields to a FITS file. + + This will export a set of FITS images of either the fields specified + or all the fields already in the object. + + Parameters + ---------- + filename : string + The name of the FITS file to be written. + fields : list of strings + These fields will be pixelized and output. If "None", the keys of the + FRB will be used. + overwrite : boolean + If the file exists, this governs whether we will overwrite. + other_keys : dictionary, optional + A set of header keys and values to write into the FITS header. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. + """ + issue_deprecation_warning("The 'export_fits' method of " + "FixedResolutionBuffer is deprecated. " + "Use the 'to_fits_data' method to create " + "a FITSImageData instance and then " + "use its `writeto` method.") + fid = self.to_fits_data(fields=fields, other_keys=other_keys, + length_unit=length_unit, **kwargs) + fid.writeto(filename, overwrite=overwrite, **kwargs) def export_dataset(self, fields=None, nprocs=1): r"""Export a set of pixelized fields to an in-memory dataset that can be @@ -619,7 +653,7 @@ def __getitem__(self, item): splat_vals) # remove values in no-particle region buff[buff_mask==0] = np.nan - ia = ImageArray(buff, input_units=data.units, + ia = ImageArray(buff, units=data.units, info=self._get_info(item)) # divide by the weight_field, if needed @@ -632,7 +666,7 @@ def __getitem__(self, item): py[mask], weight_data[mask]) weight_array = ImageArray(weight_buff, - input_units=weight_data.units, + units=weight_data.units, info=self._get_info(item)) # remove values in no-particle region weight_buff[weight_buff_mask==0] = np.nan diff --git a/yt/visualization/fixed_resolution_filters.py b/yt/visualization/fixed_resolution_filters.py index 4e5229846f6..801b6f776de 100644 --- a/yt/visualization/fixed_resolution_filters.py +++ b/yt/visualization/fixed_resolution_filters.py @@ -1,18 +1,4 @@ -""" -Fixed resolution buffer filters. - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np -from yt.extern.six import add_metaclass from functools import wraps filter_registry = {} @@ -33,8 +19,7 @@ def __init__(cls, name, b, d): filter_registry[name] = cls -@add_metaclass(RegisteredFilter) -class FixedResolutionBufferFilter(object): +class FixedResolutionBufferFilter(metaclass = RegisteredFilter): """ This object allows to apply data transformation directly to diff --git a/yt/visualization/image_writer.py b/yt/visualization/image_writer.py index b9cc72d8b7b..c36f3303a3a 100644 --- a/yt/visualization/image_writer.py +++ b/yt/visualization/image_writer.py @@ -1,18 +1,3 @@ -""" - - -""" -from __future__ import print_function -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.config import \ @@ -27,8 +12,7 @@ from . import _colormap_data as cmd import yt.utilities.lib.image_utilities as au import yt.utilities.png_writer as pw -from yt.extern.six.moves import builtins -from yt.extern.six import string_types +import builtins def scale_image(image, mi=None, ma=None): @@ -299,7 +283,7 @@ def strip_colormap_data(fn = "color_map_data.py", f.write("color_map_luts = {}\n\n\n") if cmaps is None: cmaps = rcm.ColorMaps - if isinstance(cmaps, string_types): + if isinstance(cmaps, str): cmaps = [cmaps] for cmap_name in sorted(cmaps): vals = rcm._extract_lookup_table(cmap_name) diff --git a/yt/visualization/line_plot.py b/yt/visualization/line_plot.py index 7915fb2cdc8..66576c09758 100644 --- a/yt/visualization/line_plot.py +++ b/yt/visualization/line_plot.py @@ -1,18 +1,3 @@ -""" -A mechanism for plotting field values along a line through a dataset - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from collections import defaultdict diff --git a/yt/visualization/mapserver/pannable_map.py b/yt/visualization/mapserver/pannable_map.py index 9e6b9d8e3ef..7b079215ac2 100644 --- a/yt/visualization/mapserver/pannable_map.py +++ b/yt/visualization/mapserver/pannable_map.py @@ -1,17 +1,3 @@ -""" -A simple leaflet-based pannable map server - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import os import numpy as np diff --git a/yt/visualization/particle_plots.py b/yt/visualization/particle_plots.py index dcec7c56826..20300fb6c00 100644 --- a/yt/visualization/particle_plots.py +++ b/yt/visualization/particle_plots.py @@ -1,18 +1,3 @@ -""" -This is a simple mechanism for interfacing with Particle plots - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.visualization.fixed_resolution import \ @@ -54,6 +39,11 @@ def __init__(self, center, ds, axis, width, fields, LE = center - 0.5*YTArray(width) RE = center + 0.5*YTArray(width) + for ax in range(3): + if not ds.periodicity[ax]: + LE[ax] = max(LE[ax], ds.domain_left_edge[ax]) + RE[ax] = min(RE[ax], ds.domain_right_edge[ax]) + self.dd = ds.region(center, LE, RE, fields, field_parameters=field_parameters, data_source=data_source) diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index b85361e049a..1bc96bd8ea0 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -1,23 +1,6 @@ -""" -A base class for "image" plots with colorbars. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- +import builtins from yt.funcs import mylog from yt.units import YTQuantity -from yt.extern.six.moves import builtins -from yt.extern.six import \ - iteritems, \ - string_types import base64 import numpy as np @@ -37,8 +20,6 @@ ensure_dir, \ ensure_list, \ issue_deprecation_warning -from yt.units.unit_lookup_table import \ - prefixable_units, latex_prefixes from yt.units.unit_object import \ Unit from yt.utilities.definitions import \ @@ -48,6 +29,10 @@ from yt.visualization.color_maps import \ yt_colormaps +latex_prefixes = { + "u": r"\mu", + } + def invalidate_data(f): @wraps(f) @@ -489,7 +474,7 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): if suffix is None: suffix = get_image_suffix(name) if suffix != '': - for k, v in iteritems(self.plots): + for k, v in self.plots.items(): names.append(v.save(name, mpl_kwargs)) return names if hasattr(self.data_source, 'axis'): @@ -505,7 +490,7 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): weight = weight[1].replace(' ', '_') if 'Cutting' in self.data_source.__class__.__name__: type = 'OffAxisSlice' - for k, v in iteritems(self.plots): + for k, v in self.plots.items(): if isinstance(k, tuple): k = k[1] if axis: @@ -546,7 +531,7 @@ def show(self): """ interactivity = self.plots[list(self.plots.keys())[0]].interactivity if interactivity: - for k,v in sorted(iteritems(self.plots)): + for k,v in sorted(self.plots.items()): v.show() else: if "__IPYTHON__" in dir(builtins): @@ -662,7 +647,7 @@ def _get_axes_unit_labels(self, unit_x, unit_y): pp = un[0] if pp in latex_prefixes: symbol_wo_prefix = un[1:] - if symbol_wo_prefix in prefixable_units: + if symbol_wo_prefix in self.ds.unit_registry.prefixable_units: un = un.replace( pp, "{"+latex_prefixes[pp]+"}", 1) axes_unit_labels[i] = r'\ \ ('+un+')' @@ -860,7 +845,7 @@ def set_background_color(self, field, color=None): """ if color is None: cmap = self._colormaps[field] - if isinstance(cmap, string_types): + if isinstance(cmap, str): try: cmap = yt_colormaps[cmap] except KeyError: @@ -896,14 +881,13 @@ def set_zlim(self, field, zmin, zmax, dynamic_range=None): zmin = zmax / dynamic_range. 
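The reworked implementation that follows also accepts field='all', applying the limits to every field held by the plot container; a minimal sketch under that assumption, with a placeholder dataset and illustrative limits:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder dataset
    p = yt.ProjectionPlot(ds, "z", [("gas", "density"), ("gas", "temperature")])
    # Limits for a single field; tuples with units are converted to the plot units
    p.set_zlim(("gas", "density"), (1e-5, "g/cm**2"), (1e-2, "g/cm**2"))
    # Or address every field at once, pinning each to four decades above its minimum
    p.set_zlim("all", "min", None, dynamic_range=1e4)
    p.save()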
""" - - def _sanitize_units(z): + def _sanitize_units(z, _field): # convert dimensionful inputs to float if isinstance(z, tuple): z = self.ds.quan(*z) if isinstance(z, YTQuantity): try: - plot_units = self.frb[field].units + plot_units = self.frb[_field].units z = z.to(plot_units).value except AttributeError: # only certain subclasses have a frb attribute they can rely on for inspecting units @@ -911,22 +895,26 @@ def _sanitize_units(z): z = z.value return z - myzmin = _sanitize_units(zmin) - myzmax = _sanitize_units(zmax) - if zmin == 'min': - myzmin = self.plots[field].image._A.min() - if zmax == 'max': - myzmax = self.plots[field].image._A.max() - if dynamic_range is not None: - if zmax is None: - myzmax = myzmin * dynamic_range - else: - myzmin = myzmax / dynamic_range - - if myzmin > 0.0 and self._field_transform[field] == symlog_transform: - self._field_transform[field] = log_transform - self.plots[field].zmin = myzmin - self.plots[field].zmax = myzmax + if field == 'all': + fields = list(self.plots.keys()) + else: + fields = ensure_list(field) + for field in self.data_source._determine_fields(fields): + myzmin = _sanitize_units(zmin, field) + myzmax = _sanitize_units(zmax, field) + if zmin == 'min': + myzmin = self.plots[field].image._A.min() + if zmax == 'max': + myzmax = self.plots[field].image._A.max() + if dynamic_range is not None: + if zmax is None: + myzmax = myzmin * dynamic_range + else: + myzmin = myzmax / dynamic_range + if myzmin > 0.0 and self._field_transform[field] == symlog_transform: + self._field_transform[field] = log_transform + self.plots[field].zmin = myzmin + self.plots[field].zmax = myzmax return self @invalidate_plot diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index d7f1683b4c7..2145597020f 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -1,18 +1,3 @@ -""" - -Callbacks to add additional functionality on to plots. - - - -""" -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from __future__ import absolute_import import warnings @@ -27,13 +12,6 @@ from yt.data_objects.level_sets.clump_handling import \ Clump -try: - from yt.extensions.astro_analysis.halo_analysis.halo_catalog import \ - HaloCatalog as AAHaloCatalog -except ImportError: - AAHaloCatalog = None -from yt.analysis_modules.halo_analysis.halo_catalog import \ - HaloCatalog from yt.frontends.ytdata.data_structures import \ YTClumpContainer from yt.data_objects.selection_data_containers import YTCutRegion @@ -47,7 +25,6 @@ validate_width_tuple from yt.geometry.geometry_handler import \ is_curvilinear -from yt.extern.six import add_metaclass from yt.units import dimensions from yt.units.yt_array import YTQuantity, YTArray, uhstack from yt.units.unit_object import Unit @@ -56,14 +33,16 @@ from yt.utilities.lib.pixelization_routines import \ pixelize_off_axis_cartesian, \ pixelize_cartesian -from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \ - periodic_ray from yt.utilities.lib.line_integral_convolution import \ line_integral_convolution_2d from yt.geometry.unstructured_mesh_handler import UnstructuredIndex from yt.utilities.lib.mesh_triangulation import triangulate_indices from yt.utilities.exceptions import \ YTDataTypeUnsupported +from yt.utilities.math_utils import \ + periodic_ray +from yt.utilities.on_demand_imports import \ + NotAModule callback_registry = {} @@ -86,8 +65,7 @@ def __init__(cls, name, b, d): callback_registry[name] = cls cls.__call__ = _verify_geometry(cls.__call__) -@add_metaclass(RegisteredCallback) -class PlotCallback(object): +class PlotCallback(metaclass = RegisteredCallback): # _supported_geometries is set by subclasses of PlotCallback to a tuple of # strings corresponding to the names of the geometries that a callback # supports. By default it is None, which means it supports everything. @@ -704,8 +682,8 @@ def __call__(self, plot): block_ids.append(block.id) if len(GLE) == 0: return # Retain both units and registry - GLE = YTArray(GLE, input_units = GLE[0].units) - GRE = YTArray(GRE, input_units = GRE[0].units) + GLE = plot.ds.arr(GLE, units = GLE[0].units) + GRE = plot.ds.arr(GRE, units = GRE[0].units) levels = np.array(levels) min_level = self.min_level or 0 max_level = self.max_level or levels.max() @@ -1535,6 +1513,10 @@ class HaloCatalogCallback(PlotCallback): in a halo catalog with radii corresponding to the virial radius of each halo. + Note, this functionality requires the yt_astro_analysis + package. See https://yt-astro-analysis.readthedocs.io/ + for more information. 
+ Parameters ---------- halo_catalog : Dataset, DataContainer, or ~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog @@ -1599,7 +1581,7 @@ class HaloCatalogCallback(PlotCallback): >>> # plot halos from a HaloCatalog >>> import yt - >>> from yt.extensions.astro_analysis.halo_analysis.halo_catalog import HaloCatalog + >>> from yt.extensions.astro_analysis.halo_analysis.api import HaloCatalog >>> dds = yt.load("Enzo_64/DD0043/data0043") >>> hds = yt.load("rockstar_halos/halos_0.0.bin") >>> hc = HaloCatalog(data_ds=dds, halos_ds=hds) @@ -1619,18 +1601,21 @@ def __init__(self, halo_catalog, circle_args=None, circle_kwargs=None, center_field_prefix="particle_position", text_args=None, font_kwargs=None, factor=1.0): + try: + from yt_astro_analysis.halo_analysis.api import \ + HaloCatalog + except ImportError: + HaloCatalog = NotAModule('yt_astro_analysis') + PlotCallback.__init__(self) def_circle_args = {'edgecolor':'white', 'facecolor':'None'} def_text_args = {'color':'white'} - is_hc = (isinstance(halo_catalog, HaloCatalog) or - (AAHaloCatalog and isinstance(halo_catalog, AAHaloCatalog))) - if isinstance(halo_catalog, YTDataContainer): self.halo_data = halo_catalog elif isinstance(halo_catalog, Dataset): self.halo_data = halo_catalog.all_data() - elif is_hc: + elif isinstance(halo_catalog, HaloCatalog): if halo_catalog.data_source.ds == halo_catalog.halos_ds: self.halo_data = halo_catalog.data_source else: @@ -2428,8 +2413,7 @@ class RayCallback(PlotCallback): ray : YTOrthoRay, YTRay, or LightRay Ray is the object that we want to include. We overplot the projected - trajectory of the ray. If the object is a - analysis_modules.cosmological_observation.light_ray.light_ray.LightRay + trajectory of the ray. If the object is a trident.LightRay object, it will only plot the segment of the LightRay that intersects the dataset currently displayed. @@ -2457,7 +2441,7 @@ class RayCallback(PlotCallback): >>> # Overplot a LightRay object on a projection >>> import yt - >>> from yt.analysis_modules.cosmological_observation.api import LightRay + >>> from trident import LightRay >>> ds = yt.load('enzo_cosmology_plus/RD0004/RD0004') >>> lr = LightRay("enzo_cosmology_plus/AMRCosmology.enzo", ... 'Enzo', 0.0, 0.1, time_data=False) diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 3dcbd67fd63..af577397a9b 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -1,22 +1,6 @@ -""" -A plotting mechanism based on the idea of a "window" into the data. - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import numpy as np import matplotlib import types -import six import sys from collections import defaultdict @@ -39,7 +23,6 @@ from yt.data_objects.image_array import \ ImageArray -from yt.extern.six import string_types from yt.frontends.ytdata.data_structures import \ YTSpatialPlotDataset from yt.funcs import \ @@ -60,8 +43,8 @@ YTPlotCallbackError, \ YTDataTypeUnsupported, \ YTInvalidFieldType, \ - YTUnitNotRecognized, \ - YTUnitConversionError + YTUnitNotRecognized +from unyt.exceptions import UnitConversionError from .geo_plot_utils import get_mpl_transform @@ -111,7 +94,7 @@ def get_axes_unit(width, ds): if ds.no_cgs_equiv_length: return ("code_length",)*2 if iterable(width): - if isinstance(width[1], string_types): + if isinstance(width[1], str): axes_unit = (width[1], width[1]) elif iterable(width[1]): axes_unit = (width[0][1], width[1][1]) @@ -135,7 +118,12 @@ def validate_mesh_fields(data_source, fields): canonical_fields = data_source._determine_fields(fields) invalid_fields = [] for field in canonical_fields: - if data_source.ds.field_info[field].particle_type: + finfo = data_source.ds.field_info[field] + if finfo.sampling_type == "particle": + if not hasattr(data_source.ds, '_sph_ptypes'): + pass + elif finfo.is_sph_field: + continue invalid_fields.append(field) if len(invalid_fields) > 0: @@ -741,12 +729,12 @@ def set_axes_unit(self, unit_name): """ # blind except because it could be in conversion_factors or units if unit_name is not None: - if isinstance(unit_name, string_types): + if isinstance(unit_name, str): unit_name = (unit_name, unit_name) for un in unit_name: try: self.ds.length_unit.in_units(un) - except (YTUnitConversionError, UnitParseError): + except (UnitConversionError, UnitParseError): raise YTUnitNotRecognized(un) self._axes_unit_names = unit_name return self @@ -755,6 +743,28 @@ def set_axes_unit(self, unit_name): def toggle_right_handed(self): self._right_handed = not self._right_handed + def to_fits_data(self, fields=None, other_keys=None, length_unit=None, + **kwargs): + r"""Export the fields in this PlotWindow instance + to a FITSImageData instance. + + This will export a set of FITS images of either the fields specified + or all the fields already in the object. + + Parameters + ---------- + fields : list of strings + These fields will be pixelized and output. If "None", the keys of + the FRB will be used. + other_keys : dictionary, optional + A set of header keys and values to write into the FITS header. + length_unit : string, optional + the length units that the coordinates are written in. The default + is to use the default length unit of the dataset. + """ + return self.frb.to_fits_data(fields=fields, other_keys=other_keys, + length_unit=length_unit, **kwargs) + class PWViewerMPL(PlotWindow): """Viewer using matplotlib as a backend via the WindowPlotMPL. 
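Editorial aside, not part of the patch: the new PlotWindow.to_fits_data hook added above simply forwards to the underlying fixed-resolution buffer. A minimal usage sketch follows, assuming an illustrative dataset path and that FITSImageData exposes the astropy-style writeto used elsewhere in yt's FITS handling:

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset
prj = yt.ProjectionPlot(ds, "z", ("gas", "density"), width=(0.5, "unitary"))
# Export the plot's image buffer to a FITSImageData instance; length_unit
# controls the length units written into the FITS coordinate header.
fid = prj.to_fits_data(fields=[("gas", "density")], length_unit="kpc")
fid.writeto("projected_density.fits", overwrite=True)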
@@ -779,7 +789,7 @@ def _setup_origin(self): xc = None yc = None - if isinstance(origin, string_types): + if isinstance(origin, str): origin = tuple(origin.split('-'))[:3] if 1 == len(origin): origin = ('lower', 'left') + origin @@ -792,8 +802,8 @@ def _setup_origin(self): xc = self.ds.quan(origin[0], 'code_length') yc = self.ds.quan(origin[1], 'code_length') elif 3 == len(origin) and isinstance(origin[0], tuple): - xc = YTQuantity(origin[0][0], origin[0][1]) - yc = YTQuantity(origin[1][0], origin[0][1]) + xc = self.ds.quan(origin[0][0], origin[0][1]) + yc = self.ds.quan(origin[1][0], origin[0][1]) assert origin[-1] in ['window', 'domain', 'native'] @@ -1129,6 +1139,15 @@ def annotate_clear(self, index=None): self.setup_callbacks() return self + def list_annotations(self): + """ + List the current callbacks for the plot, along with their index. This + index can be used with annotate_clear to remove a callback from the + current plot. + """ + for i, cb in enumerate(self._callbacks): + print(i, cb) + def run_callbacks(self): for f in self.fields: keys = self.frb.keys() @@ -1140,11 +1159,10 @@ def run_callbacks(self): try: callback(cbw) except YTDataTypeUnsupported as e: - six.reraise(YTDataTypeUnsupported, e) + raise e except Exception as e: - six.reraise(YTPlotCallbackError, - YTPlotCallbackError(callback._type_name, e), - sys.exc_info()[2]) + new_exc = YTPlotCallbackError(callback._type_name, e) + raise new_exc.with_traceback(sys.exc_info()[2]) for key in self.frb.keys(): if key not in keys: del self.frb[key] @@ -1251,6 +1269,10 @@ class AxisAlignedSlicePlot(PWViewerMPL): data_source: YTSelectionContainer object Object to be used for data selection. Defaults to ds.all_data(), a region covering the full domain + buff_size: length 2 sequence + Size of the buffer to use for the image, i.e. the number of resolution elements + used. Effectively sets a resolution limit to the image if buff_size is + smaller than the finest gridding. Examples -------- @@ -1268,7 +1290,7 @@ class AxisAlignedSlicePlot(PWViewerMPL): def __init__(self, ds, axis, fields, center='c', width=None, axes_unit=None, origin='center-window', right_handed=True, fontsize=18, field_parameters=None, - window_size=8.0, aspect=None, data_source=None): + window_size=8.0, aspect=None, data_source=None, buff_size=(800,800)): # this will handle time series data and controllers axis = fix_axis(axis, ds) (bounds, center, display_center) = \ @@ -1294,7 +1316,7 @@ def __init__(self, ds, axis, fields, center='c', width=None, axes_unit=None, PWViewerMPL.__init__(self, slc, bounds, origin=origin, fontsize=fontsize, fields=fields, window_size=window_size, aspect=aspect, - right_handed=right_handed) + right_handed=right_handed, buff_size=buff_size) if axes_unit is None: axes_unit = get_axes_unit(width, ds) self.set_axes_unit(axes_unit) @@ -1430,6 +1452,10 @@ class ProjectionPlot(PWViewerMPL): data_source: YTSelectionContainer object Object to be used for data selection. Defaults to ds.all_data(), a region covering the full domain + buff_size: length 2 sequence + Size of the buffer to use for the image, i.e. the number of resolution elements + used. Effectively sets a resolution limit to the image if buff_size is + smaller than the finest gridding. 
Examples -------- @@ -1448,8 +1474,8 @@ class ProjectionPlot(PWViewerMPL): def __init__(self, ds, axis, fields, center='c', width=None, axes_unit=None, weight_field=None, max_level=None, origin='center-window', right_handed=True, fontsize=18, field_parameters=None, data_source=None, - method = "integrate", proj_style = None, window_size=8.0, - aspect=None): + method = "integrate", proj_style = None, window_size=8.0, + buff_size=(800,800), aspect=None): axis = fix_axis(axis, ds) if ds.geometry in ("spherical", "cylindrical", "geographic", "internal_geographic"): mylog.info("Setting origin='native' for %s geometry." % ds.geometry) @@ -1488,7 +1514,7 @@ def __init__(self, ds, axis, fields, center='c', width=None, axes_unit=None, max_level=max_level) PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin, right_handed=right_handed, fontsize=fontsize, window_size=window_size, - aspect=aspect) + aspect=aspect, buff_size=buff_size) if axes_unit is None: axes_unit = get_axes_unit(width, ds) self.set_axes_unit(axes_unit) @@ -1563,6 +1589,10 @@ class OffAxisSlicePlot(PWViewerMPL): data_source : YTSelectionContainer Object Object to be used for data selection. Defaults ds.all_data(), a region covering the full domain. + buff_size: length 2 sequence + Size of the buffer to use for the image, i.e. the number of resolution elements + used. Effectively sets a resolution limit to the image if buff_size is + smaller than the finest gridding. """ _plot_type = 'OffAxisSlice' @@ -1570,7 +1600,7 @@ class OffAxisSlicePlot(PWViewerMPL): def __init__(self, ds, normal, fields, center='c', width=None, axes_unit=None, north_vector=None, right_handed=True, fontsize=18, - field_parameters=None, data_source=None): + field_parameters=None, data_source=None, buff_size=(800,800)): (bounds, center_rot) = get_oblique_window_parameters(normal,center,width,ds) if field_parameters is None: field_parameters = {} @@ -1589,7 +1619,8 @@ def __init__(self, ds, normal, fields, center='c', width=None, # aren't well-defined for off-axis data objects PWViewerMPL.__init__(self, cutting, bounds, fields=fields, origin='center-window',periodic=False, - right_handed=right_handed, oblique=True, fontsize=fontsize) + right_handed=right_handed, oblique=True, + fontsize=fontsize, buff_size=buff_size) if axes_unit is None: axes_unit = get_axes_unit(width, ds) self.set_axes_unit(axes_unit) @@ -1717,6 +1748,10 @@ class OffAxisProjectionPlot(PWViewerMPL): data_source: YTSelectionContainer object Object to be used for data selection. Defaults to ds.all_data(), a region covering the full domain + buff_size: length 2 sequence + Size of the buffer to use for the image, i.e. the number of resolution elements + used. Effectively sets a resolution limit to the image if buff_size is + smaller than the finest gridding. 
""" _plot_type = 'OffAxisProjection' _frb_generator = OffAxisProjectionFixedResolutionBuffer @@ -1726,7 +1761,7 @@ def __init__(self, ds, normal, fields, center='c', width=None, max_level=None, north_vector=None, right_handed=True, volume=None, no_ghost=False, le=None, re=None, interpolated=False, fontsize=18, method="integrate", - data_source=None): + data_source=None, buff_size=(800,800)): (bounds, center_rot) = \ get_oblique_window_parameters(normal,center,width,ds,depth=depth) fields = ensure_list(fields)[:] @@ -1754,7 +1789,7 @@ def __init__(self, ds, normal, fields, center='c', width=None, PWViewerMPL.__init__( self, OffAxisProj, bounds, fields=fields, origin='center-window', periodic=False, oblique=True, right_handed=right_handed, - fontsize=fontsize) + fontsize=fontsize, buff_size=buff_size) if axes_unit is None: axes_unit = get_axes_unit(width, ds) self.set_axes_unit(axes_unit) @@ -1969,7 +2004,7 @@ def SlicePlot(ds, normal=None, fields=None, axis=None, *args, **kwargs): # use an AxisAlignedSlicePlot where possible, e.g.: # maybe someone passed normal=[0,0,0.2] when they should have just used "z" - if iterable(normal) and not isinstance(normal, string_types): + if iterable(normal) and not isinstance(normal, str): if np.count_nonzero(normal) == 1: normal = ("x","y","z")[np.nonzero(normal)[0][0]] else: @@ -1977,7 +2012,7 @@ def SlicePlot(ds, normal=None, fields=None, axis=None, *args, **kwargs): np.divide(normal, np.dot(normal,normal), normal) # by now the normal should be properly set to get either a On/Off Axis plot - if iterable(normal) and not isinstance(normal, string_types): + if iterable(normal) and not isinstance(normal, str): # OffAxisSlicePlot has hardcoded origin; remove it if in kwargs if 'origin' in kwargs: msg = "Ignoring 'origin' keyword as it is ill-defined for " \ @@ -2103,9 +2138,9 @@ def plot_2d(ds, fields, center='c', width=None, axes_unit=None, raise NotImplementedError("plot_2d does not yet support datasets with {} geometries".format(ds.geometry)) # Part of the convenience of plot_2d is to eliminate the use of the # superfluous coordinate, so we do that also with the center argument - if not isinstance(center, string_types) and obj_length(center) == 2: - c0_string = isinstance(center[0], string_types) - c1_string = isinstance(center[1], string_types) + if not isinstance(center, str) and obj_length(center) == 2: + c0_string = isinstance(center[0], str) + c1_string = isinstance(center[1], str) if not c0_string and not c1_string: if obj_length(center[0]) == 2 and c1_string: center = ds.arr(center[0], center[1]) diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index 30d1e4d12c4..16639ece4e4 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -1,22 +1,4 @@ -""" -This is a simple mechanism for interfacing with Profile and Phase plots - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - -from yt.extern.six.moves import builtins -from yt.extern.six.moves import zip as izip -from yt.extern.six import string_types, iteritems +import builtins from collections import OrderedDict from distutils.version import LooseVersion import base64 @@ -120,7 +102,7 @@ def sanitize_label(label, nprofiles): raise RuntimeError("Number of labels must match number of profiles") for l in label: - if l is not None and not isinstance(l, string_types): + if l is not None and not isinstance(l, str): raise RuntimeError("All labels must be None or a string") return label @@ -283,9 +265,9 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): self._setup_plots() unique = set(self.plots.values()) if len(unique) < len(self.plots): - iters = izip(range(len(unique)), sorted(unique)) + iters = zip(range(len(unique)), sorted(unique)) else: - iters = iteritems(self.plots) + iters = self.plots.items() if not suffix: suffix = "png" suffix = ".%s" % suffix @@ -353,9 +335,9 @@ def _repr_html_(self): ret = '' unique = set(self.plots.values()) if len(unique) < len(self.plots): - iters = izip(range(len(unique)), sorted(unique)) + iters = zip(range(len(unique)), sorted(unique)) else: - iters = iteritems(self.plots) + iters = self.plots.items() for uid, plot in iters: with matplotlib_style_context(): img = plot._repr_png_() @@ -1263,7 +1245,7 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): if suffix is None: suffix = get_image_suffix(name) if suffix != '': - for k, v in iteritems(self.plots): + for k, v in self.plots.items(): names.append(v.save(name, mpl_kwargs)) return names else: diff --git a/yt/visualization/streamlines.py b/yt/visualization/streamlines.py index 05b60b3ed59..acd6872a36c 100644 --- a/yt/visualization/streamlines.py +++ b/yt/visualization/streamlines.py @@ -1,18 +1,3 @@ -""" -Import the components of the volume rendering extension - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from yt.data_objects.construction_data_containers import YTStreamline from yt.funcs import get_pbar @@ -129,9 +114,9 @@ def __init__(self, ds, positions, xfield='velocity_x', yfield='velocity_x', if length is None: length = np.max(self.ds.domain_right_edge-self.ds.domain_left_edge) self.length = sanitize_length(length, ds) - self.steps = int(length/dx)+1 + self.steps = int(self.length / self.dx) + 1 # Fix up the dx. - self.dx = 1.0*self.length/self.steps + self.dx = 1.0 * self.length / self.steps self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64') self.magnitudes = None if self.get_magnitude: diff --git a/yt/visualization/tests/test_callbacks.py b/yt/visualization/tests/test_callbacks.py index 2e6bd5a8f2f..26d8441e8a7 100644 --- a/yt/visualization/tests/test_callbacks.py +++ b/yt/visualization/tests/test_callbacks.py @@ -1,18 +1,3 @@ -""" -Tests for callbacks - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- import tempfile import shutil from numpy.testing import \ @@ -184,7 +169,7 @@ def test_ray_callback(): ax = 'z' vector = [1.0,1.0,1.0] ds = fake_amr_ds(fields = ("density",)) - ray = ds.ray((0.1, 0.2, 0.3), (1.6, 1.8, 1.5)) + ray = ds.ray((0.1, 0.2, 0.3), (.6, .8, .5)) oray = ds.ortho_ray(0, (0.3, 0.4)) p = ProjectionPlot(ds, ax, "density") p.annotate_ray(oray) @@ -206,7 +191,7 @@ def test_ray_callback(): with _cleanup_fname() as prefix: ds = fake_amr_ds(fields = ("density",), geometry="spherical") - ray = ds.ray((0.1, 0.2, 0.3), (1.6, 1.8, 1.5)) + ray = ds.ray((0.1, 0.2, 0.3), (0.6, 0.8, 0.5)) oray = ds.ortho_ray(0, (0.3, 0.4)) p = ProjectionPlot(ds, "r", "density") p.annotate_ray(oray) diff --git a/yt/visualization/tests/test_color_maps.py b/yt/visualization/tests/test_color_maps.py index f6ecaf31daf..c83bd0987ff 100644 --- a/yt/visualization/tests/test_color_maps.py +++ b/yt/visualization/tests/test_color_maps.py @@ -1,13 +1,3 @@ -""" -Tests for visualization.color_maps -""" -#----------------------------------------------------------------------------- -# Copyright (c) 2018, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import os import shutil import tempfile diff --git a/yt/visualization/tests/test_export_frb.py b/yt/visualization/tests/test_export_frb.py index c506e945736..61c2ab68768 100644 --- a/yt/visualization/tests/test_export_frb.py +++ b/yt/visualization/tests/test_export_frb.py @@ -1,18 +1,3 @@ -""" -Tests for exporting an FRB as a dataset - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np from yt.testing import \ fake_random_ds, assert_equal, \ diff --git a/yt/visualization/tests/test_filters.py b/yt/visualization/tests/test_filters.py index d3ccde66cf9..8ee04734e83 100644 --- a/yt/visualization/tests/test_filters.py +++ b/yt/visualization/tests/test_filters.py @@ -1,9 +1,3 @@ -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -#----------------------------------------------------------------------------- """ Tests for frb filters diff --git a/yt/visualization/tests/test_fits_image.py b/yt/visualization/tests/test_fits_image.py index 9943aad4f55..487306d0363 100644 --- a/yt/visualization/tests/test_fits_image.py +++ b/yt/visualization/tests/test_fits_image.py @@ -1,25 +1,12 @@ -""" -Unit test FITS image creation in yt. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import tempfile import os import shutil from yt.testing import fake_random_ds, requires_module from yt.convenience import load from numpy.testing import \ - assert_equal + assert_equal, \ + assert_allclose +from yt.utilities.on_demand_imports import _astropy from yt.visualization.fits_image import \ FITSImageData, FITSProjection, \ FITSSlice, FITSOffAxisSlice, \ @@ -43,9 +30,10 @@ def test_fits_image(): prj = ds.proj("density", 2) prj_frb = prj.to_frb((0.5, "unitary"), 128) - fid1 = FITSImageData(prj_frb, fields=["density","temperature"], units="cm") - fits_prj = FITSProjection(ds, "z", [ds.fields.gas.density,"temperature"], image_res=128, - width=(0.5,"unitary")) + fid1 = prj_frb.to_fits_data(fields=[("gas", "density"), ("gas", "temperature")], + length_unit="cm") + fits_prj = FITSProjection(ds, "z", [ds.fields.gas.density,"temperature"], + image_res=128, width=(0.5, "unitary")) assert_equal(fid1["density"].data, fits_prj["density"].data) assert_equal(fid1["temperature"].data, fits_prj["temperature"].data) @@ -55,12 +43,18 @@ def test_fits_image(): assert_equal(fid1["density"].data, new_fid1["density"].data) assert_equal(fid1["temperature"].data, new_fid1["temperature"].data) + assert_equal(fid1.length_unit, new_fid1.length_unit) + assert_equal(fid1.time_unit, new_fid1.time_unit) + assert_equal(fid1.mass_unit, new_fid1.mass_unit) + assert_equal(fid1.velocity_unit, new_fid1.velocity_unit) + assert_equal(fid1.magnetic_unit, new_fid1.magnetic_unit) + assert_equal(fid1.current_time, new_fid1.current_time) ds2 = load("fid1.fits") ds2.index - assert ("fits","density") in ds2.field_list - assert ("fits","temperature") in ds2.field_list + assert ("fits", "density") in ds2.field_list + assert ("fits", "temperature") in ds2.field_list dw_cm = ds2.domain_width.in_units("cm") @@ -70,9 +64,10 @@ def test_fits_image(): slc = ds.slice(2, 0.5) slc_frb = slc.to_frb((0.5, "unitary"), 128) - fid2 = FITSImageData(slc_frb, fields=["density","temperature"], units="cm") - fits_slc = FITSSlice(ds, "z", ["density",("gas","temperature")], image_res=128, - width=(0.5,"unitary")) + fid2 = slc_frb.to_fits_data(fields=[("gas", "density"), ("gas", "temperature")], + length_unit="cm") + fits_slc = FITSSlice(ds, "z", [("gas", "density"), ("gas", "temperature")], + image_res=128, width=(0.5,"unitary")) assert_equal(fid2["density"].data, fits_slc["density"].data) assert_equal(fid2["temperature"].data, fits_slc["temperature"].data) @@ -80,17 +75,23 @@ def test_fits_image(): dens_img = fid2.pop("density") temp_img = fid2.pop("temperature") - # This already has some assertions in it, so we don't need to do anything - # with it other than just make one - FITSImageData.from_images([dens_img, temp_img]) + combined_fid = FITSImageData.from_images([dens_img, temp_img]) + assert_equal(combined_fid.length_unit, dens_img.length_unit) + assert_equal(combined_fid.time_unit, dens_img.time_unit) + assert_equal(combined_fid.mass_unit, dens_img.mass_unit) + assert_equal(combined_fid.velocity_unit, dens_img.velocity_unit) + assert_equal(combined_fid.magnetic_unit, dens_img.magnetic_unit) + assert_equal(combined_fid.current_time, dens_img.current_time) cut = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6]) cut_frb = cut.to_frb((0.5, "unitary"), 128) - fid3 = FITSImageData(cut_frb, fields=[("gas","density"), ds.fields.gas.temperature], units="cm") - fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density","temperature"], + fid3 = 
cut_frb.to_fits_data(fields=[("gas", "density"), + ds.fields.gas.temperature], + length_unit="cm") + fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density", "temperature"], image_res=128, center=[0.5, 0.42, 0.6], - width=(0.5,"unitary")) + width=(0.5, "unitary")) assert_equal(fid3["density"].data, fits_cut["density"].data) assert_equal(fid3["temperature"].data, fits_cut["temperature"].data) @@ -125,7 +126,7 @@ def test_fits_image(): cvg = ds.covering_grid(ds.index.max_level, [0.25, 0.25, 0.25], [32, 32, 32], fields=["density", "temperature"]) - fid5 = FITSImageData(cvg, fields=["density", "temperature"]) + fid5 = cvg.to_fits_data(fields=["density", "temperature"]) assert fid5.dimensionality == 3 fid5.update_header("density", "time", 0.1) @@ -135,5 +136,26 @@ def test_fits_image(): assert fid5["temperature"].header["units"] == "cgs" assert fid5["density"].header["units"] == "cgs" + fid6 = FITSImageData.from_images(fid5) + + fid5.change_image_name("density", "mass_per_volume") + assert fid5["mass_per_volume"].name == "mass_per_volume" + assert fid5["mass_per_volume"].header["BTYPE"] == "mass_per_volume" + assert "mass_per_volume" in fid5.fields + assert "mass_per_volume" in fid5.field_units + assert "density" not in fid5.fields + assert "density" not in fid5.field_units + + assert "density" in fid6.fields + assert_equal(fid6["density"].data, fid5["mass_per_volume"].data) + + fid7 = FITSImageData.from_images(fid4) + fid7.convolve("density", (3.0, "cm")) + + sigma = 3.0/fid7.wcs.wcs.cdelt[0] + kernel = _astropy.conv.Gaussian2DKernel(x_stddev=sigma) + data_conv = _astropy.conv.convolve(fid4["density"].data.d, kernel) + assert_allclose(data_conv, fid7["density"].data.d) + os.chdir(curdir) shutil.rmtree(tmpdir) diff --git a/yt/visualization/tests/test_geo_projections.py b/yt/visualization/tests/test_geo_projections.py index a4914bc9a13..f1808ebf59b 100644 --- a/yt/visualization/tests/test_geo_projections.py +++ b/yt/visualization/tests/test_geo_projections.py @@ -1,15 +1,3 @@ -""" -Tests for making unstructured mesh slices - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from nose.plugins.attrib import attr import yt diff --git a/yt/visualization/tests/test_image_writer.py b/yt/visualization/tests/test_image_writer.py index 3b0b9674078..3fa6b6c2849 100644 --- a/yt/visualization/tests/test_image_writer.py +++ b/yt/visualization/tests/test_image_writer.py @@ -1,13 +1,3 @@ -""" -Tests for visualization.image_writer -""" -# ----------------------------------------------------------------------------- -# Copyright (c) 2018, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-# ----------------------------------------------------------------------------- import os import shutil import tempfile diff --git a/yt/visualization/tests/test_line_plots.py b/yt/visualization/tests/test_line_plots.py index cd4f40086d3..df719566f3c 100644 --- a/yt/visualization/tests/test_line_plots.py +++ b/yt/visualization/tests/test_line_plots.py @@ -1,15 +1,3 @@ -""" -Tests for making line plots - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from nose.plugins.attrib import attr from nose.tools import assert_raises diff --git a/yt/visualization/tests/test_mesh_slices.py b/yt/visualization/tests/test_mesh_slices.py index 240df1b70f5..9c96b61c6be 100644 --- a/yt/visualization/tests/test_mesh_slices.py +++ b/yt/visualization/tests/test_mesh_slices.py @@ -1,15 +1,3 @@ -""" -Tests for making unstructured mesh slices - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import numpy as np from nose.plugins.attrib import attr diff --git a/yt/visualization/tests/test_offaxisprojection.py b/yt/visualization/tests/test_offaxisprojection.py index 03337f60202..59ee958b0e8 100644 --- a/yt/visualization/tests/test_offaxisprojection.py +++ b/yt/visualization/tests/test_offaxisprojection.py @@ -1,17 +1,3 @@ -""" -Test for off_axis_projection and write_projection - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- import os import shutil import tempfile diff --git a/yt/visualization/tests/test_particle_plot.py b/yt/visualization/tests/test_particle_plot.py index 01b1230bc96..872b1bb9b90 100644 --- a/yt/visualization/tests/test_particle_plot.py +++ b/yt/visualization/tests/test_particle_plot.py @@ -1,17 +1,3 @@ -""" -Test suite for Particle Plots - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import os import tempfile import shutil diff --git a/yt/visualization/tests/test_plotwindow.py b/yt/visualization/tests/test_plotwindow.py index a08a5d3f023..c237c9c3b29 100644 --- a/yt/visualization/tests/test_plotwindow.py +++ b/yt/visualization/tests/test_plotwindow.py @@ -1,17 +1,5 @@ -""" -Testsuite for PlotWindow class - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- +#!/usr/bin/env python +# -*- coding: utf-8 -*- import matplotlib import numpy as np import os @@ -138,6 +126,8 @@ def setup(): 'mip' ) +BUFF_SIZES = [(800, 800), (1600, 1600), (1254, 1254), (800, 600)] + def simple_contour(test_obj, plot): plot.annotate_contour(test_obj.plot_field) @@ -336,6 +326,15 @@ def test_projection_plot_m(self): proj = ProjectionPlot(test_ds, 0, 'density', method=method) proj.save() + def test_projection_plot_bs(self): + test_ds = fake_random_ds(16) + for bf in BUFF_SIZES: + proj = ProjectionPlot(test_ds, 0, ('gas', 'density'), buff_size=bf) + image = proj.frb['gas', 'density'] + + # note that image.shape is inverted relative to the passed in buff_size + assert_equal(image.shape[::-1], bf) + def test_offaxis_slice_plot(self): test_ds = fake_random_ds(16) slc = OffAxisSlicePlot(test_ds, [1, 1, 1], "density") @@ -476,7 +475,7 @@ def test_set_unit(): slc.set_unit('temperature', 'degF') - assert str(slc.frb['gas', 'temperature'].units) == 'degF' + assert str(slc.frb['gas', 'temperature'].units) == '°F' assert_array_almost_equal(np.array(slc.frb['gas', 'temperature']), np.array(orig_array)*1.8 - 459.67) @@ -484,7 +483,7 @@ def test_set_unit(): # new unit slc.set_buff_size(1000) - assert str(slc.frb['gas', 'temperature'].units) == 'degF' + assert str(slc.frb['gas', 'temperature'].units) == '°F' slc.set_buff_size(800) diff --git a/yt/visualization/tests/test_profile_plots.py b/yt/visualization/tests/test_profile_plots.py index 67f132ddfae..9f68ad932f4 100644 --- a/yt/visualization/tests/test_profile_plots.py +++ b/yt/visualization/tests/test_profile_plots.py @@ -1,17 +1,3 @@ -""" -Testsuite for ProfilePlot and PhasePlot - - - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- import os import tempfile import shutil diff --git a/yt/visualization/tests/test_raw_field_slices.py b/yt/visualization/tests/test_raw_field_slices.py index 71561a495e1..891d416433d 100644 --- a/yt/visualization/tests/test_raw_field_slices.py +++ b/yt/visualization/tests/test_raw_field_slices.py @@ -1,16 +1,3 @@ -""" -Tests for making slices through raw fields - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2017, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import yt from yt.utilities.answer_testing.framework import \ requires_ds, \ diff --git a/yt/visualization/tests/test_splat.py b/yt/visualization/tests/test_splat.py index 0d499c9b5be..131ba2e25cd 100644 --- a/yt/visualization/tests/test_splat.py +++ b/yt/visualization/tests/test_splat.py @@ -1,17 +1,3 @@ -""" -Test for write_bitmap and add_rgba_points - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import os import os.path import tempfile @@ -30,7 +16,6 @@ def setup(): def test_splat(): - """Tests functionality of off_axis_projection and write_projection.""" # Perform I/O in safe place instead of yt main dir tmpdir = tempfile.mkdtemp() curdir = os.getcwd() diff --git a/yt/visualization/volume_rendering/UBVRI.py b/yt/visualization/volume_rendering/UBVRI.py index cacc7c8f799..abdeab49f74 100644 --- a/yt/visualization/volume_rendering/UBVRI.py +++ b/yt/visualization/volume_rendering/UBVRI.py @@ -1,19 +1,3 @@ -""" -Johnson Filters - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np diff --git a/yt/visualization/volume_rendering/__init__.py b/yt/visualization/volume_rendering/__init__.py index b5bd4a23214..ee2b9899dde 100644 --- a/yt/visualization/volume_rendering/__init__.py +++ b/yt/visualization/volume_rendering/__init__.py @@ -5,10 +5,3 @@ """ -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- diff --git a/yt/visualization/volume_rendering/_cuda_caster.cu b/yt/visualization/volume_rendering/_cuda_caster.cu index c8dbdad1b6f..e5c60e4f04e 100644 --- a/yt/visualization/volume_rendering/_cuda_caster.cu +++ b/yt/visualization/volume_rendering/_cuda_caster.cu @@ -1,11 +1,3 @@ -/******************************************************************************* -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -*******************************************************************************/ - // An attempt at putting the ray-casting operation into CUDA //extern __shared__ float array[]; diff --git a/yt/visualization/volume_rendering/api.py b/yt/visualization/volume_rendering/api.py index 8377b8ff957..21f9f4ee19a 100644 --- a/yt/visualization/volume_rendering/api.py +++ b/yt/visualization/volume_rendering/api.py @@ -1,18 +1,3 @@ -""" -API for yt.visualization.volume_rendering - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - from .transfer_functions import TransferFunction, ColorTransferFunction, \ PlanckTransferFunction, \ MultiVariateTransferFunction, \ diff --git a/yt/visualization/volume_rendering/camera.py b/yt/visualization/volume_rendering/camera.py index 36cca04dfe7..adfac3cbbe9 100644 --- a/yt/visualization/volume_rendering/camera.py +++ b/yt/visualization/volume_rendering/camera.py @@ -1,23 +1,9 @@ -""" -Volume Rendering Camera Class - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ---------------------------------------------------------------------------- - from yt.funcs import iterable, ensure_numpy_array from yt.utilities.orientation import Orientation from yt.units.yt_array import \ YTArray, \ YTQuantity from yt.utilities.math_utils import get_rotation_matrix -from yt.extern.six import string_types from .utils import data_source_or_all from .lens import \ lenses, \ @@ -33,12 +19,12 @@ def _sanitize_camera_property_units(value, scene): elif isinstance(value, YTArray) and len(value) == 3: return scene.arr(value).in_units('unitary') elif (len(value) == 2 and isinstance(value[0], numeric_type) - and isinstance(value[1], string_types)): + and isinstance(value[1], str)): return scene.arr([scene.arr(value[0], value[1]).in_units('unitary')]*3) if len(value) == 3: if all([iterable(v) for v in value]): if all([isinstance(v[0], numeric_type) and - isinstance(v[1], string_types) for v in value]): + isinstance(v[1], str) for v in value]): return scene.arr( [scene.arr(v[0], v[1]) for v in value]) else: @@ -325,18 +311,18 @@ def set_defaults_from_data_source(self, data_source): (zma - zmi) ** 2) focus = data_source.get_field_parameter('center') - if iterable(width) and len(width) > 1 and isinstance(width[1], string_types): - width = data_source.ds.quan(width[0], input_units=width[1]) + if iterable(width) and len(width) > 1 and isinstance(width[1], str): + width = data_source.ds.quan(width[0], units=width[1]) # Now convert back to code length for subsequent manipulation width = width.in_units("code_length") # .value if not iterable(width): width = data_source.ds.arr([width, width, width], - input_units='code_length') + units='code_length') # left/right, top/bottom, front/back if not isinstance(width, YTArray): - width = data_source.ds.arr(width, input_units="code_length") + width = data_source.ds.arr(width, units="code_length") if not isinstance(focus, YTArray): - focus = data_source.ds.arr(focus, input_units="code_length") + focus = data_source.ds.arr(focus, units="code_length") # We can't use the property setters yet, since they rely on attributes # that will not be set up until the base class initializer is called. diff --git a/yt/visualization/volume_rendering/camera_path.py b/yt/visualization/volume_rendering/camera_path.py index 5ec0f6de59a..ab816fb762a 100644 --- a/yt/visualization/volume_rendering/camera_path.py +++ b/yt/visualization/volume_rendering/camera_path.py @@ -1,19 +1,3 @@ -""" -Create smooth camera paths from keyframes. - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import random import numpy as np from yt.visualization.volume_rendering.create_spline import create_spline @@ -250,7 +234,7 @@ def get_shortest_path(self): current = next self.current_score = next_score if self.current_score > self.best_score: - #print num_eval, self.current_score, self.best_score, current + #print(num_eval, self.current_score, self.best_score, current) self.best_score = self.current_score self.best = current break diff --git a/yt/visualization/volume_rendering/create_spline.py b/yt/visualization/volume_rendering/create_spline.py index 78efeabf114..7f7315892f4 100644 --- a/yt/visualization/volume_rendering/create_spline.py +++ b/yt/visualization/volume_rendering/create_spline.py @@ -1,19 +1,3 @@ -""" -Create a Catmull-Rom spline. - - - -""" -from __future__ import print_function - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np import sys diff --git a/yt/visualization/volume_rendering/glfw_inputhook.py b/yt/visualization/volume_rendering/glfw_inputhook.py index ff123bdd006..9ec88743ff4 100644 --- a/yt/visualization/volume_rendering/glfw_inputhook.py +++ b/yt/visualization/volume_rendering/glfw_inputhook.py @@ -70,11 +70,11 @@ def inputhook_glfw(): used_time = glfw.GetTime() - t if used_time > 10.0: - # print 'Sleep for 1 s' # dbg + # print('Sleep for 1 s' # dbg) time.sleep(1.0) elif used_time > 0.1: # Few GUI events coming in, so we can sleep longer - # print 'Sleep for 0.05 s' # dbg + # print('Sleep for 0.05 s' # dbg) time.sleep(0.05) else: # Many GUI events coming in, so sleep only very little diff --git a/yt/visualization/volume_rendering/image_handling.py b/yt/visualization/volume_rendering/image_handling.py index 33c17c09ee4..c9e9a5b7166 100644 --- a/yt/visualization/volume_rendering/image_handling.py +++ b/yt/visualization/volume_rendering/image_handling.py @@ -1,17 +1,3 @@ -""" -Export/Import of volume rendered images. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np diff --git a/yt/visualization/volume_rendering/input_events.py b/yt/visualization/volume_rendering/input_events.py index c2839821818..65e073d5624 100644 --- a/yt/visualization/volume_rendering/input_events.py +++ b/yt/visualization/volume_rendering/input_events.py @@ -1,17 +1,3 @@ -# encoding: utf-8 -""" -Input event handlers for Interactive Data Visualization - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-# ---------------------------------------------------------------------------- - # This is a part of the experimental Interactive Data Visualization from collections import defaultdict, namedtuple diff --git a/yt/visualization/volume_rendering/interactive_loop.py b/yt/visualization/volume_rendering/interactive_loop.py index f80634e9e14..9f9b1b0f0a6 100644 --- a/yt/visualization/volume_rendering/interactive_loop.py +++ b/yt/visualization/volume_rendering/interactive_loop.py @@ -1,17 +1,3 @@ -# encoding: utf-8 -""" -Event loop for Interactive Data Visualization - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ---------------------------------------------------------------------------- - # This is a part of the experimental Interactive Data Visualization import os diff --git a/yt/visualization/volume_rendering/interactive_vr.py b/yt/visualization/volume_rendering/interactive_vr.py index cef7a69a845..fa0487776e6 100644 --- a/yt/visualization/volume_rendering/interactive_vr.py +++ b/yt/visualization/volume_rendering/interactive_vr.py @@ -1,17 +1,3 @@ -# encoding: utf-8 -""" -Interactive Data Visualization classes for Scene, Camera and BlockCollection - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ---------------------------------------------------------------------------- - # This is a part of the experimental Interactive Data Visualization import OpenGL.GL as GL diff --git a/yt/visualization/volume_rendering/interactive_vr_helpers.py b/yt/visualization/volume_rendering/interactive_vr_helpers.py index 38a243e4e9d..f2985d46207 100644 --- a/yt/visualization/volume_rendering/interactive_vr_helpers.py +++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py @@ -1,16 +1,3 @@ -# encoding: utf-8 -""" -Helper routines for Interactive Data Visualization -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - # This part of the experimental Interactive Data Visualization import numpy as np diff --git a/yt/visualization/volume_rendering/lens.py b/yt/visualization/volume_rendering/lens.py index 22c11678d42..00ad1d2b2a8 100644 --- a/yt/visualization/volume_rendering/lens.py +++ b/yt/visualization/volume_rendering/lens.py @@ -1,18 +1,3 @@ -""" -Lens Classes - - - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-# ---------------------------------------------------------------------------- - from __future__ import division from yt.funcs import mylog from yt.utilities.parallel_tools.parallel_analysis_interface import \ diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index bd876574dd5..8327579f04c 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -1,16 +1,3 @@ -""" -Volume rendering - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - from .scene import Scene from .render_source import VolumeSource @@ -19,7 +6,11 @@ from yt.funcs import mylog, iterable from yt.utilities.lib.partitioned_grid import \ PartitionedGrid +from yt.units.unit_object import Unit from yt.data_objects.api import ImageArray +from yt.utilities.lib.pixelization_routines import \ + off_axis_projection_SPH, \ + normalization_2d_utility import numpy as np @@ -104,20 +95,173 @@ def off_axis_projection(data_source, center, normal_vector, >>> write_image(np.log10(image), "offaxis.png") """ - if method not in ['integrate','sum']: raise NotImplementedError("Only 'integrate' or 'sum' methods are valid for off-axis-projections") if interpolated: raise NotImplementedError("Only interpolated=False methods are currently implemented for off-axis-projections") - data_source = data_source_or_all(data_source) + + item = data_source._determine_fields([item])[0] + + # Assure vectors are numpy arrays as expected by cython code + normal_vector = np.array(normal_vector, dtype='float64') + if north_vector is not None: + north_vector = np.array(north_vector, dtype='float64') + # Add the normal as a field parameter to the data source + # so line of sight fields can use it + data_source.set_field_parameter("axis", normal_vector) + # Sanitize units if not hasattr(center, "units"): center = data_source.ds.arr(center, 'code_length') if not hasattr(width, "units"): width = data_source.ds.arr(width, 'code_length') + + if hasattr(data_source.ds, '_sph_ptypes'): + if method != 'integrate': + raise NotImplementedError("SPH Only allows 'integrate' method") + + sph_ptypes = data_source.ds._sph_ptypes + fi = data_source.ds.field_info[item] + + raise_error = False + + ptype = sph_ptypes[0] + ppos = ["particle_position_%s" % ax for ax in "xyz"] + # Assure that the field we're trying to off-axis project + # has a field type as the SPH particle type or if the field is an + # alias to an SPH field or is a 'gas' field + if item[0] in data_source.ds.known_filters: + if item[0] not in sph_ptypes: + raise_error = True + else: + ptype = item[0] + ppos = ["x", "y", "z"] + elif fi.alias_field: + if fi.alias_name[0] not in sph_ptypes: + raise_error = True + elif item[0] != 'gas': + ptype = item[0] + else: + if fi.name[0] not in sph_ptypes and fi.name[0] != 'gas': + raise_error = True + + if raise_error: + raise RuntimeError( + "Can only perform off-axis projections for SPH fields, " + "Received '%s'" % (item,) + ) + + normal = np.array(normal_vector) + normal = normal / np.linalg.norm(normal) + + # If north_vector is None, we set the default here. 
+ # This is chosen so that if normal_vector is one of the + # cartesian coordinate axes, the projection will match + # the corresponding on-axis projection. + if north_vector is None: + vecs = np.identity(3) + t = np.cross(vecs, normal).sum(axis=1) + ax = t.argmax() + east_vector = np.cross(vecs[ax, :], normal).ravel() + north = np.cross(normal, east_vector).ravel() + else: + north = np.array(north_vector) + north = north / np.linalg.norm(north) + east_vector = np.cross(north, normal).ravel() + + #if weight is None: + buf = np.zeros((resolution[0], resolution[1]), dtype='float64') + + x_min = center[0] - width[0] / 2 + x_max = center[0] + width[0] / 2 + y_min = center[1] - width[1] / 2 + y_max = center[1] + width[1] / 2 + z_min = center[2] - width[2] / 2 + z_max = center[2] + width[2] / 2 + finfo = data_source.ds.field_info[item] + ounits = finfo.output_units + bounds = [x_min, x_max, y_min, y_max, z_min, z_max] + + if weight is None: + for chunk in data_source.chunks([], 'io'): + off_axis_projection_SPH( + chunk[ptype, ppos[0]].to('code_length').d, + chunk[ptype, ppos[1]].to('code_length').d, + chunk[ptype, ppos[2]].to('code_length').d, + chunk[ptype, "mass"].to('code_mass').d, + chunk[ptype, "density"].to('code_density').d, + chunk[ptype, "smoothing_length"].to('code_length').d, + bounds, + center.to('code_length').d, + width.to('code_length').d, + chunk[item].in_units(ounits), + buf, + normal_vector, + north) + + # Assure that the path length unit is in the default length units + # for the dataset by scaling the units of the smoothing length + path_length_unit = data_source.ds._get_field_info((ptype, 'smoothing_length')).units + path_length_unit = Unit(path_length_unit, registry=data_source.ds.unit_registry) + default_path_length_unit = data_source.ds.unit_system['length'] + buf *= data_source.ds.quan(1, path_length_unit).in_units(default_path_length_unit) + item_unit = data_source.ds._get_field_info(item).units + item_unit = Unit(item_unit, registry=data_source.ds.unit_registry) + funits = item_unit * default_path_length_unit + + else: + # if there is a weight field, take two projections: + # one of field*weight, the other of just weight, and divide them + weight_buff = np.zeros((resolution[0], resolution[1]), dtype='float64') + wounits = data_source.ds.field_info[weight].output_units + + for chunk in data_source.chunks([], 'io'): + off_axis_projection_SPH( + chunk[ptype, ppos[0]].to('code_length').d, + chunk[ptype, ppos[1]].to('code_length').d, + chunk[ptype, ppos[2]].to('code_length').d, + chunk[ptype, "mass"].to('code_mass').d, + chunk[ptype, "density"].to('code_density').d, + chunk[ptype, "smoothing_length"].to('code_length').d, + bounds, + center.to('code_length').d, + width.to('code_length').d, + chunk[item].in_units(ounits), + buf, + normal_vector, + north, + weight_field=chunk[weight].in_units(wounits)) + + for chunk in data_source.chunks([], 'io'): + off_axis_projection_SPH( + chunk[ptype, ppos[0]].to('code_length').d, + chunk[ptype, ppos[1]].to('code_length').d, + chunk[ptype, ppos[2]].to('code_length').d, + chunk[ptype, "mass"].to('code_mass').d, + chunk[ptype, "density"].to('code_density').d, + chunk[ptype, "smoothing_length"].to('code_length').d, + bounds, + center.to('code_length').d, + width.to('code_length').d, + chunk[weight].to(wounits), + weight_buff, + normal_vector, + north) + + normalization_2d_utility(buf, weight_buff) + item_unit = data_source.ds._get_field_info(item).units + item_unit = Unit(item_unit, registry=data_source.ds.unit_registry) + funits = item_unit 
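Editorial aside, not part of the patch: the weighted branch above accumulates two projections and divides them pixel by pixel, result = sum_i(w_i * f_i * dl_i) / sum_i(w_i * dl_i), which is what normalization_2d_utility is assumed to perform in place. A minimal NumPy sketch of that normalization step:

import numpy as np

def normalize_weighted_projection(buf, weight_buff):
    # Divide the field*weight projection by the weight projection,
    # leaving pixels that accumulated zero weight at zero.
    out = np.zeros_like(buf)
    np.divide(buf, weight_buff, out=out, where=weight_buff != 0)
    return out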
+ + myinfo = {'field':item, 'east_vector':east_vector, + 'north_vector':north_vector, 'normal_vector':normal_vector, + 'width':width, 'units':funits, 'type':'SPH smoothed projection'} + + return ImageArray(buf, funits, registry=data_source.ds.unit_registry, info=myinfo) + sc = Scene() data_source.ds.index if item is None: @@ -135,10 +279,11 @@ def off_axis_projection(data_source, center, normal_vector, def _make_wf(f, w): def temp_weightfield(a, b): tr = b[f].astype("float64") * b[w] - return b.apply_units(tr, a.units) + return tr.d return temp_weightfield - data_source.ds.field_info.add_field(weightfield, sampling_type="cell", - function=_make_wf(item, weight)) + data_source.ds.field_info.add_field(weightfield, + sampling_type="cell", + function=_make_wf(item, weight)) # Now we have to tell the dataset to add it and to calculate # its dependencies.. deps, _ = data_source.ds.field_info.check_derived_fields([weightfield]) diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 5d6ce7d86b1..eb92cf4f865 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1,19 +1,4 @@ -""" -Import the components of the volume rendering extension - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - -from yt.extern.six.moves import builtins +import builtins import numpy as np from yt.config import \ @@ -173,15 +158,15 @@ def __init__(self, center, normal_vector, width, self.sub_samples = sub_samples self.rotation_vector = north_vector if iterable(width) and len(width) > 1 and isinstance(width[1], str): - width = self.ds.quan(width[0], input_units=width[1]) + width = self.ds.quan(width[0], units=width[1]) # Now convert back to code length for subsequent manipulation width = width.in_units("code_length").value if not iterable(width): width = (width, width, width) # left/right, top/bottom, front/back if not isinstance(width, YTArray): - width = self.ds.arr(width, input_units="code_length") + width = self.ds.arr(width, units="code_length") if not isinstance(center, YTArray): - center = self.ds.arr(center, input_units="code_length") + center = self.ds.arr(center, units="code_length") # Ensure that width and center are in the same units # Cf. https://bitbucket.org/yt_analysis/yt/issue/1080 width.convert_to_units("code_length") @@ -925,13 +910,13 @@ def move_to(self, final, n_steps, final_width=None, exponential=False, clip_rati """ dW = None if not isinstance(final, YTArray): - final = self.ds.arr(final, input_units = "code_length") + final = self.ds.arr(final, units = "code_length") if exponential: if final_width is not None: if not iterable(final_width): final_width = [final_width, final_width, final_width] if not isinstance(final_width, YTArray): - final_width = self.ds.arr(final_width, input_units="code_length") + final_width = self.ds.arr(final_width, units="code_length") # left/right, top/bottom, front/back if (self.center == 0.0).all(): self.center += (final - self.center) / (10. 
* n_steps) @@ -946,7 +931,7 @@ def move_to(self, final, n_steps, final_width=None, exponential=False, clip_rati if not iterable(final_width): final_width = [final_width, final_width, final_width] if not isinstance(final_width, YTArray): - final_width = self.ds.arr(final_width, input_units="code_length") + final_width = self.ds.arr(final_width, units="code_length") # left/right, top/bottom, front/back dW = (1.0*final_width-self.width)/n_steps else: @@ -1250,7 +1235,7 @@ def get_sampler_args(self, image): positions[:,:,1] = self.center[1] positions[:,:,2] = self.center[2] - positions = self.ds.arr(positions, input_units="code_length") + positions = self.ds.arr(positions, units="code_length") dummy = np.ones(3, dtype='float64') image.shape = (self.resolution[0], self.resolution[1],4) @@ -1297,7 +1282,7 @@ def project_to_plane(self, pos, res=None): for i in range(0, sight_vector.shape[0]): sight_vector_norm = np.sqrt(np.dot(sight_vector[i], sight_vector[i])) sight_vector[i] = sight_vector[i]/sight_vector_norm - sight_vector = self.ds.arr(sight_vector.value, input_units='dimensionless') + sight_vector = self.ds.arr(sight_vector.value, units='dimensionless') sight_center = self.center + self.width[2] * self.orienter.unit_vectors[2] for i in range(0, sight_vector.shape[0]): @@ -2040,8 +2025,8 @@ class StereoSphericalCamera(Camera): def __init__(self, *args, **kwargs): self.disparity = kwargs.pop('disparity', 0.) Camera.__init__(self, *args, **kwargs) - self.disparity = self.ds.arr(self.disparity, input_units="code_length") - self.disparity_s = self.ds.arr(0., input_units="code_length") + self.disparity = self.ds.arr(self.disparity, units="code_length") + self.disparity_s = self.ds.arr(0., units="code_length") if(self.resolution[0]/self.resolution[1] != 2): mylog.info('Warning: It\'s recommended to set the aspect ratio to be 2:1') self.resolution = np.asarray(self.resolution) + 2 diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index b1e5da0714a..de7a6277944 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -1,16 +1,3 @@ -""" -RenderSource Class - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ----------------------------------------------------------------------------- - import numpy as np from functools import wraps from yt.config import \ diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index c7119e541ff..8fd90e14126 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -1,16 +1,3 @@ -""" -The volume rendering Scene class. - -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
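Editorial aside, not part of the patch: the input_units keyword swapped out above (and in camera.py earlier in this diff) is replaced by units, matching unyt's spelling; the scene arr/quan docstrings below note that input_units is now deprecated. A minimal sketch with an illustrative dataset:

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset
width = ds.quan(0.3, units="code_length")              # new keyword
center = ds.arr([0.5, 0.5, 0.5], units="code_length")  # new keyword
# deprecated spelling: ds.quan(0.3, input_units="code_length")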
-# ----------------------------------------------------------------------------- - import functools import numpy as np @@ -18,7 +5,6 @@ from yt.config import \ ytcfg from yt.funcs import mylog, get_image_suffix -from yt.extern.six import iteritems, itervalues, string_types from yt.units.dimensions import \ length from yt.units.unit_registry import \ @@ -38,7 +24,7 @@ PointSource, \ LineSource from .zbuffer_array import ZBuffer -from yt.extern.six.moves import builtins +import builtins from yt.utilities.exceptions import YTNotInsideNotebook class Scene(object): @@ -100,7 +86,7 @@ def __init__(self): def get_source(self, source_num=0): """Returns the volume rendering source indexed by ``source_num``""" - return list(itervalues(self.sources))[source_num] + return list(self.sources.values())[source_num] def __getitem__(self, item): if item in self.sources: @@ -113,7 +99,7 @@ def opaque_sources(self): Iterate over opaque RenderSource objects, returning a tuple of (key, source) """ - for k, source in iteritems(self.sources): + for k, source in self.sources.items(): if isinstance(source, OpaqueSource) or \ issubclass(OpaqueSource, type(source)): yield k, source @@ -124,7 +110,7 @@ def transparent_sources(self): Iterate over transparent RenderSource objects, returning a tuple of (key, source) """ - for k, source in iteritems(self.sources): + for k, source in self.sources.items(): if not isinstance(source, OpaqueSource): yield k, source @@ -282,13 +268,13 @@ def save(self, fname=None, sigma_clip=None): """ if fname is None: - sources = list(itervalues(self.sources)) + sources = list(self.sources.values()) rensources = [s for s in sources if isinstance(s, RenderSource)] # if a volume source present, use its affiliated ds for fname if len(rensources) > 0: rs = rensources[0] basename = rs.data_source.ds.basename - if isinstance(rs.field, string_types): + if isinstance(rs.field, str): field = rs.field else: field = rs.field[-1] @@ -398,7 +384,7 @@ def save_annotated(self, fname=None, label_fmt=None, """ from yt.visualization._mpl_imports import \ FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS - sources = list(itervalues(self.sources)) + sources = list(self.sources.values()) rensources = [s for s in sources if isinstance(s, RenderSource)] if fname is None: @@ -406,7 +392,7 @@ def save_annotated(self, fname=None, label_fmt=None, if len(rensources) > 0: rs = rensources[0] basename = rs.data_source.ds.basename - if isinstance(rs.field, string_types): + if isinstance(rs.field, str): field = rs.field else: field = rs.field[-1] @@ -499,7 +485,7 @@ def _annotate(self, ax, tf, source, label="", label_fmt=None): def _validate(self): r"""Validate the current state of the scene.""" - for k, source in iteritems(self.sources): + for k, source in self.sources.items(): source._validate() return @@ -840,15 +826,16 @@ def arr(self): """Converts an array into a :class:`yt.units.yt_array.YTArray` The returned YTArray will be dimensionless by default, but can be - cast to arbitrary units using the ``input_units`` keyword argument. + cast to arbitrary units using the ``units`` keyword argument. Parameters ---------- input_array : Iterable A tuple, list, or array to attach units to - input_units : String unit specification, unit symbol object, or astropy - units object + units: String unit specification, unit symbol object, or astropy + units object + input_units : deprecated in favor of 'units' The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). 
dtype : string or NumPy dtype object @@ -883,15 +870,16 @@ def quan(self): """Converts an scalar into a :class:`yt.units.yt_array.YTQuantity` The returned YTQuantity will be dimensionless by default, but can be - cast to arbitrary units using the ``input_units`` keyword argument. + cast to arbitrary units using the ``units`` keyword argument. Parameters ---------- input_scalar : an integer or floating point scalar The scalar to attach units to - input_units : String unit specification, unit symbol object, or astropy - units + units : String unit specification, unit symbol object, or astropy + units + input_units : deprecated in favor of 'units' The units of the quantity. Powers must be specified using python syntax (cm**3, not cm^3). dtype : string or NumPy dtype object @@ -932,7 +920,7 @@ def _repr_png_(self): def __repr__(self): disp = ":" disp += "\nSources: \n" - for k, v in iteritems(self.sources): + for k, v in self.sources.items(): disp += " %s: %s\n" % (k, v) disp += "Camera: \n" disp += " %s" % self.camera diff --git a/yt/visualization/volume_rendering/shader_objects.py b/yt/visualization/volume_rendering/shader_objects.py index d4d14669c03..2d366ccb46f 100644 --- a/yt/visualization/volume_rendering/shader_objects.py +++ b/yt/visualization/volume_rendering/shader_objects.py @@ -1,23 +1,8 @@ -# encoding: utf-8 -""" -Shader and ShaderProgram wrapper classes for vertex and fragment shaders used -in Interactive Data Visualization -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2016, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -# ---------------------------------------------------------------------------- - # This is a part of the experimental Interactive Data Visualization import os import OpenGL.GL as GL import contextlib -from yt.extern.six import add_metaclass from collections import OrderedDict from yt.utilities.exceptions import \ YTInvalidShaderType, \ @@ -144,8 +129,7 @@ def __init__(cls, name, b, d): if getattr(cls, "_shader_name", None) is not None: known_shaders[cls._shader_name] = cls -@add_metaclass(RegisteredShader) -class Shader(object): +class Shader(metaclass = RegisteredShader): ''' Creates a shader from source diff --git a/yt/visualization/volume_rendering/tests/test_camera_attributes.py b/yt/visualization/volume_rendering/tests/test_camera_attributes.py index ce5c2211dd0..3c12e504a29 100644 --- a/yt/visualization/volume_rendering/tests/test_camera_attributes.py +++ b/yt/visualization/volume_rendering/tests/test_camera_attributes.py @@ -1,19 +1,8 @@ -""" -Tests for setting camera and scene attributes -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- - import numpy as np import yt.units as u from yt.testing import \ + assert_almost_equal, \ assert_equal, \ fake_random_ds from yt.visualization.volume_rendering.api import \ @@ -74,14 +63,14 @@ def test_scene_and_camera_attributes(): try: # test properties setattr(cam, attribute, attribute_value) - assert_equal(getattr(cam, attribute), expected_result) + assert_almost_equal(getattr(cam, attribute), expected_result) except RuntimeError: assert expected_result is RuntimeError try: # test setters/getters getattr(cam, 'set_%s' % attribute)(attribute_value) - assert_equal(getattr(cam, 'get_%s' % attribute)(), + assert_almost_equal(getattr(cam, 'get_%s' % attribute)(), expected_result) except RuntimeError: assert expected_result is RuntimeError diff --git a/yt/visualization/volume_rendering/tests/test_composite.py b/yt/visualization/volume_rendering/tests/test_composite.py index 29e3118c1a7..2f673565d79 100644 --- a/yt/visualization/volume_rendering/tests/test_composite.py +++ b/yt/visualization/volume_rendering/tests/test_composite.py @@ -1,15 +1,3 @@ -""" -Test for Composite VR. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/tests/test_lenses.py b/yt/visualization/volume_rendering/tests/test_lenses.py index 85db802b8b4..35719147247 100644 --- a/yt/visualization/volume_rendering/tests/test_lenses.py +++ b/yt/visualization/volume_rendering/tests/test_lenses.py @@ -1,15 +1,3 @@ -""" -Test for Volume Rendering Lenses. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/tests/test_mesh_render.py b/yt/visualization/volume_rendering/tests/test_mesh_render.py index 330f3a4ef4b..e3885b75dc2 100644 --- a/yt/visualization/volume_rendering/tests/test_mesh_render.py +++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py @@ -1,15 +1,3 @@ -""" -Test Surface Mesh Rendering - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-#----------------------------------------------------------------------------- from nose.plugins.attrib import attr from yt.testing import \ fake_tetrahedral_ds, \ diff --git a/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py new file mode 100644 index 00000000000..62df69a66b7 --- /dev/null +++ b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py @@ -0,0 +1,328 @@ +from yt.visualization.volume_rendering import \ + off_axis_projection as OffAP +from yt.testing import \ + fake_sph_orientation_ds, \ + assert_almost_equal, \ + requires_module +from yt.utilities.lib.pixelization_routines import \ + pixelize_sph_kernel_projection +import numpy as np +from yt.utilities.on_demand_imports import _scipy + +spatial = _scipy.spatial +ndimage = _scipy.ndimage + +def test_no_rotation(): + """ Determines if a projection processed through + off_axis_projection with no rotation will give the same + image buffer as one processed directly through + pixelize_sph_kernel_projection + """ + normal_vector = [0., 0., 1.] + resolution = (64, 64) + ds = fake_sph_orientation_ds() + ad = ds.all_data() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + center = (left_edge + right_edge)/2 + width = (right_edge - left_edge) + px = ad["particle_position_x"] + py = ad["particle_position_y"] + hsml = ad["smoothing_length"] + quantity_to_smooth = ad[('gas', 'density')] + density = ad["density"] + mass = ad["particle_mass"] + bounds = [-4, 4, -4, 4, -4, 4] + + buf2 = np.zeros(resolution) + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density') + ) + pixelize_sph_kernel_projection(buf2, + px, + py, + hsml, + mass, + density, + quantity_to_smooth, + bounds + ) + assert_almost_equal(buf1.ndarray_view(), buf2) + + +@requires_module('scipy') +def test_basic_rotation_1(): + """ All particles on the z-axis should now be on the negative y-axis + fake_sph_orientation has three z-axis particles, so there should be three y-axis particles + after rotation + (0, 0, 1) -> (0, -1) + (0, 0, 2) -> (0, -2) + (0, 0, 3) -> (0, -3) + In addition, we should find a local maximum at (0, 0) due to: + (0, 0, 0) -> (0, 0) + (0, 1, 0) -> (0, 0) + (0, 2, 0) -> (0, 0) + and the one particle on the x-axis should not change its position: + (1, 0, 0) -> (1, 0) + """ + expected_maxima = ([0., 0., 0., 0., 1.], [0., -1., -2., -3., 0.]) + normal_vector = [0., 1., 0.] + north_vector = [0., 0., -1.] + resolution = (64, 64) + ds = fake_sph_orientation_ds() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + center = (left_edge + right_edge)/2 + width = (right_edge - left_edge) + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density'), + north_vector=north_vector + ) + find_compare_maxima(expected_maxima, buf1, resolution, width) + + +@requires_module('scipy') +def test_basic_rotation_2(): + """ Rotation of x-axis onto z-axis.
All particles on the z-axis should now be on the negative x-axis + fake_sph_orientation has three z-axis particles, so there should be three x-axis particles + after rotation + (0, 0, 1) -> (-1, 0) + (0, 0, 2) -> (-2, 0) + (0, 0, 3) -> (-3, 0) + In addition, we should find a local maximum at (0, 0) due to: + (0, 0, 0) -> (0, 0) + (1, 0, 0) -> (0, 0) + and the two particles on the y-axis should not change their positions: + (0, 1, 0) -> (0, 1) + (0, 2, 0) -> (0, 2) + """ + expected_maxima = ([-1., -2., -3., 0., 0., 0.], + [0., 0., 0., 0., 1., 2.]) + normal_vector = [1., 0., 0.] + north_vector = [0., 1., 0.] + resolution = (64, 64) + ds = fake_sph_orientation_ds() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + center = (left_edge + right_edge)/2 + width = (right_edge - left_edge) + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density'), + north_vector=north_vector + ) + find_compare_maxima(expected_maxima, buf1, resolution, width) + + +@requires_module('scipy') +def test_basic_rotation_3(): + """ Rotation of z-axis onto negative z-axis. All fake particles on the z-axis should now be on + the negative z-axis. + fake_sph_orientation has three z-axis particles, so we should have a local maximum at (0, 0) + (0, 0, 1) -> (0, 0) + (0, 0, 2) -> (0, 0) + (0, 0, 3) -> (0, 0) + In addition, (0, 0, 0) should also contribute to the local maximum at (0, 0): + (0, 0, 0) -> (0, 0) + x-axis particles should be rotated as such: + (1, 0, 0) -> (0, -1) + and the same goes for y-axis particles: + (0, 1, 0) -> (-1, 0) + (0, 2, 0) -> (-2, 0) + """ + expected_maxima = ([0., 0., -1., -2.], [0., -1., 0., 0.]) + normal_vector = [0., 0., -1.] + resolution = (64, 64) + ds = fake_sph_orientation_ds() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + center = (left_edge + right_edge)/2 + width = (right_edge - left_edge) + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density') + ) + find_compare_maxima(expected_maxima, buf1, resolution, width) + + +@requires_module('scipy') +def test_basic_rotation_4(): + """ Rotation of x-axis to z-axis and original z-axis to y-axis with the use + of the north_vector. All fake particles on the z-axis should now be on the + y-axis. All fake particles on the x-axis should now be on the z-axis, and + all fake particles on the y-axis should now be on the x-axis. + + (0, 0, 1) -> (0, 1) + (0, 0, 2) -> (0, 2) + (0, 0, 3) -> (0, 3) + In addition, (0, 0, 0) should contribute to the local maximum at (0, 0): + (0, 0, 0) -> (0, 0) + x-axis particles should be rotated and contribute to the local maximum at (0, 0): + (1, 0, 0) -> (0, 0) + and the y-axis particles shift into the positive x direction: + (0, 1, 0) -> (1, 0) + (0, 2, 0) -> (2, 0) + """ + expected_maxima = ([0., 0., 0., 0., 1., 2.], [1., 2., 3., 0., 0., 0.]) + normal_vector = [1., 0., 0.] + north_vector = [0., 0., 1.]
+ resolution = (64, 64) + ds = fake_sph_orientation_ds() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + center = (left_edge + right_edge)/2 + width = (right_edge - left_edge) + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density'), + north_vector=north_vector + ) + find_compare_maxima(expected_maxima, buf1, resolution, width) + + +@requires_module('scipy') +def test_center_1(): + """ Change the center to [0, 3, 0] + Every point will be shifted by 3 in the y-domain + With this, we should not be able to see any of the y-axis particles + (0, 1, 0) -> (0, -2) + (0, 2, 0) -> (0, -1) + (0, 0, 1) -> (0, -3) + (0, 0, 2) -> (0, -3) + (0, 0, 3) -> (0, -3) + (0, 0, 0) -> (0, -3) + (1, 0, 0) -> (1, -3) + """ + expected_maxima = ([0., 0., 0., 1.], [-2., -1., -3., -3.]) + normal_vector = [0., 0., 1.] + resolution = (64, 64) + ds = fake_sph_orientation_ds() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + # center = [(left_edge[0] + right_edge[0])/2, + # left_edge[1], + # (left_edge[2] + right_edge[2])/2] + center = [0., 3., 0.] + width = (right_edge - left_edge) + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density') + ) + find_compare_maxima(expected_maxima, buf1, resolution, width) + + +@requires_module('scipy') +def test_center_2(): + """ Change the center to [0, -1, 0] + Every point will be shifted by 1 in the y-domain + With this, we should not be able to see any of the y-axis particles + (0, 1, 0) -> (0, 2) + (0, 2, 0) -> (0, 3) + (0, 0, 1) -> (0, 1) + (0, 0, 2) -> (0, 1) + (0, 0, 3) -> (0, 1) + (0, 0, 0) -> (0, 1) + (1, 0, 0) -> (1, 1) + """ + expected_maxima = ([0., 0., 0., 1.], [2., 3., 1., 1.]) + normal_vector = [0., 0., 1.] + resolution = (64, 64) + ds = fake_sph_orientation_ds() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + center = [0., -1., 0.] + width = (right_edge - left_edge) + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density') + ) + find_compare_maxima(expected_maxima, buf1, resolution, width) + + +@requires_module('scipy') +def test_center_3(): + """ Change the center to the left edge, or [0, -8, 0] + Every point will be shifted by 8 in the y-domain + With this, we should not be able to see anything ! + """ + expected_maxima = ([], []) + normal_vector = [0., 0., 1.] + resolution = (64, 64) + ds = fake_sph_orientation_ds() + left_edge = ds.domain_left_edge + right_edge = ds.domain_right_edge + center = [0., -1., 0.] 
+ width = [(right_edge[0] - left_edge[0]), + left_edge[1], + (right_edge[2] - left_edge[2])] + buf1 = OffAP.off_axis_projection(ds, + center, + normal_vector, + width, + resolution, + ('gas', 'density') + ) + find_compare_maxima(expected_maxima, buf1, resolution, width) + + +@requires_module('scipy') +def find_compare_maxima(expected_maxima, buf, resolution, width): + buf_ndarray = buf.ndarray_view() + max_filter_buf = ndimage.filters.maximum_filter(buf_ndarray, size=5) + maxima = np.isclose(max_filter_buf, buf_ndarray, rtol=1e-09) + + # ignore contributions from zones of no smoothing + for i in range(len(maxima)): + for j in range(len(maxima[i])): + if np.isclose(buf_ndarray[i, j], 0., 1e-09): + maxima[i, j] = False + coords = ([], []) + + for i in range(len(maxima)): + for j in range(len(maxima[i])): + if maxima[i, j]: + coords[0].append(i) + coords[1].append(j) + pixel_tolerance = 2.0 + x_scaling_factor = resolution[0] / width[0] + y_scaling_factor = resolution[1] / width[1] + for i in range(len(expected_maxima[0])): + found_match = False + for j in range(len(coords[0])): + # normalize coordinates + x_coord = coords[0][j] + y_coord = coords[1][j] + x_coord -= resolution[0] / 2 + y_coord -= resolution[1] / 2 + x_coord /= x_scaling_factor + y_coord /= y_scaling_factor + if spatial.distance.euclidean([x_coord, y_coord], [expected_maxima[0][i], + expected_maxima[1][i]]) < pixel_tolerance: + found_match = True + break + if found_match is not True: + assert False + pass diff --git a/yt/visualization/volume_rendering/tests/test_points.py b/yt/visualization/volume_rendering/tests/test_points.py index f12e04208dc..c81744ad2dc 100644 --- a/yt/visualization/volume_rendering/tests/test_points.py +++ b/yt/visualization/volume_rendering/tests/test_points.py @@ -1,15 +1,3 @@ -""" -Test for Composite VR. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/tests/test_scene.py b/yt/visualization/volume_rendering/tests/test_scene.py index f92bc0b4bff..8228a8c0f7b 100644 --- a/yt/visualization/volume_rendering/tests/test_scene.py +++ b/yt/visualization/volume_rendering/tests/test_scene.py @@ -1,18 +1,3 @@ - -""" -Test for Volume Rendering Scene, and their movement. - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/tests/test_sigma_clip.py b/yt/visualization/volume_rendering/tests/test_sigma_clip.py index 076461412a0..99cb1b9cb02 100644 --- a/yt/visualization/volume_rendering/tests/test_sigma_clip.py +++ b/yt/visualization/volume_rendering/tests/test_sigma_clip.py @@ -1,16 +1,3 @@ -""" -Test Simple Volume Rendering Scene - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. 
-# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/tests/test_simple_vr.py b/yt/visualization/volume_rendering/tests/test_simple_vr.py index 24ba0cd9931..319b36f279c 100644 --- a/yt/visualization/volume_rendering/tests/test_simple_vr.py +++ b/yt/visualization/volume_rendering/tests/test_simple_vr.py @@ -1,16 +1,3 @@ -""" -Test Simple Volume Rendering Scene - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index 858665f0191..2d75926a34c 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -1,15 +1,3 @@ -""" -Miscellaneous tests for VR infrastructure -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/tests/test_vr_cameras.py b/yt/visualization/volume_rendering/tests/test_vr_cameras.py index 11639d33383..fc7992cf970 100644 --- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py +++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py @@ -1,18 +1,3 @@ -""" -Test for Volume Rendering Cameras, and their movement. - - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- import os import os.path import tempfile diff --git a/yt/visualization/volume_rendering/tests/test_vr_orientation.py b/yt/visualization/volume_rendering/tests/test_vr_orientation.py index 391d006e563..c6e923f6437 100644 --- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py +++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py @@ -1,15 +1,3 @@ -""" -Answer test to verify VR orientation and rotation is correct -""" - -# ----------------------------------------------------------------------------- -# Copyright (c) 2015, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-# ----------------------------------------------------------------------------- - import numpy as np from nose.plugins.attrib import attr diff --git a/yt/visualization/volume_rendering/tests/test_zbuff.py b/yt/visualization/volume_rendering/tests/test_zbuff.py index 5ccb4c6f3ce..c51cc698348 100644 --- a/yt/visualization/volume_rendering/tests/test_zbuff.py +++ b/yt/visualization/volume_rendering/tests/test_zbuff.py @@ -1,15 +1,3 @@ -""" -Test for Composite VR. -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import os import tempfile import shutil diff --git a/yt/visualization/volume_rendering/transfer_function_helper.py b/yt/visualization/volume_rendering/transfer_function_helper.py index ca9b627060f..9dffc4247ce 100644 --- a/yt/visualization/volume_rendering/transfer_function_helper.py +++ b/yt/visualization/volume_rendering/transfer_function_helper.py @@ -1,19 +1,3 @@ -""" -A helper class to build, display, and modify transfer functions for volume -rendering. - - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import matplotlib import numpy as np @@ -23,7 +7,7 @@ from yt.data_objects.profiles import create_profile from yt.visualization.volume_rendering.transfer_functions import \ ColorTransferFunction -from yt.extern.six import BytesIO +from io import BytesIO class TransferFunctionHelper(object): diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index ee98235eb9c..2a1c80b7742 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -1,19 +1,3 @@ -""" -Simple transfer function editor - - - -""" -from __future__ import absolute_import - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np from matplotlib.cm import get_cmap diff --git a/yt/visualization/volume_rendering/volume_rendering.py b/yt/visualization/volume_rendering/volume_rendering.py index 3559137646c..b91d1e4a672 100644 --- a/yt/visualization/volume_rendering/volume_rendering.py +++ b/yt/visualization/volume_rendering/volume_rendering.py @@ -1,16 +1,3 @@ -""" -Volume rendering - -""" - -# ---------------------------------------------------------------------------- -# Copyright (c) 2014, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
-# ---------------------------------------------------------------------------- - from .scene import Scene from .render_source import VolumeSource, \ diff --git a/yt/visualization/volume_rendering/zbuffer_array.py b/yt/visualization/volume_rendering/zbuffer_array.py index bcbd7649ccc..c241380ef0b 100644 --- a/yt/visualization/volume_rendering/zbuffer_array.py +++ b/yt/visualization/volume_rendering/zbuffer_array.py @@ -1,16 +1,3 @@ -""" - - -""" - -#----------------------------------------------------------------------------- -# Copyright (c) 2013, yt Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- - import numpy as np
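Note for reviewers: the new tests in test_off_axis_SPH.py above all follow the same pattern -- build the small fake_sph_orientation_ds dataset, project it along an arbitrary normal with off_axis_projection, and check that the bright pixels land where the rotated particles should be. The short sketch below is not part of the patch; it is a minimal illustration of that pattern that reuses only names appearing in the diff (fake_sph_orientation_ds, the off_axis_projection module and its OffAP alias), and its argmax-based peak check is a deliberate simplification of the tests' scipy maximum_filter comparison.

import numpy as np
from yt.testing import fake_sph_orientation_ds
from yt.visualization.volume_rendering import off_axis_projection as OffAP

# Tiny SPH dataset with particles laid out along the coordinate axes,
# the same helper the new tests use.
ds = fake_sph_orientation_ds()
left_edge = ds.domain_left_edge
right_edge = ds.domain_right_edge
center = (left_edge + right_edge) / 2
width = right_edge - left_edge

# Project along +x, mapping the original z-axis onto the image's vertical
# direction via north_vector (the orientation exercised by test_basic_rotation_4).
buf = OffAP.off_axis_projection(ds, center, [1., 0., 0.], width,
                                (64, 64), ('gas', 'density'),
                                north_vector=[0., 0., 1.])

# Crude peak check: the brightest pixel should sit near the projected
# position of the largest cluster of particles.
peak = np.unravel_index(np.argmax(buf.ndarray_view()), buf.shape)
print("brightest pixel:", peak)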