From e0d25c899a555d7e17f683f4a42c06b933287783 Mon Sep 17 00:00:00 2001 From: pymc-devs Date: Thu, 24 Nov 2022 10:24:31 +0100 Subject: [PATCH] Merge Aeppl as logprob submodule --- .github/workflows/tests.yml | 12 + .pre-commit-config.yaml | 1 - conda-envs/environment-dev.yml | 2 +- conda-envs/environment-test.yml | 3 +- conda-envs/windows-environment-dev.yml | 2 +- conda-envs/windows-environment-test.yml | 3 +- docs/source/conf.py | 1 - .../learn/core_notebooks/GLM_linear.ipynb | 20 +- .../core_notebooks/model_comparison.ipynb | 20 +- .../core_notebooks/posterior_predictive.ipynb | 20 +- .../learn/core_notebooks/pymc_aesara.ipynb | 185 +++- .../learn/core_notebooks/pymc_overview.ipynb | 20 +- pymc/__init__.py | 1 + pymc/aesaraf.py | 10 +- pymc/distributions/__init__.py | 2 - pymc/distributions/censored.py | 2 +- pymc/distributions/continuous.py | 10 +- pymc/distributions/discrete.py | 3 + pymc/distributions/dist_math.py | 2 +- pymc/distributions/distribution.py | 31 +- pymc/distributions/logprob.py | 217 +---- pymc/distributions/mixture.py | 6 +- pymc/distributions/multivariate.py | 2 +- pymc/distributions/simulator.py | 2 +- pymc/distributions/timeseries.py | 16 +- pymc/distributions/transforms.py | 25 +- pymc/distributions/truncated.py | 3 +- pymc/initial_point.py | 2 +- pymc/logprob/LICENSE_AEPPL.txt | 21 + pymc/logprob/__init__.py | 52 ++ pymc/logprob/abstract.py | 249 ++++++ pymc/logprob/censoring.py | 285 +++++++ pymc/logprob/cumsum.py | 128 +++ pymc/logprob/joint_logprob.py | 254 ++++++ pymc/logprob/mixture.py | 467 +++++++++++ pymc/logprob/rewriting.py | 353 ++++++++ pymc/logprob/scan.py | 548 ++++++++++++ pymc/logprob/tensor.py | 331 ++++++++ pymc/logprob/transforms.py | 727 ++++++++++++++++ pymc/logprob/utils.py | 254 ++++++ pymc/sampling/jax.py | 2 +- pymc/tests/distributions/test_continuous.py | 24 +- pymc/tests/distributions/test_discrete.py | 32 +- pymc/tests/distributions/test_dist_math.py | 2 +- pymc/tests/distributions/test_distribution.py | 4 +- pymc/tests/distributions/test_logprob.py | 2 +- pymc/tests/distributions/test_mixture.py | 2 +- pymc/tests/distributions/test_multivariate.py | 2 +- pymc/tests/distributions/test_truncated.py | 5 +- pymc/tests/distributions/util.py | 2 +- pymc/tests/logprob/__init__.py | 0 pymc/tests/logprob/test_abstract.py | 149 ++++ pymc/tests/logprob/test_censoring.py | 254 ++++++ pymc/tests/logprob/test_composite_logprob.py | 213 +++++ pymc/tests/logprob/test_cumsum.py | 118 +++ pymc/tests/logprob/test_joint_logprob.py | 311 +++++++ pymc/tests/logprob/test_mixture.py | 792 ++++++++++++++++++ pymc/tests/logprob/test_rewriting.py | 85 ++ pymc/tests/logprob/test_scan.py | 459 ++++++++++ pymc/tests/logprob/test_tensor.py | 306 +++++++ pymc/tests/logprob/test_transforms.py | 741 ++++++++++++++++ pymc/tests/logprob/test_utils.py | 193 +++++ pymc/tests/logprob/utils.py | 174 ++++ pymc/tests/sampling/test_forward.py | 2 +- pymc/tests/test_aesaraf.py | 2 +- pymc/tests/test_model.py | 2 +- requirements-dev.txt | 2 +- requirements.txt | 1 - scripts/run_mypy.py | 6 +- 69 files changed, 7841 insertions(+), 338 deletions(-) create mode 100644 pymc/logprob/LICENSE_AEPPL.txt create mode 100644 pymc/logprob/__init__.py create mode 100644 pymc/logprob/abstract.py create mode 100644 pymc/logprob/censoring.py create mode 100644 pymc/logprob/cumsum.py create mode 100644 pymc/logprob/joint_logprob.py create mode 100644 pymc/logprob/mixture.py create mode 100644 pymc/logprob/rewriting.py create mode 100644 pymc/logprob/scan.py create mode 100644 
pymc/logprob/tensor.py create mode 100644 pymc/logprob/transforms.py create mode 100644 pymc/logprob/utils.py create mode 100644 pymc/tests/logprob/__init__.py create mode 100644 pymc/tests/logprob/test_abstract.py create mode 100644 pymc/tests/logprob/test_censoring.py create mode 100644 pymc/tests/logprob/test_composite_logprob.py create mode 100644 pymc/tests/logprob/test_cumsum.py create mode 100644 pymc/tests/logprob/test_joint_logprob.py create mode 100644 pymc/tests/logprob/test_mixture.py create mode 100644 pymc/tests/logprob/test_rewriting.py create mode 100644 pymc/tests/logprob/test_scan.py create mode 100644 pymc/tests/logprob/test_tensor.py create mode 100644 pymc/tests/logprob/test_transforms.py create mode 100644 pymc/tests/logprob/test_utils.py create mode 100644 pymc/tests/logprob/utils.py diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 301ff657e87..d48bca42626 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -85,6 +85,18 @@ jobs: pymc/tests/ode/test_utils.py pymc/tests/step_methods/hmc/test_quadpotential.py + - | + pymc/tests/logprob/test_abstract.py + pymc/tests/logprob/test_censoring.py + pymc/tests/logprob/test_composite_logprob.py + pymc/tests/logprob/test_cumsum.py + pymc/tests/logprob/test_mixture.py + pymc/tests/logprob/test_rewriting.py + pymc/tests/logprob/test_scan.py + pymc/tests/logprob/test_tensor.py + pymc/tests/logprob/test_transforms.py + pymc/tests/logprob/test_utils.py + fail-fast: false runs-on: ${{ matrix.os }} env: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 13acae8373e..41dc4de4aa3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -85,7 +85,6 @@ repos: (?x)(arviz-devs.github.io| python.arviz.org| aesara.readthedocs.io| - aeppl.readthedocs.io| pymc-experimental.readthedocs.io| docs.pymc.io| www.pymc.io| diff --git a/conda-envs/environment-dev.yml b/conda-envs/environment-dev.yml index 5ef07a37de6..93a85ea9ab1 100644 --- a/conda-envs/environment-dev.yml +++ b/conda-envs/environment-dev.yml @@ -5,7 +5,6 @@ channels: - defaults dependencies: # Base dependencies -- aeppl=0.0.38 - aesara=2.8.7 - arviz>=0.13.0 - blas @@ -41,3 +40,4 @@ dependencies: - types-cachetools - pip: - git+https://github.com/pymc-devs/pymc-sphinx-theme + - numdifftools>=0.9.40 diff --git a/conda-envs/environment-test.yml b/conda-envs/environment-test.yml index 58ad3be53a5..a1544682c40 100644 --- a/conda-envs/environment-test.yml +++ b/conda-envs/environment-test.yml @@ -5,7 +5,6 @@ channels: - defaults dependencies: # Base dependencies -- aeppl=0.0.38 - aesara=2.8.7 - arviz>=0.13.0 - blas @@ -29,3 +28,5 @@ dependencies: - pytest>=3.0 - mypy=0.990 - types-cachetools +- pip: + - numdifftools>=0.9.40 diff --git a/conda-envs/windows-environment-dev.yml b/conda-envs/windows-environment-dev.yml index 53385f2bec5..5899f26fc8a 100644 --- a/conda-envs/windows-environment-dev.yml +++ b/conda-envs/windows-environment-dev.yml @@ -5,7 +5,6 @@ channels: - defaults dependencies: # Base dependencies (see install guide for Windows) -- aeppl=0.0.38 - aesara=2.8.7 - arviz>=0.13.0 - blas @@ -38,3 +37,4 @@ dependencies: - types-cachetools - pip: - git+https://github.com/pymc-devs/pymc-sphinx-theme + - numdifftools>=0.9.40 diff --git a/conda-envs/windows-environment-test.yml b/conda-envs/windows-environment-test.yml index b1515cb7b79..3cf18a2d402 100644 --- a/conda-envs/windows-environment-test.yml +++ b/conda-envs/windows-environment-test.yml @@ -5,7 +5,6 @@ channels: - defaults dependencies: # Base 
dependencies (see install guide for Windows) -- aeppl=0.0.38 - aesara=2.8.7 - arviz>=0.13.0 - blas @@ -30,3 +29,5 @@ dependencies: - pytest>=3.0 - mypy=0.990 - types-cachetools +- pip: + - numdifftools>=0.9.40 diff --git a/docs/source/conf.py b/docs/source/conf.py index 6d388e46f9d..ed5cb9d4da6 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -188,7 +188,6 @@ intersphinx_mapping = { "arviz": ("https://python.arviz.org/en/latest/", None), "aesara": ("https://aesara.readthedocs.io/en/latest/", None), - "aeppl": ("https://aeppl.readthedocs.io/en/latest/", None), "home": ("https://www.pymc.io", None), "pmx": ("https://www.pymc.io/projects/experimental/en/latest", None), "numpy": ("https://numpy.org/doc/stable/", None), diff --git a/docs/source/learn/core_notebooks/GLM_linear.ipynb b/docs/source/learn/core_notebooks/GLM_linear.ipynb index f331a705fcf..db37daea923 100644 --- a/docs/source/learn/core_notebooks/GLM_linear.ipynb +++ b/docs/source/learn/core_notebooks/GLM_linear.ipynb @@ -522,14 +522,15 @@ "source": [ "%load_ext watermark\n", "\n", - "%watermark -n -u -v -iv -w -p aesara,aeppl" + "%watermark -n -u -v -iv -w -p aesara" ] } ], "metadata": { "anaconda-cloud": {}, + "hide_input": false, "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -543,7 +544,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.8.10" }, "latex_envs": { "bibliofile": "biblio.bib", @@ -551,6 +552,19 @@ "current_citInitial": 1, "eqLabelWithNumbers": true, "eqNumInitial": 0 + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false } }, "nbformat": 4, diff --git a/docs/source/learn/core_notebooks/model_comparison.ipynb b/docs/source/learn/core_notebooks/model_comparison.ipynb index d959813d875..767354b86f0 100644 --- a/docs/source/learn/core_notebooks/model_comparison.ipynb +++ b/docs/source/learn/core_notebooks/model_comparison.ipynb @@ -536,16 +536,17 @@ } ], "source": [ - "%watermark -n -u -v -iv -w -p xarray,aesara,aeppl" + "%watermark -n -u -v -iv -w -p xarray,aesara" ] } ], "metadata": { + "hide_input": false, "interpreter": { "hash": "baf205d70af30bf8b721a304f5a44beb31bf8af014f6b7340f1a7ae004926653" }, "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -559,7 +560,20 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.8.10" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false } }, "nbformat": 4, diff --git a/docs/source/learn/core_notebooks/posterior_predictive.ipynb b/docs/source/learn/core_notebooks/posterior_predictive.ipynb index f76cfd1f403..0f74b329d36 100644 --- a/docs/source/learn/core_notebooks/posterior_predictive.ipynb +++ b/docs/source/learn/core_notebooks/posterior_predictive.ipynb @@ -4649,14 +4649,15 @@ ], "source": [ "%load_ext watermark\n", - "%watermark -n -u -v -iv -w -p aesara,aeppl" + "%watermark -n 
-u -v -iv -w -p aesara" ] } ], "metadata": { "anaconda-cloud": {}, + "hide_input": false, "kernelspec": { - "display_name": "Python 3.9.13 ('pymc-dev-py39')", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -4670,7 +4671,20 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.8.10" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false }, "vscode": { "interpreter": { diff --git a/docs/source/learn/core_notebooks/pymc_aesara.ipynb b/docs/source/learn/core_notebooks/pymc_aesara.ipynb index 18e17329ae4..8d1d172bd73 100644 --- a/docs/source/learn/core_notebooks/pymc_aesara.ipynb +++ b/docs/source/learn/core_notebooks/pymc_aesara.ipynb @@ -160,7 +160,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 5, "metadata": {}, @@ -201,7 +203,9 @@ "outputs": [ { "data": { - "text/plain": "array([0., 1.])" + "text/plain": [ + "array([0., 1.])" + ] }, "execution_count": 7, "metadata": {}, @@ -228,7 +232,9 @@ "outputs": [ { "data": { - "text/plain": "array([0., 1.])" + "text/plain": [ + "array([0., 1.])" + ] }, "execution_count": 8, "metadata": {}, @@ -253,7 +259,9 @@ "outputs": [ { "data": { - "text/plain": "array([0., 1.])" + "text/plain": [ + "array([0., 1.])" + ] }, "execution_count": 9, "metadata": {}, @@ -289,7 +297,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 10, "metadata": {}, @@ -331,7 +341,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 11, "metadata": {}, @@ -367,7 +379,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 12, "metadata": {}, @@ -513,7 +527,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 15, "metadata": {}, @@ -540,7 +556,9 @@ "outputs": [ { "data": { - "text/plain": "[x, y]" + "text/plain": [ + "[x, y]" + ] }, "execution_count": 16, "metadata": {}, @@ -595,7 +613,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 18, "metadata": {}, @@ -632,7 +652,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 19, "metadata": {}, @@ -659,7 +681,9 @@ "outputs": [ { "data": { - "text/plain": "array([1. , 2.71828183])" + "text/plain": [ + "array([1. , 2.71828183])" + ] }, "execution_count": 20, "metadata": {}, @@ -703,7 +727,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 21, "metadata": {}, @@ -723,7 +749,9 @@ "outputs": [ { "data": { - "text/plain": "array([1. , 2.71828183])" + "text/plain": [ + "array([1. , 2.71828183])" + ] }, "execution_count": 22, "metadata": {}, @@ -752,8 +780,10 @@ "outputs": [ { "data": { - "text/plain": "
", - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAF1CAYAAAAeOhj3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAAgAElEQVR4nO3deZhkdX3v8feHRVxRCA0CA44LEhFNTOYSk3gNEb0SMWDyxFyMmnFJiIkavTGGQZMg6iQYs+jVeBOibC4Q4hJRNAFRJMYIDorKojLKCAMjM4CERUUHv/ePc1qKpnumpmeqqvs379fz9FN1ljrne35V1Z865/zqVKoKSZK0uO0w6QIkSdLWM9AlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiaiCSvS/KebbzMJDklyXeSXLwtl709SfKCJJ/ZgvnXJHlqf/81Sd65DWu5Pckj+vunJnnjNlz2PyT5s221vK21rdtO25+dJl2AxivJk4C/Ah4L3AVcCbyyqj4/0cK2jScBTwOWVNUdky5me1RVfzHMfEkuAN5TVZsMsKp64LaoK8kLgN+pqicNLPsl22LZ28qwbSfNxUDfjiTZFfgo8PvAWcB9gP8J3DnJurahhwFr5grzJDtV1cYx1zRyLW5Xi9skjZqH3LcvjwaoqjOq6q6q+l5VnVtVXwZI8sgkn0xyU5Ibk7w3yUOmH9wfWn11ki8nuSPJu5LsleTjSW5L8okku/XzLk1SSY5Jcn2SdUleNVdhSZ6Y5LNJbknypSSHDkx7QZJv9uu4OslzZ3n8i4F3Aj/fH6Y9IcmhSdYmOTbJt4FTkuyS5C19Tdf393fplzE9/58kWd/X/Kwkz0jy9SQ3J3nNJrbhiCRfTHJrkmuTvG4T806v61UD63rhwPQHJzk9yYYk30ryp0l2GGiP/0zyd0luBl7XH45+R/9c3N5Pf2i/fd9J8tUkTxhY/ook3+jb9IokvzZXrbPU/vy+ppuSvHbGtB+fSkly3yTv6ee7Jcnn+9fLSroPkm/va317P38leWmSq4CrBsY9amAVeyQ5r6/700ke1s83/XrbaaCWC5L8TpLHAP/A3a+NW/rp9ziEn+R3k6zun+ezk+wzMK2SvCTJVX17/n2SzNE+M5d7aJK1A8PHJrmu34avJTlslrab3p7lSa5J93587cAy7pfktL6WK/vX7FrmsKn6M+P018y27Nvxjenen7cn+UiSn0j3/+HW/nldOmNdf5juPXtjkjcn2SHde+/mJI8bmHfPJN9LMjVX7doCVeXfdvIH7ArcBJwG/Aqw24zpj6I7ZL0LMAVcCLxlYPoa4HPAXsC+wHrgC8AT+sd8Eji+n3cpUMAZwAOAxwEbgKf2019Hd8iVflk3Ac+g+5D5tH54qn/srcCB/bx7A4+dY/teAHxmYPhQYCPwpr6++wGv77dhz375nwXeMGP+Pwd2Bn63r/l9wIPoTlN8H3jEHOs/tN/OHYDHAzcAz9rEvBv7enbut/27088JcDrw4X69S4GvAy8e2M6NwMvpjrLdDzgVuBH4WeC+/XNxNfDbwI7AG4FPDaz/2cA+fa3/G7gD2Hu2dpxR90HA7cCT+zb9276W2Z7X3wM+Aty/r+FngV37aRfQHQIfXHYB5wG7A/cbGPeo/v6pwG0D637rdJ3c/XrbaWB5P17HbNvUL++N/f2n9O33M/2y3wZcOKO2jwIPAfane10cPkcb/Xi5A8/12v7+gcC1wD4DdT9ylrab3p5/6p/fn6I7kvaYfvqJwKeB3YAlwJen1zFHTXPWP7je2dqyb8fVwCOBBwNX0L0en0r3+jsdOGXGuj7VP4/79/NOPw/vAN40MO8rgI9M+n9jK3/uoW9HqupWuvPM0/8oNvR7Inv101dX1XlVdWdVbaD7Z/1LMxbztqq6oaquA/4DuKiqvlhVdwIfogv3QSdU1R1V9RXgFOA5s5T2POBjVfWxqvpRVZ0HrKILOYAfAQcnuV9Vrauqy7dgs39E9yHjzqr6HvBc4PVVtb7fxhOA5w/M/0NgZVX9EDgT2AN4a1Xd1q/3crqwvpequqCqvtJvw5fpPszMbL9BP+xr+WFVfYwuKA9MsiNdyB7Xr3cN8Dcz6ry+qt5WVRv77QL4UFVdUlXfp3suvl9Vp1fVXcA/M/DcVNW/VNX1fa3/TLdHfMhm2hLgN4CPVtWF/XP+Z3RtPNf2/QRdIN/V13brZpb/l1V188A2zXTOwLpfS7fXvd8QdW/Oc4GTq+oL/bKP65e9dGCeE6vqlqq6hi6wfnoe67mL7gPDQUl2rqo1VfWNTcx/QnVH0r4EfIku2AF+E/iLqvpOVa0F/u8Q696a+k+pqm9U1X8DHwe+UVWfqO60yL9w7/f9m/rn8RrgLdz9vj8N+K30R5voXtPv3oI6tAkG+namqq6sqhdU1RLgYLq9tLfAjw9/ndkfDrwVeA9doA26YeD+92YZntmJ6dqB+9/q1zfTw4Bn94dlb+kPiT6Jbo/xDrpwewmwLsk5SX5y+C1mQx9w0/bp65irppv6AJzeHtj8NgKQ5OeSfCrdYfL/7mue2X6Dbqp7nif+br/sPej6N8ysc9+B4cF2nTb0c5Pkt5NcOtDeB2+m1mn7DK67f35ummPedwP/DpyZ7vTGXyXZeTPLn227Zp1eVbcDNzP7a2pL3eN10S/7Ju7Z5t8euD/9XG2RqloNvJJur3h9/37bVP1zrfMezwObb7dNLWsY2+R9X1UX0R0N+qX+ffwo4OwtqEObYKBvx6rqq3SHBw/uR/0l3d7746tqV7o951nPE26Bwb2n/YHrZ5nnWuDdVfWQgb8HVNWJfZ3/XlVPozvc/lW6owvDmvlzgtfTfYDYXE3z8T66f077VdWD6c7bzqf9bqTbu51Z53UDw/P+mcT+vPM/AS8DfqKqHgJcxnC1rmPgOU1yf7q98HvpjzycUFUHAb8APJPuFMCm6t/cdg2u+4F0h3WvpwsJ6A7vT3voFiz3Hq+LJA+g267r5nzE3O7YRB1U1fuq623/sL6uN81jHevoDrVP25qjFJusd5429b4/je5/y/OB98/4wK2tYKBvR5L8ZLpOWEv64f3oDoV9rp/lQXSHfW9Jsi/w6m2w2j9Lcv8kjwVeSHfod6b3AL+a5OlJdkzXmerQJEvSdaI6sv8He2df312zLGNYZwB/mmQqyR5058u31ffhHwTcXFXfT3II8FvzWUh/hOAsYGWSB/UB/EfbsM4H0AXJBoB0nfEO3uQj7vZ+4JlJnpTkPnR9AGb9P5Lkl5M8rj+FcCvdh5Tp5+4G4BHzqP0ZA+t+A90pn2v70yfXAc/rX0MvojvnO+0GYEn/uNm8D3hhkp9O10nyL/plr5lHjZf2de6e5KF0e+QAJDkwyVP6dXyfbu92Pq/ns4DjkuzWv1dfNo9lDNb75CT7J3kw3emGrfXqvrb96M6TD77v3w38Gl2on74N1qWegb59uQ34OeCiJHfQBfllwHTv8xPoOgX9N3AO8MFtsM5P03WoOR/466o6d+YMVXUtcBTwGrqQuZbu
w8QO/d+r6D7h30x3TvoPtqKeN9Kdn/8y8BW6Tn3b6mIlfwC8PsltdB8UztqKZb2cbs/pm8Bn6ALn5K2uEKiqK+jOyf8XXdA9DvjPIR97OfDSvp51wHeAuXpXP5TuA8CtdNc7+DR3fyh5K/AbfY/rYc7/TnsfcDzda+Fn6c59T/tdutfNTXQdGD87MO2TdP0fvp3kxlm263y6/gAf6LfrkcDRW1DXoHfTne9eA5zLPcNsF7oObTfSHQLfk+51v6VeT9fuVwOfoGvneX39tO+z8s9074lL6DrPba0P98u6lO5/ybsG1reW7n1XdP1wtI2kat5H7qQ59Z2JrgZ2Lr9PLI1Ukt8Hjq6qTXXCHFctBRzQ9xeYa56T6Tp2/un4KmufF5aRpEUmyd50pyz+CziA7ijW2yda1JD6D/u/zr17xmsrechdkhaf+wD/SHca7ZN0h7jfMdGKhpDkDXSn+d5cVVdPup7WeMhdkqQGuIcuSVIDDHRJkhqwqDvF7bHHHrV06dJJlyFJ0thccsklN1bVvX7QZlEH+tKlS1m1atWky5AkaWySfGu28R5ylySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhqwqH9tTdLWW7rinLGta82JR4xtXdL2xj10SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGjCzQk5ycZH2Sy2aMf3mSryW5PMlfDYw/LsnqftrTR1WXJEktGuWPs5wKvB04fXpEkl8GjgIeX1V3JtmzH38QcDTwWGAf4BNJHl1Vd42wPkmSmjGyPfSquhC4ecbo3wdOrKo7+3nW9+OPAs6sqjur6mpgNXDIqGqTJKk14z6H/mjgfya5KMmnk/yPfvy+wLUD863tx91LkmOSrEqyasOGDSMuV5KkxWHcgb4TsBvwRODVwFlJAmSWeWu2BVTVSVW1rKqWTU1Nja5SSZIWkXEH+lrgg9W5GPgRsEc/fr+B+ZYA14+5NkmSFq1xB/q/Ak8BSPJo4D7AjcDZwNFJdknycOAA4OIx1yZJ0qI1sl7uSc4ADgX2SLIWOB44GTi5/yrbD4DlVVXA5UnOAq4ANgIvtYe7JEnDG1mgV9Vz5pj0vDnmXwmsHFU9kiS1zCvFSZLUgFFeWEaS7mHpinPGtq41Jx4xtnVJC4F76JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBuw06QIk3dvSFedMugRJi4x76JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDVgZIGe5OQk65NcNsu0P05SSfYYGHdcktVJvpbk6aOqS5KkFo1yD/1U4PCZI5PsBzwNuGZg3EHA0cBj+8e8I8mOI6xNkqSmjCzQq+pC4OZZJv0d8CdADYw7Cjizqu6sqquB1cAho6pNkqTWjPUcepIjgeuq6kszJu0LXDswvLYfN9syjkmyKsmqDRs2jKhSSZIWl7EFepL7A68F/ny2ybOMq1nGUVUnVdWyqlo2NTW1LUuUJGnRGufPpz4SeDjwpSQAS4AvJDmEbo98v4F5lwDXj7E2SZIWtbHtoVfVV6pqz6paWlVL6UL8Z6rq28DZwNFJdknycOAA4OJx1SZJ0mI3yq+tnQH8F3BgkrVJXjzXvFV1OXAWcAXwb8BLq+quUdUmSVJrRnbIvaqes5npS2cMrwRWjqoeSZJa5pXiJElqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhowskBPcnKS9UkuGxj35iRfTfLlJB9K8pCBacclWZ3ka0mePqq6JElq0Sj30E8FDp8x7jzg4Kp6PPB14DiAJAcBRwOP7R/zjiQ7jrA2SZKaMrJAr6oLgZtnjDu3qjb2g58DlvT3jwLOrKo7q+pqYDVwyKhqkySpNZM8h/4i4OP9/X2Bawemre3HSZKkIew0iZUmeS2wEXjv9KhZZqs5HnsMcAzA/vvvP5L6JC1+S1ecM7Z1rTnxiLGtS5rL2PfQkywHngk8t6qmQ3stsN/AbEuA62d7fFWdVFXLqmrZ1NTUaIuVJGmRGGugJzkcOBY4sqq+OzDpbODoJLskeThwAHDxOGuTJGkxG9kh9yRnAIcCeyRZCxxP16t9F+C8JACfq6qXVNXlSc4CrqA7FP/SqrprVLVJktSakQV6VT1nltHv2sT8K4GVo6pHkqSWeaU4SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEj+7U1qTVLV5wz6RIkaU7uoUuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWrAyAI9yclJ1ie5bGDc7knOS3JVf7vbwLTjkqxO8rUkTx9VXZIktWiUe+inAofPGLcCOL+qDgDO74dJchBwNPDY/jHvSLLjCGuTJKkpIwv0qroQuHnG6KOA0/r7pwHPGhh/ZlXdWVVXA6uBQ0ZVmyRJrRn3OfS9qmodQH+7Zz9+X+DagfnW9uMkSdIQFkqnuMwyrmadMTkmyaokqzZs2DDisiRJWhzGHeg3JNkboL9d349fC+w3MN8S4PrZFlBVJ1XVsqpaNjU1NdJiJUlaLMYd6GcDy/v7y4EPD4w/OskuSR4OHABcPObaJElatHYa1YKTnAEcCuyRZC1wPHAicFaSFwPXAM8GqKrLk5wFXAFsBF5aVXeNqjZJklozskCvqufMMemwOeZfCawcVT2SJLVsoXSKkyRJW8FAlySpAQa6JEkNMNAlSWqAgS5JUgOGCvQk5w8zTpIkTcYmv7aW5L7A/em+S74bd1+idVdgnxHXJkmShrS576H/HvBKuvC+hLsD/Vbg70dXliRJ2hKbDPSqeivw1iQvr6q3jakmSZK0hYa6UlxVvS3JLwBLBx9TVaePqC5JkrQFhgr0JO8GHglcCkxfY70AA12SpAVg2Gu5LwMOqqpZf6NckiRN1rDfQ78MeOgoC5EkSfM37B76HsAVSS4G7pweWVVHjqQqSZK0RYYN9NeNsghJkrR1hu3l/ulRFyJJkuZv2F7ut9H1age4D7AzcEdV7TqqwiRJ0vCG3UN/0OBwkmcBh4yiIEmStOXm9WtrVfWvwFO2bSmSJGm+hj3k/usDgzvQfS/d76RLkrRADNvL/VcH7m8E1gBHbfNqJEnSvAx
7Dv2Foy5Emo+lK86ZdAmStCAMdQ49yZIkH0qyPskNST6QZMmoi5MkScMZtlPcKcDZdL+Lvi/wkX6cJElaAIYN9KmqOqWqNvZ/pwJTI6xLkiRtgWED/cYkz0uyY//3POCmURYmSZKGN2ygvwj4TeDbwDrgNwA7ykmStEAM+7W1NwDLq+o7AEl2B/6aLuglSdKEDbuH/vjpMAeoqpuBJ8x3pUn+T5LLk1yW5Iwk902ye5LzklzV3+423+VLkrS9GTbQdxgM2H4Pfdi9+3tIsi/wh8CyqjoY2BE4GlgBnF9VBwDn98OSJGkIw4by3wCfTfJ+uku+/iawcivXe78kPwTuD1wPHAcc2k8/DbgAOHYr1iFJ0nZj2CvFnZ5kFd0PsgT49aq6Yj4rrKrrkvw1cA3wPeDcqjo3yV5Vta6fZ12SPeezfEmStkdDHzbvA3xeIT6oP3R/FPBw4BbgX/qvwQ37+GOAYwD233//rS1HkqQmzOvnU7fSU4Grq2pDVf0Q+CDwC8ANSfYG6G/Xz/bgqjqpqpZV1bKpKa9tI0kSTCbQrwGemOT+SQIcBlxJd2nZ5f08y4EPT6A2SZIWpXn1VN8aVXVR37nuC3Q/xfpF4CTggcBZSV5MF/rPHndtkiQtVmMPdICqOh44fsboO+n21iVJ0haaxCF3SZK0jRnokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ2YSKAneUiS9yf5apIrk/x8kt2TnJfkqv52t0nUJknSYjSpPfS3Av9WVT8J/BRwJbACOL+qDgDO74clSdIQdhr3CpPsCjwZeAFAVf0A+EGSo4BD+9lOAy4Ajh13fZK0pZauOGds61pz4hFjW5cWl0nsoT8C2ACckuSLSd6Z5AHAXlW1DqC/3XMCtUmStChNItB3An4G+H9V9QTgDrbg8HqSY5KsSrJqw4YNo6pRkqRFZRKBvhZYW1UX9cPvpwv4G5LsDdDfrp/twVV1UlUtq6plU1NTYylYkqSFbuyBXlXfBq5NcmA/6jDgCuBsYHk/bjnw4XHXJknSYjX2TnG9lwPvTXIf4JvAC+k+XJyV5MXANcCzJ1SbJEmLzkQCvaouBZbNMumwMZciSVITvFKcJEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktSAnSZdgNqzdMU5ky5BkrY77qFLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWrAxAI9yY5Jvpjko/3w7knOS3JVf7vbpGqTJGmxmeQe+iuAKweGVwDnV9UBwPn9sCRJGsJELiyTZAlwBLAS+KN+9FHAof3904ALgGPHXZskLWTjvHDTmhOPGNu6tPUmtYf+FuBPgB8NjNurqtYB9Ld7TqAuSZIWpbEHepJnAuur6pJ5Pv6YJKuSrNqwYcM2rk6SpMVpEnvovwgcmWQNcCbwlCTvAW5IsjdAf7t+tgdX1UlVtayqlk1NTY2rZkmSFrSxB3pVHVdVS6pqKXA08Mmqeh5wNrC8n2058OFx1yZJ0mK1kL6HfiLwtCRXAU/rhyVJ0hAm+vOpVXUBXW92quom4LBJ1iNJ0mK1kPbQJUnSPBnokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqwE6TLkDjsXTFOZMuQZI0Qu6hS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqwNgDPcl+ST6V5Moklyd5RT9+9yTnJbmqv91t3LVJkrRYTWIPfSPwqqp6DPBE4KVJDgJWAOdX1QHA+f2wJEkawtgDvarWVdUX+vu3AVcC+wJHAaf1s50GPGvctUmStFhN9Bx6kqXAE4CLgL2qah10oQ/sOcHSJElaVCYW6EkeCHwAeGVV3boFjzsmyaokqzZs2DC6AiVJWkQmEuhJdqYL8/dW1Qf70Tck2bufvjewfrbHVtVJVbWsqpZNTU2Np2BJkha4SfRyD/Au4Mqq+tuBSWcDy/v7y4EPj7s2SZIWq0n82tovAs8HvpLk0n7ca4ATgbOSvBi4Bnj2BGqTJGlRGnugV9VngMwx+bBx1iJJUiu8UpwkSQ0w0CVJasAkzqFLkhaBpSvOGdu61px4xNjW1Sr30CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAn0+doHH+NKEkLWT+VOvWcw9dkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgL3cZ7DnuSRpMXIPXZKkBhjokiQ1wECXJKkBBrokSQ1YcJ3ikhwOvBXYEXhnVZ044ZIkSQ1p9TKzC2oPPcmOwN8DvwIcBDwnyUGTrUqSpIVvQQU6cAiwuqq+WVU/AM4EjppwTZIkLXgLLdD3Ba4dGF7bj5MkSZuw0M6hZ5ZxdY8ZkmOAY/rB25N8beRVwR7AjWNYz0JmG9gGYBuAbQC2AQzZBnnTSNb9sNlGLrRAXwvsNzC8BLh+cIaqOgk4aZxFJVlVVcvGuc6FxjawDcA2ANsAbANYmG2w0A65fx44IMnDk9wHOBo4e8I1SZK04C2oPfSq2pjkZcC/031t7eSqunzCZUmStOAtqEAHqKqPAR+bdB0zjPUQ/wJlG9gGYBuAbQC2ASzANkhVbX4uSZK0oC20c+iSJGkeDPQhJHlDki8nuTTJuUn2mXRN45bkzUm+2rfDh5I8ZNI1jVuSZye5PMmPkiyo3q2jluTwJF9LsjrJiknXMwlJTk6yPsllk65lUpLsl+RTSa7s3wuvmHRN45bkvkkuTvKlvg1OmHRN0zzkPoQku1bVrf39PwQOqqqXTLissUryv4BP9h0X3wRQVcdOuKyxSvIY4EfAPwJ/XFWrJlzSWPSXZP468DS6r5Z+HnhOVV0x0cLGLMmTgduB06vq4EnXMwlJ9gb2rqovJHkQcAnwrO3ptZAkwAOq6vYkOwOfAV5RVZ+bcGnuoQ9jOsx7D2DGxW62B1V1blVt7Ac/R3eNgO1KVV1ZVeO4kNFC4yWZgaq6ELh50nVMUlWtq6ov9PdvA65kO7uaZ3Vu7wd37v8WRCYY6ENKsjLJtcBzgT+fdD0T9iLg45MuQmPjJZl1L0mWAk8ALppwKWOXZMcklwLrgfOqakG0gYHeS/KJJJfN8ncUQFW9tqr2A94LvGyy1Y7G5tqgn+e1wEa6dmjOMG
2wHdrsJZm1fUnyQOADwCtnHMHcLlTVXVX103RHKg9JsiBOwSy476FPSlU9dchZ3wecAxw/wnImYnNtkGQ58EzgsGq088UWvA62J5u9JLO2H/154w8A762qD066nkmqqluSXAAcDky8s6R76ENIcsDA4JHAVydVy6QkORw4Fjiyqr476Xo0Vl6SWcCPO4S9C7iyqv520vVMQpKp6W/5JLkf8FQWSCbYy30IST4AHEjXw/lbwEuq6rrJVjVeSVYDuwA39aM+tx329P814G3AFHALcGlVPX2iRY1JkmcAb+HuSzKvnGxF45fkDOBQul/ZugE4vqreNdGixizJk4D/AL5C9/8Q4DX9FT63C0keD5xG917YATirql4/2ao6BrokSQ3wkLskSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAf8fx1ZFpDaNFggAAAAASUVORK5CYII=\n" + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAF1CAYAAAAeOhj3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAAgAElEQVR4nO3deZhkdX3v8feHRVxRCA0CA44LEhFNTOYSk3gNEb0SMWDyxFyMmnFJiIkavTGGQZMg6iQYs+jVeBOibC4Q4hJRNAFRJMYIDorKojLKCAMjM4CERUUHv/ePc1qKpnumpmeqqvs379fz9FN1ljrne35V1Z865/zqVKoKSZK0uO0w6QIkSdLWM9AlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiaiCSvS/KebbzMJDklyXeSXLwtl709SfKCJJ/ZgvnXJHlqf/81Sd65DWu5Pckj+vunJnnjNlz2PyT5s221vK21rdtO25+dJl2AxivJk4C/Ah4L3AVcCbyyqj4/0cK2jScBTwOWVNUdky5me1RVfzHMfEkuAN5TVZsMsKp64LaoK8kLgN+pqicNLPsl22LZ28qwbSfNxUDfjiTZFfgo8PvAWcB9gP8J3DnJurahhwFr5grzJDtV1cYx1zRyLW5Xi9skjZqH3LcvjwaoqjOq6q6q+l5VnVtVXwZI8sgkn0xyU5Ibk7w3yUOmH9wfWn11ki8nuSPJu5LsleTjSW5L8okku/XzLk1SSY5Jcn2SdUleNVdhSZ6Y5LNJbknypSSHDkx7QZJv9uu4OslzZ3n8i4F3Aj/fH6Y9IcmhSdYmOTbJt4FTkuyS5C19Tdf393fplzE9/58kWd/X/Kwkz0jy9SQ3J3nNJrbhiCRfTHJrkmuTvG4T806v61UD63rhwPQHJzk9yYYk30ryp0l2GGiP/0zyd0luBl7XH45+R/9c3N5Pf2i/fd9J8tUkTxhY/ook3+jb9IokvzZXrbPU/vy+ppuSvHbGtB+fSkly3yTv6ee7Jcnn+9fLSroPkm/va317P38leWmSq4CrBsY9amAVeyQ5r6/700ke1s83/XrbaaCWC5L8TpLHAP/A3a+NW/rp9ziEn+R3k6zun+ezk+wzMK2SvCTJVX17/n2SzNE+M5d7aJK1A8PHJrmu34avJTlslrab3p7lSa5J93587cAy7pfktL6WK/vX7FrmsKn6M+P018y27Nvxjenen7cn+UiSn0j3/+HW/nldOmNdf5juPXtjkjcn2SHde+/mJI8bmHfPJN9LMjVX7doCVeXfdvIH7ArcBJwG/Aqw24zpj6I7ZL0LMAVcCLxlYPoa4HPAXsC+wHrgC8AT+sd8Eji+n3cpUMAZwAOAxwEbgKf2019Hd8iVflk3Ac+g+5D5tH54qn/srcCB/bx7A4+dY/teAHxmYPhQYCPwpr6++wGv77dhz375nwXeMGP+Pwd2Bn63r/l9wIPoTlN8H3jEHOs/tN/OHYDHAzcAz9rEvBv7enbut/27088JcDrw4X69S4GvAy8e2M6NwMvpjrLdDzgVuBH4WeC+/XNxNfDbwI7AG4FPDaz/2cA+fa3/G7gD2Hu2dpxR90HA7cCT+zb9276W2Z7X3wM+Aty/r+FngV37aRfQHQIfXHYB5wG7A/cbGPeo/v6pwG0D637rdJ3c/XrbaWB5P17HbNvUL++N/f2n9O33M/2y3wZcOKO2jwIPAfane10cPkcb/Xi5A8/12v7+gcC1wD4DdT9ylrab3p5/6p/fn6I7kvaYfvqJwKeB3YAlwJen1zFHTXPWP7je2dqyb8fVwCOBBwNX0L0en0r3+jsdOGXGuj7VP4/79/NOPw/vAN40MO8rgI9M+n9jK3/uoW9HqupWuvPM0/8oNvR7Inv101dX1XlVdWdVbaD7Z/1LMxbztqq6oaquA/4DuKiqvlhVdwIfogv3QSdU1R1V9RXgFOA5s5T2POBjVfWxqvpRVZ0HrKILOYAfAQcnuV9Vrauqy7dgs39E9yHjzqr6HvBc4PVVtb7fxhOA5w/M/0NgZVX9EDgT2AN4a1Xd1q/3crqwvpequqCqvtJvw5fpPszMbL9BP+xr+WFVfYwuKA9MsiNdyB7Xr3cN8Dcz6ry+qt5WVRv77QL4UFVdUlXfp3suvl9Vp1fVXcA/M/DcVNW/VNX1fa3/TLdHfMhm2hLgN4CPVtWF/XP+Z3RtPNf2/QRdIN/V13brZpb/l1V188A2zXTOwLpfS7fXvd8QdW/Oc4GTq+oL/bKP65e9dGCeE6vqlqq6hi6wfnoe67mL7gPDQUl2rqo1VfWNTcx/QnVH0r4EfIku2AF+E/iLqvpOVa0F/u8Q696a+k+pqm9U1X8DHwe+UVWfqO60yL9w7/f9m/rn8RrgLdz9vj8N+K30R5voXtPv3oI6tAkG+namqq6sqhdU1RLgYLq9tLfAjw9/ndkfDrwVeA9doA26YeD+92YZntmJ6dqB+9/q1zfTw4Bn94dlb+kPiT6Jbo/xDrpwewmwLsk5SX5y+C1mQx9w0/bp65irppv6AJzeHtj8NgKQ5OeSfCrdYfL/7mue2X6Dbqp7nif+br/sPej6N8ysc9+B4cF2nTb0c5Pkt5NcOtDeB2+m1mn7DK67f35ummPedwP/DpyZ7vTGXyXZeTPLn227Zp1eVbcDNzP7a2pL3eN10S/7Ju7Z5t8euD/9XG2RqloNvJJur3h9/37bVP1zrfMezwObb7dNLWsY2+R9X1UX0R0N+qX+ffwo4OwtqEObYKBvx6rqq3SHBw/uR/0l3d7746tqV7o951nPE26Bwb2n/YHrZ5nnWuDdVfWQgb8HVNWJfZ3/XlVPozvc/lW6owvDmvlzgtfTfYDYXE3z8T66f077VdWD6c7bzqf9bqTbu51Z53UDw/P+mcT+vPM/AS8DfqKqHgJcxnC1rmPgOU1yf7q98HvpjzycUFUHAb8APJPuFMCm6t/cdg2u+4F0h3WvpwsJ6A7vT3voFiz3Hq+LJA+g267r5nzE3O7YRB1U1fuq623/sL6uN81jHevoDrVP25qjFJusd5429b4/je5/y/OB9
8/4wK2tYKBvR5L8ZLpOWEv64f3oDoV9rp/lQXSHfW9Jsi/w6m2w2j9Lcv8kjwVeSHfod6b3AL+a5OlJdkzXmerQJEvSdaI6sv8He2df312zLGNYZwB/mmQqyR5058u31ffhHwTcXFXfT3II8FvzWUh/hOAsYGWSB/UB/EfbsM4H0AXJBoB0nfEO3uQj7vZ+4JlJnpTkPnR9AGb9P5Lkl5M8rj+FcCvdh5Tp5+4G4BHzqP0ZA+t+A90pn2v70yfXAc/rX0MvojvnO+0GYEn/uNm8D3hhkp9O10nyL/plr5lHjZf2de6e5KF0e+QAJDkwyVP6dXyfbu92Pq/ns4DjkuzWv1dfNo9lDNb75CT7J3kw3emGrfXqvrb96M6TD77v3w38Gl2on74N1qWegb59uQ34OeCiJHfQBfllwHTv8xPoOgX9N3AO8MFtsM5P03WoOR/466o6d+YMVXUtcBTwGrqQuZbuw8QO/d+r6D7h30x3TvoPtqKeN9Kdn/8y8BW6Tn3b6mIlfwC8PsltdB8UztqKZb2cbs/pm8Bn6ALn5K2uEKiqK+jOyf8XXdA9DvjPIR97OfDSvp51wHeAuXpXP5TuA8CtdNc7+DR3fyh5K/AbfY/rYc7/TnsfcDzda+Fn6c59T/tdutfNTXQdGD87MO2TdP0fvp3kxlm263y6/gAf6LfrkcDRW1DXoHfTne9eA5zLPcNsF7oObTfSHQLfk+51v6VeT9fuVwOfoGvneX39tO+z8s9074lL6DrPba0P98u6lO5/ybsG1reW7n1XdP1wtI2kat5H7qQ59Z2JrgZ2Lr9PLI1Ukt8Hjq6qTXXCHFctBRzQ9xeYa56T6Tp2/un4KmufF5aRpEUmyd50pyz+CziA7ijW2yda1JD6D/u/zr17xmsrechdkhaf+wD/SHca7ZN0h7jfMdGKhpDkDXSn+d5cVVdPup7WeMhdkqQGuIcuSVIDDHRJkhqwqDvF7bHHHrV06dJJlyFJ0thccsklN1bVvX7QZlEH+tKlS1m1atWky5AkaWySfGu28R5ylySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhqwqH9tTdLWW7rinLGta82JR4xtXdL2xj10SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGjCzQk5ycZH2Sy2aMf3mSryW5PMlfDYw/LsnqftrTR1WXJEktGuWPs5wKvB04fXpEkl8GjgIeX1V3JtmzH38QcDTwWGAf4BNJHl1Vd42wPkmSmjGyPfSquhC4ecbo3wdOrKo7+3nW9+OPAs6sqjur6mpgNXDIqGqTJKk14z6H/mjgfya5KMmnk/yPfvy+wLUD863tx91LkmOSrEqyasOGDSMuV5KkxWHcgb4TsBvwRODVwFlJAmSWeWu2BVTVSVW1rKqWTU1Nja5SSZIWkXEH+lrgg9W5GPgRsEc/fr+B+ZYA14+5NkmSFq1xB/q/Ak8BSPJo4D7AjcDZwNFJdknycOAA4OIx1yZJ0qI1sl7uSc4ADgX2SLIWOB44GTi5/yrbD4DlVVXA5UnOAq4ANgIvtYe7JEnDG1mgV9Vz5pj0vDnmXwmsHFU9kiS1zCvFSZLUgFFeWEaS7mHpinPGtq41Jx4xtnVJC4F76JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBuw06QIk3dvSFedMugRJi4x76JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDVgZIGe5OQk65NcNsu0P05SSfYYGHdcktVJvpbk6aOqS5KkFo1yD/1U4PCZI5PsBzwNuGZg3EHA0cBj+8e8I8mOI6xNkqSmjCzQq+pC4OZZJv0d8CdADYw7Cjizqu6sqquB1cAho6pNkqTWjPUcepIjgeuq6kszJu0LXDswvLYfN9syjkmyKsmqDRs2jKhSSZIWl7EFepL7A68F/ny2ybOMq1nGUVUnVdWyqlo2NTW1LUuUJGnRGufPpz4SeDjwpSQAS4AvJDmEbo98v4F5lwDXj7E2SZIWtbHtoVfVV6pqz6paWlVL6UL8Z6rq28DZwNFJdknycOAA4OJx1SZJ0mI3yq+tnQH8F3BgkrVJXjzXvFV1OXAWcAXwb8BLq+quUdUmSVJrRnbIvaqes5npS2cMrwRWjqoeSZJa5pXiJElqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhowskBPcnKS9UkuGxj35iRfTfLlJB9K8pCBacclWZ3ka0mePqq6JElq0Sj30E8FDp8x7jzg4Kp6PPB14DiAJAcBRwOP7R/zjiQ7jrA2SZKaMrJAr6oLgZtnjDu3qjb2g58DlvT3jwLOrKo7q+pqYDVwyKhqkySpNZM8h/4i4OP9/X2Bawemre3HSZKkIew0iZUmeS2wEXjv9KhZZqs5HnsMcAzA/vvvP5L6JC1+S1ecM7Z1rTnxiLGtS5rL2PfQkywHngk8t6qmQ3stsN/AbEuA62d7fFWdVFXLqmrZ1NTUaIuVJGmRGGugJzkcOBY4sqq+OzDpbODoJLskeThwAHDxOGuTJGkxG9kh9yRnAIcCeyRZCxxP16t9F+C8JACfq6qXVNXlSc4CrqA7FP/SqrprVLVJktSakQV6VT1nltHv2sT8K4GVo6pHkqSWeaU4SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEj+7U1qTVLV5wz6RIkaU7uoUuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWrAyAI9yclJ1ie5bGDc7knOS3JVf7vbwLTjkqxO8rUkTx9VXZIktWiUe+inAofPGLcCOL+qDgDO74dJchBwNPDY/jHvSLLjCGuTJKkpIwv0qroQuHnG6KOA0/r7pwHPGhh/ZlXdWVVXA6uBQ0ZVmyRJrRn3OfS9qmodQH+7Zz9+X+DagfnW9uMkSdIQFkqnuMwyrmadMTkmyaokqzZs2DDisiRJWhzGHeg3JNkboL9d349fC+w3MN8S4PrZFlBVJ1XVsqpaNjU1NdJiJUlaLMYd6GcDy/v7y4EPD4w/OskuSR4OHABcPObaJElatHYa1YKTnAEcCuyRZC1wPHAicFaSFwPXAM8GqKrLk5wFXAFsBF5a
VXeNqjZJklozskCvqufMMemwOeZfCawcVT2SJLVsoXSKkyRJW8FAlySpAQa6JEkNMNAlSWqAgS5JUgOGCvQk5w8zTpIkTcYmv7aW5L7A/em+S74bd1+idVdgnxHXJkmShrS576H/HvBKuvC+hLsD/Vbg70dXliRJ2hKbDPSqeivw1iQvr6q3jakmSZK0hYa6UlxVvS3JLwBLBx9TVaePqC5JkrQFhgr0JO8GHglcCkxfY70AA12SpAVg2Gu5LwMOqqpZf6NckiRN1rDfQ78MeOgoC5EkSfM37B76HsAVSS4G7pweWVVHjqQqSZK0RYYN9NeNsghJkrR1hu3l/ulRFyJJkuZv2F7ut9H1age4D7AzcEdV7TqqwiRJ0vCG3UN/0OBwkmcBh4yiIEmStOXm9WtrVfWvwFO2bSmSJGm+hj3k/usDgzvQfS/d76RLkrRADNvL/VcH7m8E1gBHbfNqJEnSvAx7Dv2Foy5Emo+lK86ZdAmStCAMdQ49yZIkH0qyPskNST6QZMmoi5MkScMZtlPcKcDZdL+Lvi/wkX6cJElaAIYN9KmqOqWqNvZ/pwJTI6xLkiRtgWED/cYkz0uyY//3POCmURYmSZKGN2ygvwj4TeDbwDrgNwA7ykmStEAM+7W1NwDLq+o7AEl2B/6aLuglSdKEDbuH/vjpMAeoqpuBJ8x3pUn+T5LLk1yW5Iwk902ye5LzklzV3+423+VLkrS9GTbQdxgM2H4Pfdi9+3tIsi/wh8CyqjoY2BE4GlgBnF9VBwDn98OSJGkIw4by3wCfTfJ+uku+/iawcivXe78kPwTuD1wPHAcc2k8/DbgAOHYr1iFJ0nZj2CvFnZ5kFd0PsgT49aq6Yj4rrKrrkvw1cA3wPeDcqjo3yV5Vta6fZ12SPeezfEmStkdDHzbvA3xeIT6oP3R/FPBw4BbgX/qvwQ37+GOAYwD233//rS1HkqQmzOvnU7fSU4Grq2pDVf0Q+CDwC8ANSfYG6G/Xz/bgqjqpqpZV1bKpKa9tI0kSTCbQrwGemOT+SQIcBlxJd2nZ5f08y4EPT6A2SZIWpXn1VN8aVXVR37nuC3Q/xfpF4CTggcBZSV5MF/rPHndtkiQtVmMPdICqOh44fsboO+n21iVJ0haaxCF3SZK0jRnokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ2YSKAneUiS9yf5apIrk/x8kt2TnJfkqv52t0nUJknSYjSpPfS3Av9WVT8J/BRwJbACOL+qDgDO74clSdIQdhr3CpPsCjwZeAFAVf0A+EGSo4BD+9lOAy4Ajh13fZK0pZauOGds61pz4hFjW5cWl0nsoT8C2ACckuSLSd6Z5AHAXlW1DqC/3XMCtUmStChNItB3An4G+H9V9QTgDrbg8HqSY5KsSrJqw4YNo6pRkqRFZRKBvhZYW1UX9cPvpwv4G5LsDdDfrp/twVV1UlUtq6plU1NTYylYkqSFbuyBXlXfBq5NcmA/6jDgCuBsYHk/bjnw4XHXJknSYjX2TnG9lwPvTXIf4JvAC+k+XJyV5MXANcCzJ1SbJEmLzkQCvaouBZbNMumwMZciSVITvFKcJEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktSAnSZdgNqzdMU5ky5BkrY77qFLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWrAxAI9yY5Jvpjko/3w7knOS3JVf7vbpGqTJGmxmeQe+iuAKweGVwDnV9UBwPn9sCRJGsJELiyTZAlwBLAS+KN+9FHAof3904ALgGPHXZskLWTjvHDTmhOPGNu6tPUmtYf+FuBPgB8NjNurqtYB9Ld7TqAuSZIWpbEHepJnAuur6pJ5Pv6YJKuSrNqwYcM2rk6SpMVpEnvovwgcmWQNcCbwlCTvAW5IsjdAf7t+tgdX1UlVtayqlk1NTY2rZkmSFrSxB3pVHVdVS6pqKXA08Mmqeh5wNrC8n2058OFx1yZJ0mK1kL6HfiLwtCRXAU/rhyVJ0hAm+vOpVXUBXW92quom4LBJ1iNJ0mK1kPbQJUnSPBnokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqwE6TLkDjsXTFOZMuQZI0Qu6hS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqwNgDPcl+ST6V5Moklyd5RT9+9yTnJbmqv91t3LVJkrRYTWIPfSPwqqp6DPBE4KVJDgJWAOdX1QHA+f2wJEkawtgDvarWVdUX+vu3AVcC+wJHAaf1s50GPGvctUmStFhN9Bx6kqXAE4CLgL2qah10oQ/sOcHSJElaVCYW6EkeCHwAeGVV3boFjzsmyaokqzZs2DC6AiVJWkQmEuhJdqYL8/dW1Qf70Tck2bufvjewfrbHVtVJVbWsqpZNTU2Np2BJkha4SfRyD/Au4Mqq+tuBSWcDy/v7y4EPj7s2SZIWq0n82tovAs8HvpLk0n7ca4ATgbOSvBi4Bnj2BGqTJGlRGnugV9VngMwx+bBx1iJJUiu8UpwkSQ0w0CVJasAkzqFLkhaBpSvOGdu61px4xNjW1Sr30CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAn0+doHH+NKEkLWT+VOvWcw9dkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgL3cZ7DnuSRpMXIPXZKkBhjokiQ1wECXJKkBBrokSQ1YcJ3ikhwOvBXYEXhnVZ044ZIkSQ1p9TKzC2oPPcmOwN8DvwIcBDwnyUGTrUqSpIVvQQU6cAiwuqq+WVU/AM4EjppwTZIkLXgLLdD3Ba4dGF7bj5MkSZuw0M6hZ5ZxdY8ZkmOAY/rB25N8beRVwR7AjWNYz0JmG9gGYBuAbQC2AQzZBnnTSNb9sNlGLrRAXwvsNzC8BLh+cIaqOgk4aZxFJVlVVcvGuc6FxjawDcA2ANsAbANYmG2w0A65fx44IMnDk9wHOBo4e8I1SZK04C2oPfSq2pjkZcC/031t7eSqunzCZUmStOAtqEAHqKqPAR+bdB0zjPUQ/wJlG9gGYBuAbQC2ASzANkhVbX4uSZK0oC20c+iSJGkeDPQhJHlDki8nuTTJuUn2mXRN45bkzUm+2rfDh5I8ZNI1jVuSZye
5PMmPkiyo3q2jluTwJF9LsjrJiknXMwlJTk6yPsllk65lUpLsl+RTSa7s3wuvmHRN45bkvkkuTvKlvg1OmHRN0zzkPoQku1bVrf39PwQOqqqXTLissUryv4BP9h0X3wRQVcdOuKyxSvIY4EfAPwJ/XFWrJlzSWPSXZP468DS6r5Z+HnhOVV0x0cLGLMmTgduB06vq4EnXMwlJ9gb2rqovJHkQcAnwrO3ptZAkwAOq6vYkOwOfAV5RVZ+bcGnuoQ9jOsx7D2DGxW62B1V1blVt7Ac/R3eNgO1KVV1ZVeO4kNFC4yWZgaq6ELh50nVMUlWtq6ov9PdvA65kO7uaZ3Vu7wd37v8WRCYY6ENKsjLJtcBzgT+fdD0T9iLg45MuQmPjJZl1L0mWAk8ALppwKWOXZMcklwLrgfOqakG0gYHeS/KJJJfN8ncUQFW9tqr2A94LvGyy1Y7G5tqgn+e1wEa6dmjOMG2wHdrsJZm1fUnyQOADwCtnHMHcLlTVXVX103RHKg9JsiBOwSy476FPSlU9dchZ3wecAxw/wnImYnNtkGQ58EzgsGq088UWvA62J5u9JLO2H/154w8A762qD066nkmqqluSXAAcDky8s6R76ENIcsDA4JHAVydVy6QkORw4Fjiyqr476Xo0Vl6SWcCPO4S9C7iyqv520vVMQpKp6W/5JLkf8FQWSCbYy30IST4AHEjXw/lbwEuq6rrJVjVeSVYDuwA39aM+tx329P814G3AFHALcGlVPX2iRY1JkmcAb+HuSzKvnGxF45fkDOBQul/ZugE4vqreNdGixizJk4D/AL5C9/8Q4DX9FT63C0keD5xG917YATirql4/2ao6BrokSQ3wkLskSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAf8fx1ZFpDaNFggAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] }, "metadata": { "needs_background": "light" @@ -783,7 +813,9 @@ "outputs": [ { "data": { - "text/plain": "TensorType(float64, ())" + "text/plain": [ + "TensorType(float64, ())" + ] }, "execution_count": 24, "metadata": {}, @@ -821,7 +853,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 25, "metadata": {}, @@ -859,7 +893,9 @@ "outputs": [ { "data": { - "text/plain": "array(-0.8043385)" + "text/plain": [ + "array(-0.8043385)" + ] }, "execution_count": 26, "metadata": {}, @@ -953,7 +989,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 28, "metadata": {}, @@ -1020,8 +1058,10 @@ "outputs": [ { "data": { - "text/plain": "
", - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAF1CAYAAAAeOhj3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAAefklEQVR4nO3deZhkdX3v8feHRVRkDaOyOoCEBNHAzTzk3kQNcYnEa9yeaCBqcIlIIok+MbmuCYNIonG/GjUoCKigRMLVRHIDQYUYgzogIquCjDIwDsOMrCrXGb73j3MairZ7pmemq6r71+/X89TTVb+zfc+pqv7U+Z1Tp1JVSJKk+W2rcRcgSZK2nIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wEDXnJBkaZJPzvI8k+TjSX6U5OuzOe+FJMlLk3xlE8ZfnuRp/f03JfnYLNZyd5L9+vunJXnbLM77I0n+arbmt6Vme9upfduMuwCNV5InAn8HPA5YD1wDvLaqvjHWwmbHE4GnA3tV1T3jLmYhqqq/mcl4Sb4MfLKqNhhgVfWI2agryUuBP6qqJw7M+9jZmPdsmem2kyYY6AtYkh2BfwH+GDgbeAjwJODecdY1ix4DLJ8uzJNsU1XrRlzT0LW4Xi2ukzTb7HJf2H4RoKrOqqr1VfWTqjq/qq4ASLJ/ki8mWZPktiSfSrLzxMR91+pfJrkiyT1JTknyqCT/muSuJP+eZJd+3MVJKskxSW5JsjLJ66YrLMl/T/LVJLcn+VaSwweGvTTJ9/pl3JjkRVNM/wrgY8D/6LtpT0hyeJIVSV6f5IfAx5Nsl+R9fU239Pe36+cxMf7/SnJrX/NzkzwzyXeSrE3ypg2sw/9M8s0kdya5KcnSDYw7sazXDSzrZQPDd0pyRpLVSb6f5C1JthrYHv+Z5L1J1gJL++7oD/XPxd398Ef36/ejJNcmOXRg/m9IckO/Ta9O8rzpap2i9pf0Na1J8uZJw+4/lJLkoUk+2Y93e5Jv9K+Xk+g+SH6wr/WD/fiV5NVJvgt8d6DtsQOL2C3JBX3dFyV5TD/exOttm4Favpzkj5L8MvARHnht3N4Pf1AXfpJXJrm+f54/n2SPgWGV5Ngk3+23598nyTTbZ/J8D0+yYuDx65Pc3K/DdUmeOsW2m1ifo5P8IN378c0D83hYktP7Wq7pX7MrmEY/rz9L9z66Lck7k2yV7v2wNsnjB8Z9ZJKfJFmUTXxPJNk63aGDidfWpUn2nq4ubaGq8rZAb8COwBrgdOB3gF0mDX8sXZf1dsAi4GLgfQPDlwOXAI8C9gRuBS4DDu2n+SJwfD/uYqCAs4DtgccDq4Gn9cOX0nW50s9rDfBMug+dT+8fL+qnvRM4sB93d+Bx06zfS4GvDDw+HFgHvKOv72HAW/t1eGQ//68CJ04a/6+BbYFX9jWfCexAd5jip8B+0yz/8H49twKeAKwCnruBcdf19Wzbr/uPJ54T4Azgc/1yFwPfAV4xsJ7rgD+l63V7GHAacBvwq8BD++fiRuAPga2BtwFfGlj+C4A9+lp/H7gH2H2q7Tip7oOAu4En99v0PX0tUz2vrwL+GXh4X8OvAjv2w75M1wU+OO8CLgB2BR420PbY/v5pwF0Dy37/RJ088HrbZmB+9y9jqnXq5/e2/v5T+u333/p5fwC4eFJt/wLsDOxD97o4YpptdP98B57rFf39A4GbgD0G6t5/im03sT4f7Z/fX6HrSfvlfvjbgYuAXYC9gCsmljFNTQV8qd+2+9C9nia2zYeAdwyM+xrgnzfnPQH8JfDtfj3T1/0L4/7f1+rNPfQFrKrupDvOPPGPYnW/J/Kofvj1VXVBVd1bVavp/ln/5qTZfKCqVlXVzcB/AF+rqm9W1b3AuXThPuiEqrqnqr4NfBw4aorSXgycV1XnVdV9VXUBsIwu5ADuAw5O8rCqWllVV23Cat9H9yHj3qr6CfAi4K1VdWu/jicALxkY/2fASVX1M+DTwG7A+6vqrn65V9GF9c+pqi9X1bf7dbiC7sPM5O036Gd9LT+rqvPogvLAJFvThewb++UuB949qc5bquoDVbWuXy+Ac6vq0qr6Kd1z8dOqOqOq1gOfYeC5qap/rKpb+lo/Q7dHfNhGtiXA7wH/UlUX98/5X9Ft4+nW7xfoAnl9X9udG5n/31bV2oF1muwLA8t+M91e92zsAb4IOLWqLuvn/cZ+3osHxnl7Vd1eVT+gC8dDNmM56+k+MByUZNuqWl5VN2xg/BOq60n7FvAtuoAEeCHwN1X1o6paAfzvGSz7Hf22/QHwPh54L54O/MFEDxDd6+wTA9Ntynvij4C3VNV11flWVa2ZQW3aDAb6AldV11TVS6tqL+Bgur2098H9XW2f7rsD7wQ+SffmHbRq4P5Ppng8+SSmmwbuf79f3mSPAV7Qd8ve3neJPpFuj/EeunA7FliZ5AtJfmnma8zqPuAm7NHXMV1Na/oAnFgf2Pg6ApDk15J8KV03+R19zZO336A19eDjxD/u570b3fkNk+vcc+Dx4HadMOPnJskfJrl8YHsfvJFaJ+wxuOz++ZnuH/YngH8DPp3u8MbfJdl2I/Ofar2mHF5VdwNrmfo1take9Lro572GB2/zHw7cn3iuNklVXQ+8lm5v/Nb+/bah+qdb5oOeBza+3SaPc//rvqq+RtdD85v9e+uxwOcHxt2U98TewIY+oGgWGei6X1VdS9c9eHDf9Ld0e+9PqKod6facpzxOuAkG9572AW6ZYpybgE9U1c4Dt+2r6u19nf9WVU+n626/lq53YaYm/7zgLXQfIDZW0+Y4k+4f4d5VtRPdcdvN2X630e0VTa7z5oHHm/2zif1x548Cx9F1h+4MXMnMal3JwHOa5OF0e+E/p+95OKGqDgJ+HXgW3SGADdW/sfUaXPYj6LqQb6ELJOi69yc8ehPm+6DXRZLt6dbr5mmnmN49G6iDqjqzurPtH9PX9Y7NWMZKuq72CTPppdjQe/F0uvf7S4DPTvoQvCluAvbfzGm1iQz0BSzJL6U7CWuv/vHedN1ul/Sj7EDX7Xt7kj3pjodtqb9K8vAkjwNeRtf1O9kngd9N8oz+pJqH9ifj7JXuJKpn9/9g7+3rWz/FPGbqLOAt/Qk/u9EdG5yt78PvAKytqp8mOQz4g82ZSb83dDZwUpId+gD+81msc3u6IFkNkO5kvIM3OMUDPgs8K8kTkzyE7hyAKf+vJPmtJI/vDyHcSfchZeK5WwXstxm1P3Ng2SfSHfK5qT98cjPw4v419HIeHCyrgL366aZyJvCyJIekO0nyb/p5L9+MGi/v69w1yaPp9sgBSHJgkqf0y/gp3d7t5ryezwbemGSX/r163Aym+ct+/L3pjpMPvhc/ATyPLtTP2Ix6JnwMODHJAek8IcmUH/i05Qz0he0u4NeAryW5hy7IrwQmzj4/ge6koDuALwD/NAvLvAi4HrgQeFdVnT95hKq6CXgO8Ca6kLmJ7sPEVv3tdXR7E2vpjkn/yRbU8za64/NX0J28c1nfNhv+BHhrkrvoPiic
vQXz+lO6Pb3vAV+hC5xTt7hCoKqupjsm/190Qfd44D9nOO1VwKv7elYCPwKmO7v60XQfAO6ku97BRTzwoeT9wO/1Z2nP5PjvhDOB4+leC79Kd+x7wivpXjdr6E7W+urAsC/SHev9YZLbplivC+nOBzinX6/9gSM3oa5Bn6A73r0cOJ8HB+d2dCe03UbXnf5Iutf9pnor3Xa/Efh3uu28sa+ffg64lO4DxxeAUyYG9MfhL6P7oPcfm1HPhPfQve7Pp3veT6E7qU9DkKrN7qmTZqw/mehGYNvy+8TSUCX5Y+DIqpryJMwkBRzQH8Ofbh6n0p1s+ZYhlalZ5oVlJGmeS7I73SGL/wIOoOvF+uAWzG8x8Hx+/lsqmsPscpek+e8hwD/QHUb7Il13+oc2Z0ZJTqQ79PbOqrpx1irU0NnlLklSA9xDlySpAQa6JEkNmNcnxe222261ePHicZchSdLIXHrppbdV1aLJ7fM60BcvXsyyZcvGXYYkSSOT5PtTtdvlLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDVgXv/amtSspTuNcFl3jG5ZkobGPXRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSA4YW6ElOTXJrkisH2j6T5PL+tjzJ5X374iQ/GRj2kWHVJUlSi4Z5pbjTgA8CZ0w0VNXvT9xP8m5g8BJVN1TVIUOsR5KkZg0t0Kvq4iSLpxqWJMALgacMa/mSJC0k4zqG/iRgVVV9d6Bt3yTfTHJRkidNN2GSY5IsS7Js9erVw69UkqR5YFyBfhRw1sDjlcA+VXUo8OfAmUl2nGrCqjq5qpZU1ZJFixaNoFRJkua+kQd6km2A5wOfmWirqnurak1//1LgBuAXR12bJEnz1Tj20J8GXFtVKyYakixKsnV/fz/gAOB7Y6hNkqR5aZhfWzsL+C/gwCQrkryiH3QkD+5uB3gycEWSbwGfBY6tqrXDqk2SpNYM8yz3o6Zpf+kUbecA5wyrFkmSWueV4iRJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIasM24C5A0Zkt3GuGy7hjdsqQFxj10SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAUML9CSnJrk1yZUDbUuT3Jzk8v72zIFhb0xyfZLrkjxjWHVJktSiYe6hnwYcMUX7e6vqkP52HkCSg4Ajgcf103woydZDrE2SpKYMLdCr6mJg7QxHfw7w6aq6t6puBK4HDhtWbZIktWYcx9CPS3JF3yW/S9+2J3DTwDgr+jZJkjQDo75S3IeBE4Hq/74beDmQKcatqWaQ5BjgGIB99tlnOFVKGg6vSicNzUj30KtqVVWtr6r7gI/yQLf6CmDvgVH3Am6ZZh4nV9WSqlqyaNGi4RYsSdI8MdJAT7L7wMPnARNnwH8eODLJdkn2BQ4Avj7K2iRJms+G1uWe5CzgcGC3JCuA44HDkxxC152+HHgVQFVdleRs4GpgHfDqqlo/rNokSWrN0AK9qo6aovmUDYx/EnDSsOqRJKllXilOkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDVgm3EXIM0bS3cadwWSNC330CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAUML9CSnJrk1yZUDbe9Mcm2SK5Kcm2Tnvn1xkp8kuby/fWRYdUmS1KJh7qGfBhwxqe0C4OCqegLwHeCNA8NuqKpD+tuxQ6xLkqTmDC3Qq+piYO2ktvOral3/8BJgr2EtX5KkhWScx9BfDvzrwON9k3wzyUVJnjTdREmOSbIsybLVq1cPv0pJkuaBsQR6kjcD64BP9U0rgX2q6lDgz4Ezk+w41bRVdXJVLamqJYsWLRpNwZIkzXEjD/QkRwPPAl5UVQVQVfdW1Zr+/qXADcAvjro2SZLmq5EGepIjgNcDz66qHw+0L0qydX9/P+AA4HujrE2SpPlsaD+fmuQs4HBgtyQrgOPpzmrfDrggCcAl/RntTwbemmQdsB44tqrWTjljSZL0c4YW6FV11BTNp0wz7jnAOcOqRZKk1nmlOEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDdhm3AVI0lAs3WmEy7pjdMuSpuEeuiRJDTDQJUlqwIwCPcmFM2mTJEnjscFAT/LQJLsCuyXZJcmu/W0xsMdGpj01ya1Jrhxo2zXJBUm+2//dZWDYG5Ncn+S6JM/YwvWSJGlB2dge+quAS4Ff6v9O3D4H/P1Gpj0NOGJS2xuAC6vqAODC/jFJDgKOBB7XT/OhJFvPeC0kSVrgNhjoVfX+qtoX+Iuq2q+q9u1vv1JVH9zItBcDayc1Pwc4vb9/OvDcgfZPV9W9VXUjcD1w2CauiyRJC9aMvrZWVR9I8uvA4sFpquqMTVzeo6pqZT/tyiSP7Nv3BC4ZGG9F3/ZzkhwDHAOwzz77bOLiJUlq04wCPckngP2By4H1fXMBmxro0y5iiraaasSqOhk4GWDJkiVTjiNJ0kIz0wvLLAEOqqotDdBVSXbv9853B27t21cAew+MtxdwyxYuS5KkBWOm30O/Enj0LCzv88DR/f2j6U6um2g/Msl2SfYFDgC+PgvLkyRpQZjpHvpuwNVJvg7cO9FYVc+eboIkZwGH033lbQVwPPB24OwkrwB+ALygn89VSc4GrgbWAa+uqvVTzliSJP2cmQb60k2dcVUdNc2gp04z/knASZu6HEmSNPOz3C8adiGSJGnzzfQs97t44KzzhwDbAvdU1Y7DKkySJM3cTPfQdxh8nOS5eOEXSZLmjM36tbWq+j/AU2a3FEmStLlm2uX+/IGHW9F9L92LukiSNEfM9Cz33x24vw5YTnf9dUmSNAfM9Bj6y4ZdiCRJ2nwzOoaeZK8k5/a/b74qyTlJ9hp2cZIkaWZmelLcx+kuz7oH3a+g/XPfJkmS5oCZBvqiqvp4Va3rb6cBi4ZYlyRJ2gQzDfTbkrw4ydb97cXAmmEWJkmSZm6mgf5y4IXAD4GVwO8Bnig
nSdIcMdOvrZ0IHF1VPwJIsivwLrqglyRJYzbTPfQnTIQ5QFWtBQ4dTkmSJGlTzTTQt0qyy8SDfg99pnv3kiRpyGYayu8Gvprks3SXfH0h/na5JElzxkyvFHdGkmV0P8gS4PlVdfVQK5MkSTM2427zPsANcUmS5qDN+vlUSZI0txjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGjDj30OfLUkOBD4z0LQf8NfAzsArgdV9+5uq6rzRVidJ0vw08kCvquuAQwCSbA3cDJwLvAx4b1W9a9Q1SZI03427y/2pwA1V9f0x1yFJ0rw27kA/Ejhr4PFxSa5IcmqSXcZVlCRJ883YAj3JQ4BnA//YN30Y2J+uO34l8O5ppjsmybIky1avXj3VKJIkLTjj3EP/HeCyqloFUFWrqmp9Vd0HfBQ4bKqJqurkqlpSVUsWLVo0wnIlSZq7xhnoRzHQ3Z5k94FhzwOuHHlFkiTNUyM/yx0gycOBpwOvGmj+uySHAAUsnzRMkiRtwFgCvap+DPzCpLaXjKMWSZJaMO6z3CVJ0iww0CVJaoCBLklSA8ZyDF2aNUt3GncF0mhfh0vvGN2yNK+4hy5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQHbjGOhSZYDdwHrgXVVtSTJrsBngMXAcuCFVfWjcdQnSdJ8M8499N+qqkOqakn/+A3AhVV1AHBh/1iSJM3AXOpyfw5wen//dOC54ytFkqT5ZVyBXsD5SS5Nckzf9qiqWgnQ/33kVBMmOSbJsiTLVq9ePaJyJUma28ZyDB34jaq6JckjgQuSXDvTCavqZOBkgCVLltSwCpQkaT4Zyx56Vd3S/70VOBc4DFiVZHeA/u+t46hNkqT5aOSBnmT7JDtM3Ad+G7gS+DxwdD/a0cDnRl2bJEnz1Ti63B8FnJtkYvlnVtX/TfIN4OwkrwB+ALxgDLVJkjQvjTzQq+p7wK9M0b4GeOqo65EkqQVz6WtrkiRpMxnokiQ1wECXJKkBBrokSQ0w0CVJasC4rhQnSdocS3ca4bLuGN2ytMXcQ5ckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAfz5Vs2+UP+8oSQLcQ5ckqQkGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaMPJAT7J3ki8luSbJVUle07cvTXJzksv72zNHXZskSfPVOH4+dR3wuqq6LMkOwKVJLuiHvbeq3jWGmiRJmtdGHuhVtRJY2d+/K8k1wJ6jrkOSpJaM9Rh6ksXAocDX+qbjklyR5NQku0wzzTFJliVZtnr16lGVKknSnDa2QE/yCOAc4LVVdSfwYWB/4BC6Pfh3TzVdVZ1cVUuqasmiRYtGVa4kSXPaWAI9ybZ0Yf6pqvongKpaVVXrq+o+4KPAYeOoTZKk+WgcZ7kHOAW4pqreM9C++8BozwOuHHVtkiTNV+M4y/03gJcA305yed/2JuCoJIcABSwHXjWG2iRJmpfGcZb7V4BMMei8UdeyoCzdadwVSJKGyCvFSZLUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqwDi+hy5Jmg9G+XXXpXeMblmNcg9dkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkN2GbcBSxoS3cadwWStPCM8n/v0jtGtij30CVJaoCBLklSA+xylySNn4cgt5h76JIkNcBAlySpAXMu0JMckeS6JNcnecO465EkaT6YU4GeZGvg74HfAQ4Cjkpy0HirkiRp7ptrJ8UdBlxfVd8DSPJp4DnA1SOrwBMzJEnz0JzaQwf2BG4aeLyib5MkSRsw1/bQM0VbPWiE5BjgmP7h3UmuG3pVm2434LZxFzEGrvfC4novLK735jhhqljbYo+ZqnGuBfoKYO+Bx3sBtwyOUFUnAyePsqhNlWRZVS0Zdx2j5novLK73wuJ6z31zrcv9G8ABSfZN8hDgSODzY65JkqQ5b07toVfVuiTHAf8GbA2cWlVXjbksSZLmvDkV6ABVdR5w3rjr2EJz+pDAELneC4vrvbC43nNcqmrjY0mSpDltrh1DlyRJm8FAH4IkJya5IsnlSc5Psse4axqFJO9Mcm2/7ucm2XncNY1CkhckuSrJfUnmxdmwW2KhXp45yalJbk1y5bhrGaUkeyf5UpJr+tf5a8Zd0ygkeWiSryf5Vr/eJ4y7po2xy30IkuxYVXf29/8MOKiqjh1zWUOX5LeBL/YnN74DoKpeP+ayhi7JLwP3Af8A/EVVLRtzSUPTX575O8DT6b5m+g3gqKoa3dUcxyTJk4G7gTOq6uBx1zMqSXYHdq+qy5LsAFwKPLf15zxJgO2r6u4k2wJfAV5TVZeMubRpuYc+BBNh3tueSRfHaVVVnV9V6/qHl9BdR6B5VXVNVc3FCxwNw/2XZ66q/wdMXJ65eVV1MbB23HWMWlWtrKrL+vt3AdewAK7gWZ27+4fb9rc5/b/cQB+SJCcluQl4EfDX465nDF4O/Ou4i9Cs8/LMC1iSxcChwNfGXMpIJNk6yeXArcAFVTWn19tA30xJ/j3JlVPcngNQVW+uqr2BTwHHjbfa2bOx9e7HeTOwjm7dmzCT9V4gNnp5ZrUpySOAc4DXTuqFbFZVra+qQ+h6Gw9LMqcPtcy576HPF1X1tBmOeibwBeD4IZYzMhtb7yRHA88CnloNnaCxCc936zZ6eWa1pz+GfA7wqar6p3HXM2pVdXuSLwNHAHP2pEj30IcgyQEDD58NXDuuWkYpyRHA64FnV9WPx12PhsLLMy8w/clhpwDXVNV7xl3PqCRZNPFNnSQPA57GHP9f7lnuQ5DkHOBAujOfvw8cW1U3j7eq4UtyPbAdsKZvumSBnN3/POADwCLgduDyqnrGWIsaoiTPBN7HA5dnPmm8FY1GkrOAw+l+fWsVcHxVnTLWokYgyROB/wC+Tfc/DeBN/VU9m5XkCcDpdK/zrYCzq+qt461qwwx0SZIaYJe7JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQH/H+Rs+go/ymb0AAAAAElFTkSuQmCC\n" + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAfQAAAF1CAYAAAAeOhj3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAAefklEQVR4nO3deZhkdX3v8feHRVRkDaOyOoCEBNHAzTzk3kQNcYnEa9yeaCBqcIlIIok+MbmuCYNIonG/GjUoCKigRMLVRHIDQYUYgzogIquCjDIwDsOMrCrXGb73j3MairZ7pmemq6r71+/X89TTVb+zfc+pqv7U+Z1Tp1JVSJKk+W2rcRcgSZK2nIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wEDXnJBkaZJPzvI8k+TjSX6U5OuzOe+FJMlLk3xlE8ZfnuRp/f03JfnYLNZyd5L9+vunJXnbLM77I0n+arbmt6Vme9upfduMuwCNV5InAn8HPA5YD1wDvLaqvjHWwmbHE4GnA3tV1T3jLmYhqqq/mcl4Sb4MfLKqNhhgVfWI2agryUuBP6qqJw7M+9jZmPdsmem2kyYY6AtYkh2BfwH+GDgbeAjwJODecdY1ix4DLJ8uzJNsU1XrRlzT0LW4Xi2ukzTb7HJf2H4RoKrOqqr1VfWTqjq/qq4ASLJ/ki8mWZPktiSfSrLzxMR91+pfJrkiyT1JTknyqCT/muSuJP+eZJd+3MVJKskxSW5JsjLJ66YrLMl/T/LVJLcn+VaSwweGvTTJ9/pl3JjkRVNM/wrgY8D/6LtpT0hyeJIVSV6f5IfAx5Nsl+R9fU239Pe36+cxMf7/SnJrX/NzkzwzyXeSrE3ypg2sw/9M8s0kdya5KcnSDYw7sazXDSzrZQPDd0pyRpLVSb6f5C1JthrYHv+Z5L1J1gJL++7oD/XPxd398Ef36/ejJNcmOXRg/m9IckO/Ta9O8rzpap2i9pf0Na1J8uZJw+4/lJLkoUk+2Y93e5Jv9K+Xk+g+SH6wr/WD/fiV5NVJvgt8d6DtsQOL2C3JBX3dFyV5TD/exOttm4Favpzkj5L8MvARHnht3N4Pf1AXfpJXJrm+f54/n2SPgWGV5Ngk3+23598nyTTbZ/J8D0+yYuDx65Pc3K/DdUmeOsW2m1ifo5P8IN378c0D83hYktP7Wq7pX7MrmEY/rz9L9z66Lck7k2yV7v2wNsnjB8Z9ZJKfJFmUTXxPJNk63aGDidfWpUn2nq4ubaGq8rZAb8COwBrgdOB3gF0mDX8sXZf1dsAi4GLgfQPDlwOXAI8C9gRuBS4DDu2n+SJwfD/uYqCAs4DtgccDq4Gn9cOX0nW50s9rDfBMug+dT+8fL+qnvRM4sB93d+Bx06zfS4GvDDw+HFgHvKOv72HAW/t1eGQ//68CJ04a/6+BbYFX9jWfCexAd5jip8B+0yz/8H49twKeAKwCnruBcdf19Wzbr/uPJ54T4Azgc/1yFwPfAV4xsJ7rgD+l63V7GHAacBvwq8BD++fiRuAPga2BtwFfGlj+C4A9+lp/H7gH2H2q7Tip7oOAu4En99v0PX0tUz2vrwL+GXh4X8OvAjv2w75M1wU+OO8CLgB2BR420PbY/v5pwF0Dy37/RJ088HrbZmB+9y9jqnXq5/e2/v5T+u333/p5fwC4eFJt/wLsDOxD97o4YpptdP98B57rFf39A4GbgD0G6t5/im03sT4f7Z/fX6HrSfvlfvjbgYuAXYC9gCsmljFNTQV8qd+2+9C9nia2zYeAdwyM+xrgnzfnPQH8JfDtfj3T1/0L4/7f1+rNPfQFrKrupDvOPPGPYnW/J/Kofvj1VXVBVd1bVavp/ln/5qTZfKCqVlXVzcB/AF+rqm9W1b3AuXThPuiEqrqnqr4NfBw4aorSXgycV1XnVdV9VXUBsIwu5ADuAw5O8rCqWllVV23Cat9H9yHj3qr6CfAi4K1VdWu/jicALxkY/2fASVX1M+DTwG7A+6vqrn65V9GF9c+pqi9X1bf7dbiC7sPM5O036Gd9LT+rqvPogvLAJFvThewb++UuB949qc5bquoDVbWuXy+Ac6vq0qr6Kd1z8dOqOqOq1gOfYeC5qap/rKpb+lo/Q7dHfNhGtiXA7wH/UlUX98/5X9Ft4+nW7xfoAnl9X9udG5n/31bV2oF1muwLA8t+M91e92zsAb4IOLWqLuvn/cZ+3osHxnl7Vd1eVT+gC8dDNmM56+k+MByUZNuqWl5VN2xg/BOq60n7FvAtuoAEeCHwN1X1o6paAfzvGSz7Hf22/QHwPh54L54O/MFEDxDd6+wTA9Ntynvij4C3VNV11flWVa2ZQW3aDAb6AldV11TVS6tqL+Bgur2098H9XW2f7rsD7wQ+SffmHbRq4P5Ppng8+SSmmwbuf79f3mSPAV7Qd8ve3neJPpFuj/EeunA7FliZ5AtJfmnma8zqPuAm7NHXMV1Na/oAnFgf2Pg6ApDk15J8KV03+R19zZO336A19eDjxD/u570b3fkNk+vcc+Dx4HadMOPnJskfJrl8YHsfvJFaJ+wxuOz++ZnuH/YngH8DPp3u8MbfJdl2I/Ofar2mHF5VdwNrmfo1take9Lro572GB2/zHw7cn3iuNklVXQ+8lm5v/Nb+/bah+qdb5oOeBza+3SaPc//rvqq+RtdD85v9e+uxwOcHxt2U98TewIY+oGgWGei6X1VdS9c9eHDf9Ld0e+9PqKod6facpzxOuAkG9572AW6ZYpybgE9U1c4Dt+2r6u19nf9WVU+n626/lq53YaYm/7zgLXQfIDZW0+Y4k+4f4d5VtRPdcdvN2X630e0VTa7z5oHHm/2zif1x548Cx9F1h+4MXMnMal3JwHOa5OF0e+E/p+95OKGqDgJ+HXgW3SGADdW/sfUaXPYj6LqQb6ELJOi69yc8ehPm+6DXRZLt6dbr5mmnmN49G6iDqjqzurPtH9PX9Y7NWMZKuq72CTPppdjQe/F0uvf7S4DPTvoQvCluAvbfzGm1iQz0BSzJL6U7CWuv/vHedN1ul/Sj7EDX7Xt7kj3pjodtqb9K8vAkjwNeRtf1O9kngd9N8oz+pJqH9ifj7JXuJKpn9/9g7+3rWz/FPGbqLOAt/Qk/u9EdG5yt78PvAKytqp8mOQz4g82ZSb83dDZwUpId+gD+81msc3u6IFkNkO5kvIM3OMUDPgs8K8kTkzyE7hyAKf+vJPmtJI/vDyHcSfchZeK5WwXstxm1P3Ng2SfSHfK5qT98cjPw4v419HIeHCyrgL366aZyJvCyJIekO0nyb/p5L9+MGi/v69w1yaPp9sgBSHJgkqf0y/gp3d7t5ryezwbemGSX/r163Aym+ct+/L3pjpMPvhc/ATyPLtTP2Ix6JnwMODHJAek8IcmUH/i05Qz0he0u4NeAryW5hy7IrwQmzj4/ge6koDuALwD/NAvLvAi4HrgQeFdVnT95hKq6CXgO8Ca6kLmJ7sPEVv3tdXR7E2vpjkn/yRbU8za64/NX0J28c1nfNhv+BHhrkrvoPiicvQXz+lO6Pb3vAV+hC5
xTt7hCoKqupjsm/190Qfd44D9nOO1VwKv7elYCPwKmO7v60XQfAO6ku97BRTzwoeT9wO/1Z2nP5PjvhDOB4+leC79Kd+x7wivpXjdr6E7W+urAsC/SHev9YZLbplivC+nOBzinX6/9gSM3oa5Bn6A73r0cOJ8HB+d2dCe03UbXnf5Iutf9pnor3Xa/Efh3uu28sa+ffg64lO4DxxeAUyYG9MfhL6P7oPcfm1HPhPfQve7Pp3veT6E7qU9DkKrN7qmTZqw/mehGYNvy+8TSUCX5Y+DIqpryJMwkBRzQH8Ofbh6n0p1s+ZYhlalZ5oVlJGmeS7I73SGL/wIOoOvF+uAWzG8x8Hx+/lsqmsPscpek+e8hwD/QHUb7Il13+oc2Z0ZJTqQ79PbOqrpx1irU0NnlLklSA9xDlySpAQa6JEkNmNcnxe222261ePHicZchSdLIXHrppbdV1aLJ7fM60BcvXsyyZcvGXYYkSSOT5PtTtdvlLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDVgXv/amtSspTuNcFl3jG5ZkobGPXRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSA4YW6ElOTXJrkisH2j6T5PL+tjzJ5X374iQ/GRj2kWHVJUlSi4Z5pbjTgA8CZ0w0VNXvT9xP8m5g8BJVN1TVIUOsR5KkZg0t0Kvq4iSLpxqWJMALgacMa/mSJC0k4zqG/iRgVVV9d6Bt3yTfTHJRkidNN2GSY5IsS7Js9erVw69UkqR5YFyBfhRw1sDjlcA+VXUo8OfAmUl2nGrCqjq5qpZU1ZJFixaNoFRJkua+kQd6km2A5wOfmWirqnurak1//1LgBuAXR12bJEnz1Tj20J8GXFtVKyYakixKsnV/fz/gAOB7Y6hNkqR5aZhfWzsL+C/gwCQrkryiH3QkD+5uB3gycEWSbwGfBY6tqrXDqk2SpNYM8yz3o6Zpf+kUbecA5wyrFkmSWueV4iRJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIasM24C5A0Zkt3GuGy7hjdsqQFxj10SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAUML9CSnJrk1yZUDbUuT3Jzk8v72zIFhb0xyfZLrkjxjWHVJktSiYe6hnwYcMUX7e6vqkP52HkCSg4Ajgcf103woydZDrE2SpKYMLdCr6mJg7QxHfw7w6aq6t6puBK4HDhtWbZIktWYcx9CPS3JF3yW/S9+2J3DTwDgr+jZJkjQDo75S3IeBE4Hq/74beDmQKcatqWaQ5BjgGIB99tlnOFVKGg6vSicNzUj30KtqVVWtr6r7gI/yQLf6CmDvgVH3Am6ZZh4nV9WSqlqyaNGi4RYsSdI8MdJAT7L7wMPnARNnwH8eODLJdkn2BQ4Avj7K2iRJms+G1uWe5CzgcGC3JCuA44HDkxxC152+HHgVQFVdleRs4GpgHfDqqlo/rNokSWrN0AK9qo6aovmUDYx/EnDSsOqRJKllXilOkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDVgm3EXIM0bS3cadwWSNC330CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAUML9CSnJrk1yZUDbe9Mcm2SK5Kcm2Tnvn1xkp8kuby/fWRYdUmS1KJh7qGfBhwxqe0C4OCqegLwHeCNA8NuqKpD+tuxQ6xLkqTmDC3Qq+piYO2ktvOral3/8BJgr2EtX5KkhWScx9BfDvzrwON9k3wzyUVJnjTdREmOSbIsybLVq1cPv0pJkuaBsQR6kjcD64BP9U0rgX2q6lDgz4Ezk+w41bRVdXJVLamqJYsWLRpNwZIkzXEjD/QkRwPPAl5UVQVQVfdW1Zr+/qXADcAvjro2SZLmq5EGepIjgNcDz66qHw+0L0qydX9/P+AA4HujrE2SpPlsaD+fmuQs4HBgtyQrgOPpzmrfDrggCcAl/RntTwbemmQdsB44tqrWTjljSZL0c4YW6FV11BTNp0wz7jnAOcOqRZKk1nmlOEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDdhm3AVI0lAs3WmEy7pjdMuSpuEeuiRJDTDQJUlqwIwCPcmFM2mTJEnjscFAT/LQJLsCuyXZJcmu/W0xsMdGpj01ya1Jrhxo2zXJBUm+2//dZWDYG5Ncn+S6JM/YwvWSJGlB2dge+quAS4Ff6v9O3D4H/P1Gpj0NOGJS2xuAC6vqAODC/jFJDgKOBB7XT/OhJFvPeC0kSVrgNhjoVfX+qtoX+Iuq2q+q9u1vv1JVH9zItBcDayc1Pwc4vb9/OvDcgfZPV9W9VXUjcD1w2CauiyRJC9aMvrZWVR9I8uvA4sFpquqMTVzeo6pqZT/tyiSP7Nv3BC4ZGG9F3/ZzkhwDHAOwzz77bOLiJUlq04wCPckngP2By4H1fXMBmxro0y5iiraaasSqOhk4GWDJkiVTjiNJ0kIz0wvLLAEOqqotDdBVSXbv9853B27t21cAew+MtxdwyxYuS5KkBWOm30O/Enj0LCzv88DR/f2j6U6um2g/Msl2SfYFDgC+PgvLkyRpQZjpHvpuwNVJvg7cO9FYVc+eboIkZwGH033lbQVwPPB24OwkrwB+ALygn89VSc4GrgbWAa+uqvVTzliSJP2cmQb60k2dcVUdNc2gp04z/knASZu6HEmSNPOz3C8adiGSJGnzzfQs97t44KzzhwDbAvdU1Y7DKkySJM3cTPfQdxh8nOS5eOEXSZLmjM36tbWq+j/AU2a3FEmStLlm2uX+/IGHW9F9L92LukiSNEfM9Cz33x24vw5YTnf9dUmSNAfM9Bj6y4ZdiCRJ2nwzOoaeZK8k5/a/b74qyTlJ9hp2cZIkaWZmelLcx+kuz7oH3a+g/XPfJkmS5oCZBvqiqvp4Va3rb6cBi4ZYlyRJ2gQzDfTbkrw4ydb97cXAmmEWJkmSZm6mgf5y4IXAD4GVwO8BnignSdIcMdOvrZ0IHF1VP
wJIsivwLrqglyRJYzbTPfQnTIQ5QFWtBQ4dTkmSJGlTzTTQt0qyy8SDfg99pnv3kiRpyGYayu8Gvprks3SXfH0h/na5JElzxkyvFHdGkmV0P8gS4PlVdfVQK5MkSTM2427zPsANcUmS5qDN+vlUSZI0txjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGjDj30OfLUkOBD4z0LQf8NfAzsArgdV9+5uq6rzRVidJ0vw08kCvquuAQwCSbA3cDJwLvAx4b1W9a9Q1SZI03427y/2pwA1V9f0x1yFJ0rw27kA/Ejhr4PFxSa5IcmqSXcZVlCRJ883YAj3JQ4BnA//YN30Y2J+uO34l8O5ppjsmybIky1avXj3VKJIkLTjj3EP/HeCyqloFUFWrqmp9Vd0HfBQ4bKqJqurkqlpSVUsWLVo0wnIlSZq7xhnoRzHQ3Z5k94FhzwOuHHlFkiTNUyM/yx0gycOBpwOvGmj+uySHAAUsnzRMkiRtwFgCvap+DPzCpLaXjKMWSZJaMO6z3CVJ0iww0CVJaoCBLklSA8ZyDF2aNUt3GncF0mhfh0vvGN2yNK+4hy5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQHbjGOhSZYDdwHrgXVVtSTJrsBngMXAcuCFVfWjcdQnSdJ8M8499N+qqkOqakn/+A3AhVV1AHBh/1iSJM3AXOpyfw5wen//dOC54ytFkqT5ZVyBXsD5SS5Nckzf9qiqWgnQ/33kVBMmOSbJsiTLVq9ePaJyJUma28ZyDB34jaq6JckjgQuSXDvTCavqZOBkgCVLltSwCpQkaT4Zyx56Vd3S/70VOBc4DFiVZHeA/u+t46hNkqT5aOSBnmT7JDtM3Ad+G7gS+DxwdD/a0cDnRl2bJEnz1Ti63B8FnJtkYvlnVtX/TfIN4OwkrwB+ALxgDLVJkjQvjTzQq+p7wK9M0b4GeOqo65EkqQVz6WtrkiRpMxnokiQ1wECXJKkBBrokSQ0w0CVJasC4rhQnSdocS3ca4bLuGN2ytMXcQ5ckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAfz5Vs2+UP+8oSQLcQ5ckqQkGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaMPJAT7J3ki8luSbJVUle07cvTXJzksv72zNHXZskSfPVOH4+dR3wuqq6LMkOwKVJLuiHvbeq3jWGmiRJmtdGHuhVtRJY2d+/K8k1wJ6jrkOSpJaM9Rh6ksXAocDX+qbjklyR5NQku0wzzTFJliVZtnr16lGVKknSnDa2QE/yCOAc4LVVdSfwYWB/4BC6Pfh3TzVdVZ1cVUuqasmiRYtGVa4kSXPaWAI9ybZ0Yf6pqvongKpaVVXrq+o+4KPAYeOoTZKk+WgcZ7kHOAW4pqreM9C++8BozwOuHHVtkiTNV+M4y/03gJcA305yed/2JuCoJIcABSwHXjWG2iRJmpfGcZb7V4BMMei8UdeyoCzdadwVSJKGyCvFSZLUAANdkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqwDi+hy5Jmg9G+XXXpXeMblmNcg9dkqQGGOiSJDXAQJckqQEGuiRJDTDQJUlqgIEuSVIDDHRJkhpgoEuS1AADXZKkBhjokiQ1wECXJKkBBrokSQ0w0CVJaoCBLklSAwx0SZIaYKBLktQAA12SpAYY6JIkNcBAlySpAQa6JEkN2GbcBSxoS3cadwWStPCM8n/v0jtGtij30CVJaoCBLklSA+xylySNn4cgt5h76JIkNcBAlySpAXMu0JMckeS6JNcnecO465EkaT6YU4GeZGvg74HfAQ4Cjkpy0HirkiRp7ptrJ8UdBlxfVd8DSPJp4DnA1SOrwBMzJEnz0JzaQwf2BG4aeLyib5MkSRsw1/bQM0VbPWiE5BjgmP7h3UmuG3pVm2434LZxFzEGrvfC4novLK735jhhqljbYo+ZqnGuBfoKYO+Bx3sBtwyOUFUnAyePsqhNlWRZVS0Zdx2j5novLK73wuJ6z31zrcv9G8ABSfZN8hDgSODzY65JkqQ5b07toVfVuiTHAf8GbA2cWlVXjbksSZLmvDkV6ABVdR5w3rjr2EJz+pDAELneC4vrvbC43nNcqmrjY0mSpDltrh1DlyRJm8FAH4IkJya5IsnlSc5Psse4axqFJO9Mcm2/7ucm2XncNY1CkhckuSrJfUnmxdmwW2KhXp45yalJbk1y5bhrGaUkeyf5UpJr+tf5a8Zd0ygkeWiSryf5Vr/eJ4y7po2xy30IkuxYVXf29/8MOKiqjh1zWUOX5LeBL/YnN74DoKpeP+ayhi7JLwP3Af8A/EVVLRtzSUPTX575O8DT6b5m+g3gqKoa3dUcxyTJk4G7gTOq6uBx1zMqSXYHdq+qy5LsAFwKPLf15zxJgO2r6u4k2wJfAV5TVZeMubRpuYc+BBNh3tueSRfHaVVVnV9V6/qHl9BdR6B5VXVNVc3FCxwNw/2XZ66q/wdMXJ65eVV1MbB23HWMWlWtrKrL+vt3AdewAK7gWZ27+4fb9rc5/b/cQB+SJCcluQl4EfDX465nDF4O/Ou4i9Cs8/LMC1iSxcChwNfGXMpIJNk6yeXArcAFVTWn19tA30xJ/j3JlVPcngNQVW+uqr2BTwHHjbfa2bOx9e7HeTOwjm7dmzCT9V4gNnp5ZrUpySOAc4DXTuqFbFZVra+qQ+h6Gw9LMqcPtcy576HPF1X1tBmOeibwBeD4IZYzMhtb7yRHA88CnloNnaCxCc936zZ6eWa1pz+GfA7wqar6p3HXM2pVdXuSLwNHAHP2pEj30IcgyQEDD58NXDuuWkYpyRHA64FnV9WPx12PhsLLMy8w/clhpwDXVNV7xl3PqCRZNPFNnSQPA57GHP9f7lnuQ5DkHOBAujOfvw8cW1U3j7eq4UtyPbAdsKZvumSBnN3/POADwCLgduDyqnrGWIsaoiTPBN7HA5dnPmm8FY1GkrOAw+l+fWsVcHxVnTLWokYgyROB/wC+Tfc/DeBN/VU9m5XkCcDpdK/zrYCzq+qt461qwwx0SZIaYJe7JEkNMNAlSWqAgS5JUgMMdEmSGmCgS5LUAANdkqQGGOiSJDXAQJckqQH/H+Rs+go/ymb0AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] }, "metadata": { "needs_background": "light" @@ -1075,7 +1115,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 31, "metadata": {}, @@ -1103,7 +1145,9 @@ "outputs": [ { "data": { - "text/plain": "[z ~ N(, )]" + "text/plain": [ + "[z ~ N(, )]" + ] }, "execution_count": 32, "metadata": {}, @@ -1133,7 +1177,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 33, "metadata": {}, @@ -1219,8 +1265,10 @@ "outputs": [ { "data": { - "text/plain": "
", - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAeIAAAHiCAYAAAA06c+jAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAAeAklEQVR4nO3de5SkdX3n8c+nq3u65woMwwjMjAwqXpGoZ8Br1ohgUAme3dxM1GiSPXPWLFlzDkoUdhM8m2Q5cY9RV7M5E0U3QiSuQsLhEi7rbbOrCCJXB1zWADMMl5kB5j59q+/+UYXbNj0zDM+359tT9X6dM4fpqeJT3+ep7vrU81RX/RwRAgAANQaqBwAAoJ9RxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYmENsX2T70oLbvcD25w/17QKgiAFJku032f7ftrfZfsL2/7J9avVcGfZV7rbD9oskKSL+LCL+9bPI+pbtA14PwLM3WD0AUM32EklXS/qgpK9Kmifp5yWNVs7Vb2wPRsRE9RzAocYRMSC9WJIi4isRMRkReyLihoi4U5Jsv9D2N2xvtb3F9mW2j3z6f7b9gO2P2L7T9i7bX7D9PNvX2d5h+ybbR3Wvu7p7JLrW9ibbj9g+b1+D2X5d90j9Kdt32P6FKZd9wPZPurfxz7bf81x3wNSjZtsjti/tbu9Ttm/pbs+fqvME5bO2d9r+bPf6b+heZ1v3v2+Yknui7e9M2Q+fm3I7T++L37X9kKRvdP/9v9t+tJv3HduvmJL3Jdt/2d23O7tnLo61/SnbT9q+1/arn+t+ACpQxID0Y0mTtv+b7bc/XZpTWNJ/knS8pJdJWiXpomnX+WVJZ6pT6r8k6TpJF0haps7P2b+bdv23SDpJ0tskfdT2GdOHsr1C0jWS/kTSUkkflvR128fYXijpM5LeHhGLJb1B0u0HveUze7+kI9TZzqMl/RtJeyLiQkn/U9K5EbEoIs61vbQ742e61/2kpGtsH93N+ltJ3+9edpGk981we29WZ7/+Yvfr69TZN8sl3SbpsmnX/zVJ/16dfTsq6bvd6y2T9LXuDMBhgyJG34uI7ZLeJCkk/bWkzbavsv287uX3R8SNETEaEZvVeaB/87SY/xIRj0XEw+qU1c0R8cOIGJV0paTpR2kfj4hdEXGXpC9K+o0ZRnuvpGsj4tqIaEfEjZJulfSO7uVtSSfbnh8Rj0TEPfvZzF/rHt3+9M9+rjuuTnG+qHuG4AfdfTSTd0r6PxHx5YiYiIivSLpX0i/Zfr6kUyX9UUSMRcQ/SbpqhoyLuvtijyRFxCURsaO77y6S9HO2j5hy/Su7M+1VZ9/ujYi/iYhJSX+nZ+5rYE6jiAFJEbE+Ij4QESslnazO0e+nJMn2ctuX237Y9nZJl6pz9DXVY1P+vmeGrxdNu/6GKX9/sHt7050g6VenleebJB0XEbsk/bo6R6uP2L7G9kv3s4lfjYgjp/7Zz3W/LOl6SZd3T5//ue2hfVz3+O78Uz0oaUX3siciYveUyzbomX76b7Zbti+2/X+7+/qB7kVT9/fB7mtgTqOIgWki4l5JX1KnkKXOaemQdEpELFHnSNUNb2bVlL8/X9KmGa6zQdKXpxXowoi4uDvn9RFxpqTj1DkK/euGM6mbOx4RH4+Il6tzyvtsSb/19MXTrr5JnScMUz1f0sOSHpG01PaCKZet0jNNzfxNSe+SdIY6p8dXd/+96f4G5iyKGH3P9kttn2d7ZffrVeqcKv5e9yqLJe2U9FT3dduPJNzsf7C9oPuLSL+tzinV6S5V5xTvL3aPFEds/4Ltld1fnjqn+1rxaHe+yYS5ZPsttl9puyVpuzqnqp/OfkzSC6Zc/VpJL7b9m7YHbf+6pJdLujoiHlTnVPpFtufZfr06r5/vz+Lu9myVtEDSn2VsEzCXUcSAtEPSayXdbHuXOgV8t6Snf5v545JeI2mbOr+YdEXCbX5b0v2S/oek/xwRN0y/QkRsUOfo8AJJm9U5Qv6IOj+3A935Nkl6Qp3XrH8vYS5JOladX3raLml9d9an34f8aUm/0v0N5c9ExFZ1jpjPU6c8z5d0dkRs6V7/PZJe373sT9R5wrG/t4X9jTqnth+W9CP9/ydDQM9yxPQzTQBmi+3Vkv5Z0lA/vmfW9t9Jujci/rh6FmCu4IgYwKyxfao778MesH2WOkf4f188FjCn8MlaAGbTseqcyj9a0kZJH4yIH9aOBMwtnJoGAKAQp6YBAChEEQMAUKjkNeJ5Ho4RLay4aQAADrkdenJLRBwz02UlRTyihXqt31px0wAOFSedcIt2Tg5Q6Kb42vSPgv0pTk0DAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABQqWY8YQB/IWke4F9c17sVtwnPGETEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFBqsHAJDESc+ro52TM8d4cCglJybGE0KS9jH3eU/giBgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAocHqAYB+l7VgvQeckpMl2pGSk7VdnjcvJae9t3lGTE42D0HPSDkitn2k7a/Zvtf2etuvz8gFAKDXZR0Rf1rSP0bEr9ieJ2lBUi4AAD2tcRHbXiLpX0j6gCRFxJiksaa5AAD0g4xT0y+QtFnSF23/0PbnbS9MyAUAoOdlFPGgpNdI+q8R8WpJuyR9dPqVbK+1favtW8c1mnCzAAAc/jKKeKOkjRFxc/frr6lTzD8jItZFxJqIWDOk4YSbBQDg8Ne4iCPiUUkbbL+k+09vlfSjprkAAPSDrN+a/n1Jl3V/Y/onkn47KRcAgJ6WUsQRcbukNRlZAAD0Ez7iEgCAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAoaxP1kKvctJztWjn5CTN41YrJSdDTE6m5Hgo5zPcPZj0sDAxkZOTtH9iLGd1Vg813z8xMZ4wifJ+HgaHUnKyvpfTZD3uzDKOiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUSloBHD0ra2HtpAXMBxIWZZekaEdKTsYi8QOLFiZMIilrUfZWKyXGSTlzTcZ9nrVvIuk+j/GJlJy0x4s+wxExAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEI5q6yjZ3lwqHqEn5W0oPrAcM63vhfMT8lJMZG0
SPxxy1JyPJq02Hwr53gh5iU93G3d1jjC80cSBpEmN2xKyWkdsSQlZ3Lb9pQcRTsn5zDBETEAAIUoYgAAClHEAAAUoogBAChEEQMAUCitiG23bP/Q9tVZmQAA9LrMI+IPSVqfmAcAQM9LKWLbKyW9U9LnM/IAAOgXWUfEn5J0vqT+ehc2AAANNS5i22dLejwifnCA6621favtW8c12vRmAQDoCRlHxG+UdI7tByRdLul025dOv1JErIuINRGxZkjDCTcLAMDhr3ERR8THImJlRKyW9G5J34iI9zaeDACAPsD7iAEAKJS6+lJEfEvStzIzAQDoZRwRAwBQiCIGAKAQRQwAQKHU14jRe2JiPCWntWhRSs7krt0pOYOrjk7JicULmofYzTMkeXQiJWd82cKUnJ0rct6mOLx9MiVnYCxScoYHmx+/DDz2ZMIk0uDzV6TktB/fkpIzMJJzn7f35nzWhFutxhlZj4H7wxExAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEKD1QMAB2Nw2dKcoNGxlJhYdkTzjKHmi5dL0s6XHpWSM3pkzvPzvUl31e7lOQ9TI09GSs7YksWNMxYl3ecDu0ZzcrQsJSe2PpmS41bO/onJyZSc2cYRMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCOStuI4/n1nOjgZHhnCA7JydJ+/ichdBHj1nQOCOS7vKtJ+cETc6PlJz2cFJOzhrxGtuas39Gtjb/Xm4PLUyYRBrZMi8n59GcfePxiZycycmUnJgYT8mZbXPrUR8AgD5DEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKBQ4yK2vcr2N22vt32P7Q9lDAYAQD/IWPRhQtJ5EXGb7cWSfmD7xoj4UUI2AAA9rfERcUQ8EhG3df++Q9J6SSua5gIA0A9SXyO2vVrSqyXdnJkLAECvSluP2PYiSV+X9AcRsX2Gy9dKWitJI2q+hisAAL0g5YjY9pA6JXxZRFwx03UiYl1ErImINUNKWmweAIDDXOMjYtuW9AVJ6yPik81HOox57rwbbGAk58mOB3NOmnjJ4pQczR9JiRk7OueszN6jm++fx0+LhEkkL9+TknPmSetTcu7btjwl54FHlqXk6Imc7509z2ueMTDu5iGSBve0UnLSjI6lxHg46fFrfCIlJ8X4vi/KaI43SnqfpNNt3979846EXAAAel7jp/MR8U+Scp7eAQDQZ+bOuVQAAPoQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUyln1vYqTnkdEe27lJHArZ8HwGB1NybEWp+S0l8xPyfFkpOTsPK75CqAxkDPLymOeTMkZcs738cUvvCIl54tHvCkl577jlqfkPPT40Y0zRnfnfB8veiQlRrtPyPn5XPjUjpQcTyY9lu7clZMzyzgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoNBg9QCNRLt6gp/l5s9rBkaGEwaR2ntHU3JaRx2RkqOxsZyciJSYbS/M2c+ebJ7RWr6neYikNy+/PyVn5bwnUnJOmZcSoyHn/Jy/87i7U3L+8tE3N87YfVzONi27K+fnYeH6LSk52rM3JSbGJ1JyPNS84rJm2R+OiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAACqUUse2zbN9n+37bH83IBACgHzReNdl2S9LnJJ0paaOkW2xfFRE/2v//mPAcIHIW187iVqtxRtqC2ANOyZl8cltKTuv456XkeHwyJWdod86C6ttf0Hw/Dw/n3OcnDm9Oyfn2Uy9JyfmtJQ+l5Hz6+FtScn7v4dem5Bx91M7GGdvvn58wibTz2JyTmkPbjkjJmTc+npKjbTtSYjIeT2My5zFnfzLuxdMk3R8RP4mIMUmXS3pXQi4AAD0vo4hXSNow5euN3X8DAAAH0PjUtKSZzs0947yf7bWS1krSiBYk3CwAAIe/jCPijZJWTfl6paRN068UEesiYk1ErBnScMLNAgBw+Mso4lsknWT7RNvzJL1b0lUJuQAA9LzGp6YjYsL2uZKul9SSdElE3NN4MgAA+kDGa8SKiGslXZuRBQBAP+GTtQAAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAolPLJWs9JtMtuerbERMKi2M56btRKSfFQ0rfI8Nxa6GNwT873X2tP8/2864mc1ci+v+PElJz5rbGUnJv2LE7J2dGen5Iz2s75Xt68eUnjjIEjc77/lnw3Z9H6wR2jKTlZYvfu6hF+yq2cx1Lt5y7niBgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCg9UD9BTzvGafRkdTYjw2lpIzMH5ESs687c0zYuNQ8xBJ/zj4ipScV5ywKSXn/Id+OSXnRcu2pOTsHJ+XkjP84HDzjCcSBpHUHmqn5AzsGU/J0fhESky0IydnImm7ZhnNAQBAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKDQYPUAPSUSFun23Hpu5OHmi6BLUmzfmZLjZUtTcgbGcxYe92TzjKxF4icW5Cx8f/foqpScI4/dnpJz190npOTM29rKyUnYrEWbEh4rJC28/8mUHO3Zm5OTxEM51TQw0vzxa3JnzmPX/sytR30AAPoMRQwAQCGKGACAQhQxAACFGhWx7U/Yvtf2nbavtH1k0lwAAPSFpkfEN0o6OSJOkfRjSR9rPhIAAP2jURFHxA0RMdH98nuSVjYfCQCA/pH5GvHvSLouMQ8AgJ53wHdN275J0rEzXHRhRPxD9zoXSpqQdNl+ctZKWitJI1rwnIYFAKDXHLCII+KM/V1u+/2Szpb01ojY58cVRcQ6SeskaYmX5nysEQAAh7lGnyNm+yxJfyjpzRGxO2ckAAD6R9PXiD8rabGkG23fbvuvEmYCAKBvNDoijogXZQ0CAEA/4pO1AAAoRBEDAFCIIgYAoBBFDABAoUa/rIVZEO2UGA/k3LWxe269K83bcrZrcNeSlJzlt403zth+4vyESaT5W1JitOVVrZSc0Q1LU3KOfiQ
lRuMLc3IG9zbP8OTc+iiF2LajeoSfEeMTB77Ss9DesyclZ7ZxRAwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgUM4q65hz2mNjKTkeHMrJGXBKTtaC4YO335+So9UrGkcsvXl7wiDSrpctS8lZ8e2UGO1Z1krJGdoVKTnD21JitPDhvY0zBh99qvkgkjQwt46lYvfulJy0x4uMECft4/0MM7fuRQAA+gxFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQaLB6AMySpMWsY2I8JyclRWoND6fkeP5ISo4e29o4IpYvTRhEWnjHppScyeNy5lnwwGhKTnthzn3e2rozJSdGhpqHTEw0z5CkicmUGLdaKTlZP+ft8aT9kyHas34THBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUCiliG1/2HbYXpaRBwBAv2hcxLZXSTpT0kPNxwEAoL9kHBH/haTzlfcxowAA9I1GRWz7HEkPR8QdSfMAANBXDrj6ku2bJB07w0UXSrpA0tuezQ3ZXitprSSNaMFBjAgAQO86YBFHxBkz/bvtV0o6UdIdtiVppaTbbJ8WEY/OkLNO0jpJWuKlnMYGAEAN1iOOiLskLX/6a9sPSFoTEVsS5gIAoC/wPmIAAAo95yPi6SJidVYWEkQ7J8c5z9XcaqXkxOhoSk57z96UnIH5I40zvOWp5oNIil27U3JanZeamtubc1+1NqfESBOTKTEpe2cgZx+3d+7KyUm6r7J+zvsNR8QAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAACg1WD4A5Lto5MRM5ORocSolx0sLsMdp8QfWYmEiYRPJg0o9zRE7M7j05OVn7J2nR+vbe5vd51vdfe2wsJSdL2s95FiccayY9Bu4PR8QAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUGqweADgYMTGek5OSIskJz2XHJ5pnSBoYmkzJiUcfT8lpJ21Xa9HClJzJnbtScgaGmj9stsfGEiZRzvefJEU7JydLr27XPnBEDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKNS4iG3/vu37bN9j+88zhgIAoF80WljT9lskvUvSKRExant5zlgAAPSHpitcf1DSxRExKkkRkbOiOHpP1kLfWbIWDM/ISdo37fGJlBy3Wik5WSZ37soJSrrPU/Zzny18f9B6dbv2oel3w4sl/bztm21/2/apGUMBANAvDnhEbPsmScfOcNGF3f//KEmvk3SqpK/afkFExAw5ayWtlaQRLWgyMwAAPeOARRwRZ+zrMtsflHRFt3i/b7staZmkzTPkrJO0TpKWeOkzihoAgH7U9NT030s6XZJsv1jSPElbGmYCANA3mv6y1iWSLrF9t6QxSe+f6bQ0AACYWaMijogxSe9NmgUAgL4zx95TAgBAf6GIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBACjU9LOmgWenVxf6zljgPWvfJC02H5OTKTlz7j5P2j9zbrtw2OOIGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEKOiEN/o/ZmSQ8e4ptdJmnLIb7NCmxnb+mH7eyHbZTYzl5zsNt5QkQcM9MFJUVcwfatEbGmeo7Zxnb2ln7Yzn7YRont7DWZ28mpaQAAClHEAAAU6qciXlc9wCHCdvaWftjOfthGie3sNWnb2TevEQMAMBf10xExAABzTl8Wse0P2w7by6pnmQ22/6PtO23fbvsG28dXzzQbbH/C9r3dbb3S9pHVM2Wz/au277Hdtt1zv4lq+yzb99m+3/ZHq+eZDbYvsf247burZ5kttlfZ/qbt9d3v1w9VzzQbbI/Y/r7tO7rb+fGM3L4rYturJJ0p6aHqWWbRJyLilIh4laSrJf1R8Tyz5UZJJ0fEKZJ+LOljxfPMhrsl/StJ36keJJvtlqTPSXq7pJdL+g3bL6+dalZ8SdJZ1UPMsglJ50XEyyS9TtK/7dH7clTS6RHxc5JeJeks269rGtp3RSzpLySdL6lnXxyPiO1TvlyoHt3WiLghIia6X35P0srKeWZDRKyPiPuq55glp0m6PyJ+EhFjki6X9K7imdJFxHckPVE9x2yKiEci4rbu33dIWi9pRe1U+aJjZ/fLoe6fxo+vfVXEts+R9HBE3FE9y2yz/ae2N0h6j3r3iHiq35F0XfUQOCgrJG2Y8vVG9eCDd7+xvVrSqyXdXDzKrLDdsn27pMcl3RgRjbdzsPFUc4ztmyQdO8NFF0q6QNLbDu1Es2N/2xkR/xARF0q60PbHJJ0r6Y8P6YBJDrSd3etcqM6pscsO5WxZns029ijP8G89efamX9heJOnrkv5g2pm5nhERk5Je1f2dlCttnxwRjV7/77kijogzZvp326+UdKKkO2xLndOYt9k+LSIePYQjptjXds7gbyVdo8O0iA+0nbbfL+lsSW+Nw/S9eAdxX/aajZJWTfl6paRNRbOgIdtD6pTwZRFxRfU8sy0inrL9LXVe/29UxH1zajoi7oqI5RGxOiJWq/Mg8JrDsYQPxPZJU748R9K9VbPMJttnSfpDSedExO7qeXDQbpF0ku0Tbc+T9G5JVxXPhOfAnaObL0haHxGfrJ5nttg+5ul3Z9ieL+kMJTy+9k0R95mLbd9t+051TsX35FsJJH1W0mJJN3bfqvVX1QNls/0vbW+U9HpJ19i+vnqmLN1ftDtX0vXq/HLPVyPintqp8tn+iqTvSnqJ7Y22f7d6plnwRknvk3R692fxdtvvqB5qFhwn6Zvdx9Zb1HmN+OqmoXyyFgAAhTgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQ6P8Bkf7MQQFmhvQAAAAASUVORK5CYII=\n" + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAeIAAAHiCAYAAAA06c+jAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAAeAklEQVR4nO3de5SkdX3n8c+nq3u65woMwwjMjAwqXpGoZ8Br1ohgUAme3dxM1GiSPXPWLFlzDkoUdhM8m2Q5cY9RV7M5E0U3QiSuQsLhEi7rbbOrCCJXB1zWADMMl5kB5j59q+/+UYXbNj0zDM+359tT9X6dM4fpqeJT3+ep7vrU81RX/RwRAgAANQaqBwAAoJ9RxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYmENsX2T70oLbvcD25w/17QKgiAFJku032f7ftrfZfsL2/7J9avVcGfZV7rbD9oskKSL+LCL+9bPI+pbtA14PwLM3WD0AUM32EklXS/qgpK9Kmifp5yWNVs7Vb2wPRsRE9RzAocYRMSC9WJIi4isRMRkReyLihoi4U5Jsv9D2N2xvtb3F9mW2j3z6f7b9gO2P2L7T9i7bX7D9PNvX2d5h+ybbR3Wvu7p7JLrW9ibbj9g+b1+D2X5d90j9Kdt32P6FKZd9wPZPurfxz7bf81x3wNSjZtsjti/tbu9Ttm/pbs+fqvME5bO2d9r+bPf6b+heZ1v3v2+Yknui7e9M2Q+fm3I7T++L37X9kKRvdP/9v9t+tJv3HduvmJL3Jdt/2d23O7tnLo61/SnbT9q+1/arn+t+ACpQxID0Y0mTtv+b7bc/XZpTWNJ/knS8pJdJWiXpomnX+WVJZ6pT6r8k6TpJF0haps7P2b+bdv23SDpJ0tskfdT2GdOHsr1C0jWS/kTSUkkflvR128fYXijpM5LeHhGLJb1B0u0HveUze7+kI9TZzqMl/RtJeyLiQkn/U9K5EbEoIs61vbQ742e61/2kpGtsH93N+ltJ3+9edpGk981we29WZ7/+Yvfr69TZN8sl3SbpsmnX/zVJ/16dfTsq6bvd6y2T9LXuDMBhgyJG34uI7ZLeJCkk/bWkzbavsv287uX3R8SNETEaEZvVeaB/87SY/xIRj0XEw+qU1c0R8cOIGJV0paTpR2kfj4hdEXGXpC9K+o0ZRnuvpGsj4tqIaEfEjZJulfSO7uVtSSfbnh8Rj0TEPfvZzF/rHt3+9M9+rjuuTnG+qHuG4AfdfTSTd0r6PxHx5YiYiIivSLpX0i/Zfr6kUyX9UUSMRcQ/SbpqhoyLuvtijyRFxCURsaO77y6S9HO2j5hy/Su7M+1VZ9/ujYi/iYhJSX+nZ+5rYE6jiAFJEbE+Ij4QESslnazO0e+nJMn2ctuX237Y9nZJl6pz9DXVY1P+vmeGrxdNu/6GKX9/sHt7050g6VenleebJB0XEbsk/bo6R6uP2L7G9kv3s4lfjYgjp/7Zz3W/LOl6SZd3T5//ue2hfVz3+O78Uz0oaUX3siciYveUyzbomX76b7Zbti+2/X+7+/qB7kVT9/fB7mtgTqOIgWki4l5JX1KnkKXOaemQdEpELFHnSNUNb2bVlL8/X9KmGa6zQdKXpxXowoi4uDvn9RFxpqTj1DkK/euGM6mbOx4RH4+Il6tzyvtsSb/19MXTrr5JnScMUz1f0sOSHpG01PaCKZet0jNNzfxNSe+SdIY6p8dXd/+96f4G5iyKGH3P9kttn2d7ZffrVeqcKv5e9yqLJe2U9FT3dduPJNzsf7C9oPuLSL+tzinV6S5V5xTvL3aPFEds/4Ltld1fnjqn+1rxaHe+yYS5ZPsttl9puyVpuzqnqp/OfkzSC6Zc/VpJL7b9m7YHbf+6pJdLujoiHlTnVPpFtufZfr06r5/vz+Lu9myVtEDSn2VsEzCXUcSAtEPSayXdbHuXOgV8t6Snf5v545JeI2mbOr+YdEXCbX5b0v2S/oek/xwRN0y/QkRsUOfo8AJJm9U5Qv6IOj+3A935Nkl6Qp3XrH8vYS5JOladX3raLml9d9an34f8aUm/0v0N5c9ExFZ1jpjPU6c8z5d0dkRs6V7/PZJe373sT9R5wrG/t4X9jTqnth+W9CP9/ydDQM9yxPQzTQBmi+3Vkv5Z0lA/vmfW9t9Jujci/rh6FmCu4IgYwKyxfao778MesH2WOkf4f188FjCn8MlaAGbTseqcyj9a0kZJH4yIH9aOBMwtnJoGAKAQp6YBAChEEQMAUKjkNeJ5Ho4RLay4aQAADrkdenJLRBwz02UlRTyihXqt31px0wAOFSedcIt2Tg5Q6Kb42vSPgv0pTk0DAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABQqWY8YQB/IWke4F9c17sVtwnPGETEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFBqsHAJDESc+ro52TM8d4cCglJybGE0KS9jH3eU/giBgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAocHqAYB+l7VgvQeckpMl2pGSk7VdnjcvJae9t3lGTE42D0HPSDkitn2k7a/Zvtf2etuvz8gFAKDXZR0Rf1rSP0bEr9ieJ2lBUi4AAD2tcRHbXiLpX0j6gCRFxJiksaa5AAD0g4xT0y+QtFnSF23/0PbnbS9MyAUAoOdlFPGgpNdI+q8R8WpJuyR9dPqVbK+1favtW8c1mnCzAAAc/jKKeKOkjRFxc/frr6lTzD8jItZFxJqIWDOk4YSbBQDg8Ne4iCPiUUkbbL+k+09vlfSjprkAAPSDrN+a/n1Jl3V/Y/onkn47KRcAgJ6WUsQRcbukNRlZAAD0Ez7iEgCAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAoaxP1kKvctJztWjn5CTN41YrJSdDTE6m5Hgo5zPcPZj0sDAxkZOTtH9iLGd1Vg813z8xMZ4wifJ+HgaHUnKyvpfTZD3uzDKOiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUSloBHD0ra2HtpAXMBxIWZZekaEdKTsYi8QOLFiZMIilrUfZWKyXGSTlzTcZ9nrVvIuk+j/GJlJy0x4s+wxExAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEI5q6yjZ3lwqHqEn5W0oPrAcM63vhfMT8lJMZG0SPxxy1JyPJq02Hwr53
gh5iU93G3d1jjC80cSBpEmN2xKyWkdsSQlZ3Lb9pQcRTsn5zDBETEAAIUoYgAAClHEAAAUoogBAChEEQMAUCitiG23bP/Q9tVZmQAA9LrMI+IPSVqfmAcAQM9LKWLbKyW9U9LnM/IAAOgXWUfEn5J0vqT+ehc2AAANNS5i22dLejwifnCA6621favtW8c12vRmAQDoCRlHxG+UdI7tByRdLul025dOv1JErIuINRGxZkjDCTcLAMDhr3ERR8THImJlRKyW9G5J34iI9zaeDACAPsD7iAEAKJS6+lJEfEvStzIzAQDoZRwRAwBQiCIGAKAQRQwAQKHU14jRe2JiPCWntWhRSs7krt0pOYOrjk7JicULmofYzTMkeXQiJWd82cKUnJ0rct6mOLx9MiVnYCxScoYHmx+/DDz2ZMIk0uDzV6TktB/fkpIzMJJzn7f35nzWhFutxhlZj4H7wxExAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEKD1QMAB2Nw2dKcoNGxlJhYdkTzjKHmi5dL0s6XHpWSM3pkzvPzvUl31e7lOQ9TI09GSs7YksWNMxYl3ecDu0ZzcrQsJSe2PpmS41bO/onJyZSc2cYRMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCOStuI4/n1nOjgZHhnCA7JydJ+/ichdBHj1nQOCOS7vKtJ+cETc6PlJz2cFJOzhrxGtuas39Gtjb/Xm4PLUyYRBrZMi8n59GcfePxiZycycmUnJgYT8mZbXPrUR8AgD5DEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKBQ4yK2vcr2N22vt32P7Q9lDAYAQD/IWPRhQtJ5EXGb7cWSfmD7xoj4UUI2AAA9rfERcUQ8EhG3df++Q9J6SSua5gIA0A9SXyO2vVrSqyXdnJkLAECvSluP2PYiSV+X9AcRsX2Gy9dKWitJI2q+hisAAL0g5YjY9pA6JXxZRFwx03UiYl1ErImINUNKWmweAIDDXOMjYtuW9AVJ6yPik81HOox57rwbbGAk58mOB3NOmnjJ4pQczR9JiRk7OueszN6jm++fx0+LhEkkL9+TknPmSetTcu7btjwl54FHlqXk6Imc7509z2ueMTDu5iGSBve0UnLSjI6lxHg46fFrfCIlJ8X4vi/KaI43SnqfpNNt3979846EXAAAel7jp/MR8U+Scp7eAQDQZ+bOuVQAAPoQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUyln1vYqTnkdEe27lJHArZ8HwGB1NybEWp+S0l8xPyfFkpOTsPK75CqAxkDPLymOeTMkZcs738cUvvCIl54tHvCkl577jlqfkPPT40Y0zRnfnfB8veiQlRrtPyPn5XPjUjpQcTyY9lu7clZMzyzgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoNBg9QCNRLt6gp/l5s9rBkaGEwaR2ntHU3JaRx2RkqOxsZyciJSYbS/M2c+ebJ7RWr6neYikNy+/PyVn5bwnUnJOmZcSoyHn/Jy/87i7U3L+8tE3N87YfVzONi27K+fnYeH6LSk52rM3JSbGJ1JyPNS84rJm2R+OiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAACqUUse2zbN9n+37bH83IBACgHzReNdl2S9LnJJ0paaOkW2xfFRE/2v//mPAcIHIW187iVqtxRtqC2ANOyZl8cltKTuv456XkeHwyJWdod86C6ttf0Hw/Dw/n3OcnDm9Oyfn2Uy9JyfmtJQ+l5Hz6+FtScn7v4dem5Bx91M7GGdvvn58wibTz2JyTmkPbjkjJmTc+npKjbTtSYjIeT2My5zFnfzLuxdMk3R8RP4mIMUmXS3pXQi4AAD0vo4hXSNow5euN3X8DAAAH0PjUtKSZzs0947yf7bWS1krSiBYk3CwAAIe/jCPijZJWTfl6paRN068UEesiYk1ErBnScMLNAgBw+Mso4lsknWT7RNvzJL1b0lUJuQAA9LzGp6YjYsL2uZKul9SSdElE3NN4MgAA+kDGa8SKiGslXZuRBQBAP+GTtQAAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAolPLJWs9JtMtuerbERMKi2M56btRKSfFQ0rfI8Nxa6GNwT873X2tP8/2864mc1ci+v+PElJz5rbGUnJv2LE7J2dGen5Iz2s75Xt68eUnjjIEjc77/lnw3Z9H6wR2jKTlZYvfu6hF+yq2cx1Lt5y7niBgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCg9UD9BTzvGafRkdTYjw2lpIzMH5ESs687c0zYuNQ8xBJ/zj4ipScV5ywKSXn/Id+OSXnRcu2pOTsHJ+XkjP84HDzjCcSBpHUHmqn5AzsGU/J0fhESky0IydnImm7ZhnNAQBAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQiCIGAKDQYPUAPSUSFun23Hpu5OHmi6BLUmzfmZLjZUtTcgbGcxYe92TzjKxF4icW5Cx8f/foqpScI4/dnpJz190npOTM29rKyUnYrEWbEh4rJC28/8mUHO3Zm5OTxEM51TQw0vzxa3JnzmPX/sytR30AAPoMRQwAQCGKGACAQhQxAACFGhWx7U/Yvtf2nbavtH1k0lwAAPSFpkfEN0o6OSJOkfRjSR9rPhIAAP2jURFHxA0RMdH98nuSVjYfCQCA/pH5GvHvSLouMQ8AgJ53wHdN275J0rEzXHRhRPxD9zoXSpqQdNl+ctZKWitJI1rwnIYFAKDXHLCII+KM/V1u+/2Szpb01ojY58cVRcQ6SeskaYmX5nysEQAAh7lGnyNm+yxJfyjpzRGxO2ckAAD6R9PXiD8rabGkG23fbvuvEmYCAKBvNDoijogXZQ0CAEA/4pO1AAAoRBEDAFCIIgYAoBBFDABAoUa/rIVZEO2UGA/k3LWxe269K83bcrZrcNeSlJzlt403zth+4vyESaT5W1JitOVVrZSc0Q1LU3KOfiQlRuMLc3IG9zbP8OTc+
iiF2LajeoSfEeMTB77Ss9DesyclZ7ZxRAwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgUM4q65hz2mNjKTkeHMrJGXBKTtaC4YO335+So9UrGkcsvXl7wiDSrpctS8lZ8e2UGO1Z1krJGdoVKTnD21JitPDhvY0zBh99qvkgkjQwt46lYvfulJy0x4uMECft4/0MM7fuRQAA+gxFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQaLB6AMySpMWsY2I8JyclRWoND6fkeP5ISo4e29o4IpYvTRhEWnjHppScyeNy5lnwwGhKTnthzn3e2rozJSdGhpqHTEw0z5CkicmUGLdaKTlZP+ft8aT9kyHas34THBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUCiliG1/2HbYXpaRBwBAv2hcxLZXSTpT0kPNxwEAoL9kHBH/haTzlfcxowAA9I1GRWz7HEkPR8QdSfMAANBXDrj6ku2bJB07w0UXSrpA0tuezQ3ZXitprSSNaMFBjAgAQO86YBFHxBkz/bvtV0o6UdIdtiVppaTbbJ8WEY/OkLNO0jpJWuKlnMYGAEAN1iOOiLskLX/6a9sPSFoTEVsS5gIAoC/wPmIAAAo95yPi6SJidVYWEkQ7J8c5z9XcaqXkxOhoSk57z96UnIH5I40zvOWp5oNIil27U3JanZeamtubc1+1NqfESBOTKTEpe2cgZx+3d+7KyUm6r7J+zvsNR8QAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAACg1WD4A5Lto5MRM5ORocSolx0sLsMdp8QfWYmEiYRPJg0o9zRE7M7j05OVn7J2nR+vbe5vd51vdfe2wsJSdL2s95FiccayY9Bu4PR8QAABSiiAEAKEQRAwBQiCIGAKAQRQwAQCGKGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUGqweADgYMTGek5OSIskJz2XHJ5pnSBoYmkzJiUcfT8lpJ21Xa9HClJzJnbtScgaGmj9stsfGEiZRzvefJEU7JydLr27XPnBEDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKNS4iG3/vu37bN9j+88zhgIAoF80WljT9lskvUvSKRExant5zlgAAPSHpitcf1DSxRExKkkRkbOiOHpP1kLfWbIWDM/ISdo37fGJlBy3Wik5WSZ37soJSrrPU/Zzny18f9B6dbv2oel3w4sl/bztm21/2/apGUMBANAvDnhEbPsmScfOcNGF3f//KEmvk3SqpK/afkFExAw5ayWtlaQRLWgyMwAAPeOARRwRZ+zrMtsflHRFt3i/b7staZmkzTPkrJO0TpKWeOkzihoAgH7U9NT030s6XZJsv1jSPElbGmYCANA3mv6y1iWSLrF9t6QxSe+f6bQ0AACYWaMijogxSe9NmgUAgL4zx95TAgBAf6GIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBACjU9LOmgWenVxf6zljgPWvfJC02H5OTKTlz7j5P2j9zbrtw2OOIGACAQhQxAACFKGIAAApRxAAAFKKIAQAoRBEDAFCIIgYAoBBFDABAIYoYAIBCFDEAAIUoYgAAClHEAAAUoogBAChEEQMAUIgiBgCgEEUMAEAhihgAgEKOiEN/o/ZmSQ8e4ptdJmnLIb7NCmxnb+mH7eyHbZTYzl5zsNt5QkQcM9MFJUVcwfatEbGmeo7Zxnb2ln7Yzn7YRont7DWZ28mpaQAAClHEAAAU6qciXlc9wCHCdvaWftjOfthGie3sNWnb2TevEQMAMBf10xExAABzTl8Wse0P2w7by6pnmQ22/6PtO23fbvsG28dXzzQbbH/C9r3dbb3S9pHVM2Wz/au277Hdtt1zv4lq+yzb99m+3/ZHq+eZDbYvsf247burZ5kttlfZ/qbt9d3v1w9VzzQbbI/Y/r7tO7rb+fGM3L4rYturJJ0p6aHqWWbRJyLilIh4laSrJf1R8Tyz5UZJJ0fEKZJ+LOljxfPMhrsl/StJ36keJJvtlqTPSXq7pJdL+g3bL6+dalZ8SdJZ1UPMsglJ50XEyyS9TtK/7dH7clTS6RHxc5JeJeks269rGtp3RSzpLySdL6lnXxyPiO1TvlyoHt3WiLghIia6X35P0srKeWZDRKyPiPuq55glp0m6PyJ+EhFjki6X9K7imdJFxHckPVE9x2yKiEci4rbu33dIWi9pRe1U+aJjZ/fLoe6fxo+vfVXEts+R9HBE3FE9y2yz/ae2N0h6j3r3iHiq35F0XfUQOCgrJG2Y8vVG9eCDd7+xvVrSqyXdXDzKrLDdsn27pMcl3RgRjbdzsPFUc4ztmyQdO8NFF0q6QNLbDu1Es2N/2xkR/xARF0q60PbHJJ0r6Y8P6YBJDrSd3etcqM6pscsO5WxZns029ijP8G89efamX9heJOnrkv5g2pm5nhERk5Je1f2dlCttnxwRjV7/77kijogzZvp326+UdKKkO2xLndOYt9k+LSIePYQjptjXds7gbyVdo8O0iA+0nbbfL+lsSW+Nw/S9eAdxX/aajZJWTfl6paRNRbOgIdtD6pTwZRFxRfU8sy0inrL9LXVe/29UxH1zajoi7oqI5RGxOiJWq/Mg8JrDsYQPxPZJU748R9K9VbPMJttnSfpDSedExO7qeXDQbpF0ku0Tbc+T9G5JVxXPhOfAnaObL0haHxGfrJ5nttg+5ul3Z9ieL+kMJTy+9k0R95mLbd9t+051TsX35FsJJH1W0mJJN3bfqvVX1QNls/0vbW+U9HpJ19i+vnqmLN1ftDtX0vXq/HLPVyPintqp8tn+iqTvSnqJ7Y22f7d6plnwRknvk3R692fxdtvvqB5qFhwn6Zvdx9Zb1HmN+OqmoXyyFgAAhTgiBgCgEEUMAEAhihgAgEIUMQAAhShiAAAKUcQAABSiiAEAKEQRAwBQ6P8Bkf7MQQFmhvQAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] }, "metadata": { "needs_background": "light" @@ -1256,8 +1304,16 @@ "outputs": [ { "data": { - "text/plain": "z ~ N(, )", - "text/latex": "$$\n \\begin{array}{rcl}\n \\text{z} &\\sim & \\operatorname{N}(\\text{},~\\text{})\n \\end{array}\n $$" + "text/latex": [ + "$$\n", + " \\begin{array}{rcl}\n", + " \\text{z} &\\sim & \\operatorname{N}(\\text{},~\\text{})\n", + " \\end{array}\n", + " $$" + ], + "text/plain": [ + "z ~ N(, )" + ] }, "execution_count": 37, "metadata": {}, @@ -1330,7 +1386,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 39, "metadata": {}, @@ -1363,7 +1421,9 @@ "outputs": [ { "data": { - "text/plain": "array([-0.91893853, -1.61208571])" + "text/plain": [ + "array([-0.91893853, -1.61208571])" + ] }, "execution_count": 40, "metadata": {}, @@ -1388,7 +1448,9 @@ "outputs": [ { "data": { - "text/plain": "array([-0.91893853, -1.61208571])" + "text/plain": [ + "array([-0.91893853, -1.61208571])" + ] }, "execution_count": 41, "metadata": {}, @@ -1447,7 +1509,9 @@ }, { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 42, "metadata": {}, @@ -1488,7 +1552,9 @@ "outputs": [ { "data": { - "text/plain": "{'z': array([0., 0.])}" + "text/plain": [ + "{'z': array([0., 0.])}" + ] }, "execution_count": 44, "metadata": {}, @@ -1507,7 +1573,9 @@ "outputs": [ { "data": { - "text/plain": "[array([-0.91893853, -1.61208571])]" + "text/plain": [ + "[array([-0.91893853, -1.61208571])]" + ] }, "execution_count": 45, "metadata": {}, @@ -1534,7 +1602,9 @@ "outputs": [ { "data": { - "text/plain": "" + "text/plain": [ + "" + ] }, "execution_count": 46, "metadata": {}, @@ -1555,7 +1625,9 @@ "outputs": [ { "data": { - "text/plain": "array([ 0.14809949, -1.18242809, 1.34229652])" + "text/plain": [ + "array([ 0.14809949, -1.18242809, 1.34229652])" + ] }, "execution_count": 47, "metadata": {}, @@ -1574,7 +1646,9 @@ "outputs": [ { "data": { - "text/plain": "-1.7001885332046727" + "text/plain": [ + "-1.7001885332046727" + ] }, "execution_count": 48, "metadata": {}, @@ -1619,7 +1693,9 @@ "outputs": [ { "data": { - "text/plain": "{mu ~ N(0, 2): mu, sigma ~ N**+(0, 3): sigma_log__, x ~ N(mu, sigma): x}" + "text/plain": [ + "{mu ~ N(0, 2): mu, sigma ~ N**+(0, 3): sigma_log__, x ~ N(mu, sigma): x}" + ] }, "execution_count": 50, "metadata": {}, @@ -1644,7 +1720,9 @@ "outputs": [ { "data": { - "text/plain": "[mu, sigma_log__, x]" + "text/plain": [ + "[mu, sigma_log__, x]" + ] }, "execution_count": 51, "metadata": {}, @@ -1669,7 +1747,9 @@ "outputs": [ { "data": { - "text/plain": "array([ -1.61208571, -11.32440364, 9.08106147])" + "text/plain": [ + "array([ -1.61208571, -11.32440364, 9.08106147])" + ] }, "execution_count": 52, "metadata": {}, @@ -1744,7 +1824,9 @@ "outputs": [ { "data": { - "text/plain": "[array(-1.61208571), array(-11.32440364), array(9.08106147)]" + "text/plain": [ + "[array(-1.61208571), array(-11.32440364), array(9.08106147)]" + ] }, "execution_count": 54, "metadata": {}, @@ -1794,9 +1876,9 @@ }, "hide_input": false, "kernelspec": { - "name": "pymc", + "display_name": "pymc", "language": "python", - "display_name": "pymc" + "name": "pymc" }, "language_info": { "codemirror_mode": { @@ -1808,7 +1890,20 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.10.4" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + 
"toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false }, "vscode": { "interpreter": { diff --git a/docs/source/learn/core_notebooks/pymc_overview.ipynb b/docs/source/learn/core_notebooks/pymc_overview.ipynb index 6bcaa9cb017..c48acf18104 100644 --- a/docs/source/learn/core_notebooks/pymc_overview.ipynb +++ b/docs/source/learn/core_notebooks/pymc_overview.ipynb @@ -4340,14 +4340,15 @@ ], "source": [ "%load_ext watermark\n", - "%watermark -n -u -v -iv -w -p xarray,aeppl" + "%watermark -n -u -v -iv -w -p xarray" ] } ], "metadata": { "anaconda-cloud": {}, + "hide_input": false, "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -4361,7 +4362,20 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.8.10" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false } }, "nbformat": 4, diff --git a/pymc/__init__.py b/pymc/__init__.py index 09314aa5c30..90fc22dcd8b 100644 --- a/pymc/__init__.py +++ b/pymc/__init__.py @@ -54,6 +54,7 @@ def __set_compiler_flags(): from pymc.distributions import * from pymc.exceptions import * from pymc.func_utils import find_constrained_prior +from pymc.logprob import * from pymc.math import ( expand_packed_triangular, invlogit, diff --git a/pymc/aesaraf.py b/pymc/aesaraf.py index 0fdb53acfdf..b514e6e314b 100644 --- a/pymc/aesaraf.py +++ b/pymc/aesaraf.py @@ -32,8 +32,6 @@ import pandas as pd import scipy.sparse as sps -from aeppl.logprob import CheckParameterValue -from aeppl.transforms import RVTransform from aesara import scalar from aesara.compile import Function, Mode, get_mode from aesara.gradient import grad @@ -65,6 +63,8 @@ from aesara.tensor.var import TensorConstant, TensorVariable from pymc.exceptions import NotConstantValueError +from pymc.logprob.transforms import RVTransform +from pymc.logprob.utils import CheckParameterValue from pymc.vartypes import continuous_types, isgenerator, typefilter PotentialShapeType = Union[int, np.ndarray, Sequence[Union[int, Variable]], TensorVariable] @@ -944,7 +944,7 @@ def largest_common_dtype(tensors): @node_rewriter(tracks=[CheckParameterValue]) def local_remove_check_parameter(fgraph, node): - """Rewrite that removes Aeppl's CheckParameterValue + """Rewrite that removes CheckParameterValue This is used when compile_rv_inplace """ @@ -1068,13 +1068,13 @@ def compile_pymc( Ensures that compiled functions containing random variables will produce new samples on each call. local_check_parameter_to_ninf_switch - Replaces Aeppl's CheckParameterValue assertions is logp expressions with Switches + Replaces CheckParameterValue assertions is logp expressions with Switches that return -inf in place of the assert. Optional rewrites ----------------- local_remove_check_parameter - Replaces Aeppl's CheckParameterValue assertions is logp expressions. This is used + Replaces CheckParameterValue assertions is logp expressions. This is used as an alteranative to the default local_check_parameter_to_ninf_switch whenenver this function is called within a model context and the model `check_bounds` flag is set to False. 
diff --git a/pymc/distributions/__init__.py b/pymc/distributions/__init__.py index 0f362b3e676..e70192b262e 100644 --- a/pymc/distributions/__init__.py +++ b/pymc/distributions/__init__.py @@ -15,7 +15,6 @@ from pymc.distributions.logprob import ( # isort:skip logcdf, logp, - joint_logp, ) from pymc.distributions.bound import Bound @@ -198,7 +197,6 @@ "Censored", "CAR", "PolyaGamma", - "joint_logp", "logp", "logcdf", ] diff --git a/pymc/distributions/censored.py b/pymc/distributions/censored.py index ab4cdb3f674..7b21f35a15e 100644 --- a/pymc/distributions/censored.py +++ b/pymc/distributions/censored.py @@ -29,7 +29,7 @@ class CensoredRV(SymbolicRandomVariable): """Censored random variable""" - inline_aeppl = True + inline_logprob = True _print_name = ("Censored", "\\operatorname{Censored}") diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py index db835ae5660..8013bba05a0 100644 --- a/pymc/distributions/continuous.py +++ b/pymc/distributions/continuous.py @@ -28,7 +28,6 @@ import aesara.tensor as at import numpy as np -from aeppl.logprob import _logprob, logcdf, logprob from aesara.graph.basic import Apply, Variable from aesara.graph.op import Op from aesara.raise_op import Assert @@ -57,6 +56,8 @@ from aesara.tensor.random.op import RandomVariable from aesara.tensor.var import TensorConstant +from pymc.logprob.abstract import _logprob, logcdf, logprob + try: from polyagamma import polyagamma_cdf, polyagamma_pdf, random_polyagamma except ImportError: # pragma: no cover @@ -531,6 +532,9 @@ def logcdf(value, mu, sigma): msg="sigma > 0", ) + def icdf(value, mu, sigma): + return mu + sigma * -np.sqrt(2.0) * at.erfcinv(2 * value) + class TruncatedNormalRV(RandomVariable): name = "truncated_normal" @@ -1290,10 +1294,6 @@ class Exponential(PositiveContinuous): Variance :math:`\dfrac{1}{\lambda^2}` ======== ============================ - Notes - ----- - Logp calculation is defined in `aeppl.logprob `_. 
- Parameters ---------- lam : tensor_like of float diff --git a/pymc/distributions/discrete.py b/pymc/distributions/discrete.py index fc574485843..a220b325d82 100644 --- a/pymc/distributions/discrete.py +++ b/pymc/distributions/discrete.py @@ -819,6 +819,9 @@ def logcdf(value, p): msg="0 <= p <= 1", ) + def icdf(value, p): + return at.ceil(at.log1p(-value) / at.log1p(-p)).astype("int64") + class HyperGeometric(Discrete): R""" diff --git a/pymc/distributions/dist_math.py b/pymc/distributions/dist_math.py index 347d5b818ee..ad3bc8f635a 100644 --- a/pymc/distributions/dist_math.py +++ b/pymc/distributions/dist_math.py @@ -27,7 +27,6 @@ import scipy.linalg import scipy.stats -from aeppl.logprob import CheckParameterValue from aesara.compile.builders import OpFromGraph from aesara.graph.basic import Apply, Variable from aesara.graph.op import Op @@ -38,6 +37,7 @@ from pymc.aesaraf import floatX from pymc.distributions.shape_utils import to_tuple +from pymc.logprob.utils import CheckParameterValue solve_lower = SolveTriangular(lower=True) solve_upper = SolveTriangular(lower=False) diff --git a/pymc/distributions/distribution.py b/pymc/distributions/distribution.py index 7c6703793ba..8c7a8411621 100644 --- a/pymc/distributions/distribution.py +++ b/pymc/distributions/distribution.py @@ -23,9 +23,6 @@ import numpy as np -from aeppl.abstract import MeasurableVariable, _get_measurable_outputs -from aeppl.logprob import _logcdf, _logprob -from aeppl.rewriting import logprob_rewrites_db from aesara import tensor as at from aesara.compile.builders import OpFromGraph from aesara.graph import node_rewriter @@ -48,6 +45,14 @@ find_size, shape_from_dims, ) +from pymc.logprob.abstract import ( + MeasurableVariable, + _get_measurable_outputs, + _icdf, + _logcdf, + _logprob, +) +from pymc.logprob.rewriting import logprob_rewrites_db from pymc.printing import str_for_dist from pymc.util import UNSET, _add_future_warning_tag from pymc.vartypes import string_types @@ -130,6 +135,14 @@ def logcdf(op, value, *dist_params, **kwargs): dist_params = dist_params[3:] return class_logcdf(value, *dist_params) + class_icdf = clsdict.get("icdf") + if class_icdf: + + @_icdf.register(rv_type) + def icdf(op, value, *dist_params, **kwargs): + dist_params = dist_params[3:] + return class_icdf(value, *dist_params) + class_moment = clsdict.get("moment") if class_moment: @@ -170,7 +183,7 @@ class SymbolicRandomVariable(OpFromGraph): (0 for scalar, 1 for vector, ...) """ - inline_aeppl: bool = False + inline_logprob: bool = False """Specifies whether the logprob function is derived automatically by introspection of the inner graph. @@ -375,13 +388,13 @@ def dist( return rv_out -# Let Aeppl know that the SymbolicRandomVariable has a logprob. +# Let PyMC know that the SymbolicRandomVariable has a logprob. 
MeasurableVariable.register(SymbolicRandomVariable) @_get_measurable_outputs.register(SymbolicRandomVariable) def _get_measurable_outputs_symbolic_random_variable(op, node): - # This tells Aeppl that any non RandomType outputs are measurable + # This tells PyMC that any non RandomType outputs are measurable # Assume that if there is one default_output, that's the only one that is measurable # In the rare case this is not what one wants, a specialized _get_measuarable_outputs @@ -396,11 +409,11 @@ def _get_measurable_outputs_symbolic_random_variable(op, node): @node_rewriter([SymbolicRandomVariable]) def inline_symbolic_random_variable(fgraph, node): """ - This optimization expands the internal graph of a SymbolicRV when obtaining logp - from Aeppl, if the flag `inline_aeppl` is True. + This optimization expands the internal graph of a SymbolicRV when obtaining the logp + graph, if the flag `inline_logprob` is True. """ op = node.op - if op.inline_aeppl: + if op.inline_logprob: return clone_replace(op.inner_outputs, {u: v for u, v in zip(op.inner_inputs, node.inputs)}) diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py index fa2ba09ace2..1b431b8eea8 100644 --- a/pymc/distributions/logprob.py +++ b/pymc/distributions/logprob.py @@ -11,27 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import warnings -from collections.abc import Mapping -from typing import Dict, List, Optional, Sequence, Union + +from typing import Dict, List, Sequence, Union import aesara import numpy as np -from aeppl import factorized_joint_logprob, logprob -from aeppl.abstract import assign_custom_measurable_outputs -from aeppl.logprob import _logprob -from aeppl.logprob import logcdf as logcdf_aeppl -from aeppl.logprob import logprob as logp_aeppl -from aeppl.tensor import MeasurableJoin -from aeppl.transforms import RVTransform, TransformValuesRewrite from aesara import tensor as at -from aesara.graph.basic import graph_inputs, io_toposort from aesara.tensor.random.op import RandomVariable from aesara.tensor.var import TensorVariable -from pymc.aesaraf import constant_fold, floatX +from pymc.aesaraf import floatX +from pymc.logprob.abstract import assign_custom_measurable_outputs +from pymc.logprob.abstract import logcdf as logcdf_logprob +from pymc.logprob.abstract import logprob as logp_logprob +from pymc.logprob.joint_logprob import factorized_joint_logprob +from pymc.logprob.transforms import RVTransform, TransformValuesRewrite TOTAL_SIZE = Union[int, Sequence[int], None] @@ -126,143 +122,6 @@ def _check_no_rvs(logp_terms: Sequence[TensorVariable]): ) -def joint_logp( - var: Union[TensorVariable, List[TensorVariable]], - rv_values: Optional[Union[TensorVariable, Dict[TensorVariable, TensorVariable]]] = None, - *, - jacobian: bool = True, - scaling: bool = True, - transformed: bool = True, - sum: bool = True, - **kwargs, -) -> Union[TensorVariable, List[TensorVariable]]: - """Create a measure-space (i.e. log-likelihood) graph for a random variable - or a list of random variables at a given point. - - The input `var` determines which log-likelihood graph is used and - `rv_value` is that graph's input parameter. For example, if `var` is - the output of a ``NormalRV`` ``Op``, then the output is a graph of the - density function for `var` set to the value `rv_value`. 
- - Parameters - ========== - var - The `RandomVariable` output that determines the log-likelihood graph. - Can also be a list of variables. The final log-likelihood graph will - be the sum total of all individual log-likelihood graphs of variables - in the list. - rv_values - A variable, or ``dict`` of variables, that represents the value of - `var` in its log-likelihood. If no `rv_value` is provided, - ``var.tag.value_var`` will be checked and, when available, used. - jacobian - Whether or not to include the Jacobian term. - scaling - A scaling term to apply to the generated log-likelihood graph. - transformed - Apply transforms. - sum - Sum the log-likelihood or return each term as a separate list item. - - """ - warnings.warn( - "joint_logp has been deprecated, use model.logp instead", - FutureWarning, - ) - # TODO: In future when we drop support for tag.value_var most of the following - # logic can be removed and logp can just be a wrapper function that calls aeppl's - # joint_logprob directly. - - # If var is not a list make it one. - if not isinstance(var, (list, tuple)): - var = [var] - - # If logp isn't provided values it is assumed that the tagged value var or - # observation is the value variable for that particular RV. - if rv_values is None: - rv_values = {} - for rv in var: - value_var = getattr(rv.tag, "observations", getattr(rv.tag, "value_var", None)) - if value_var is None: - raise ValueError(f"No value variable found for var {rv}") - rv_values[rv] = value_var - # Else we assume we were given a single rv and respective value - elif not isinstance(rv_values, Mapping): - if len(var) == 1: - rv_values = {var[0]: at.as_tensor_variable(rv_values).astype(var[0].dtype)} - else: - raise ValueError("rv_values must be a dict if more than one var is requested") - - if scaling: - rv_scalings = {} - for rv, value_var in rv_values.items(): - rv_scalings[value_var] = _get_scaling( - getattr(rv.tag, "total_size", None), value_var.shape, value_var.ndim - ) - - # Aeppl needs all rv-values pairs, not just that of the requested var. - # Hence we iterate through the graph to collect them. - tmp_rvs_to_values = rv_values.copy() - for node in io_toposort(graph_inputs(var), var): - try: - curr_vars = [node.default_output()] - except ValueError: - curr_vars = node.outputs - for curr_var in curr_vars: - if curr_var in tmp_rvs_to_values: - continue - # Check if variable has a value variable - value_var = getattr( - curr_var.tag, "observations", getattr(curr_var.tag, "value_var", None) - ) - if value_var is not None: - tmp_rvs_to_values[curr_var] = value_var - - # After collecting all necessary rvs and values, we check for any value transforms - transform_map = {} - if transformed: - for rv, value_var in tmp_rvs_to_values.items(): - if hasattr(value_var.tag, "transform"): - transform_map[value_var] = value_var.tag.transform - # If the provided value_variable does not have transform information, we - # check if the original `rv.tag.value_var` does. - # TODO: This logic should be replaced by an explicit dict of - # `{value_var: transform}` similar to `rv_values`. 
- else: - original_value_var = getattr(rv.tag, "value_var", None) - if original_value_var is not None and hasattr(original_value_var.tag, "transform"): - transform_map[value_var] = original_value_var.tag.transform - - transform_opt = TransformValuesRewrite(transform_map) - temp_logp_var_dict = factorized_joint_logprob( - tmp_rvs_to_values, - extra_rewrites=transform_opt, - use_jacobian=jacobian, - **kwargs, - ) - - # aeppl returns the logp for every single value term we provided to it. This includes - # the extra values we plugged in above, so we filter those we actually wanted in the - # same order they were given in. - logp_var_dict = {} - for value_var in rv_values.values(): - logp_var_dict[value_var] = temp_logp_var_dict[value_var] - - _check_no_rvs(list(logp_var_dict.values())) - - if scaling: - for value_var in logp_var_dict.keys(): - if value_var in rv_scalings: - logp_var_dict[value_var] *= rv_scalings[value_var] - - if sum: - logp_var = at.sum([at.sum(factor) for factor in logp_var_dict.values()]) - else: - logp_var = list(logp_var_dict.values()) - - return logp_var - - def _joint_logp( rvs: Sequence[TensorVariable], *, @@ -272,8 +131,8 @@ def _joint_logp( rvs_to_total_sizes: Dict[TensorVariable, TOTAL_SIZE], **kwargs, ) -> List[TensorVariable]: - """Thin wrapper around aeppl.factorized_joint_logprob, extended with PyMC specific - concerns such as transforms, jacobian, and scaling""" + """Thin wrapper around pymc.logprob.factorized_joint_logprob, extended with Model + specific concerns such as transforms, jacobian, and scaling""" transform_rewrite = None values_to_transforms = { @@ -292,9 +151,9 @@ def _joint_logp( **kwargs, ) - # aeppl returns the logp for every single value term we provided to it. This includes - # the extra values we plugged in above, so we filter those we actually wanted in the - # same order they were given in. + # The function returns the logp for every single value term we provided to it. This + # includes the extra values we plugged in above, so we filter those we actually + # wanted in the same order they were given in. logp_terms = {} for rv in rvs: value_var = rvs_to_values[rv] @@ -314,7 +173,7 @@ def logp(rv: TensorVariable, value) -> TensorVariable: value = at.as_tensor_variable(value, dtype=rv.dtype) try: - return logp_aeppl(rv, value) + return logp_logprob(rv, value) except NotImplementedError: try: value = rv.type.filter_variable(value) @@ -332,11 +191,11 @@ def logcdf(rv: TensorVariable, value) -> TensorVariable: """Return the log-cdf graph of a Random Variable""" value = at.as_tensor_variable(value, dtype=rv.dtype) - return logcdf_aeppl(rv, value) + return logcdf_logprob(rv, value) def ignore_logprob(rv: TensorVariable) -> TensorVariable: - """Return a duplicated variable that is ignored when creating Aeppl logprob graphs + """Return a duplicated variable that is ignored when creating logprob graphs This is used in SymbolicDistributions that use other RVs as inputs but account for their logp terms explicitly. @@ -350,47 +209,3 @@ def ignore_logprob(rv: TensorVariable) -> TensorVariable: return rv new_node = assign_custom_measurable_outputs(node, type_prefix=prefix) return new_node.outputs[node.outputs.index(rv)] - - -@_logprob.register(MeasurableJoin) -def logprob_join_constant_shapes(op, values, axis, *base_vars, **kwargs): - """Compute the log-likelihood graph for a `Join`. 
- - This overrides the implementation in Aeppl, to constant fold the shapes - of the base vars so that RandomVariables do not show up in the logp graph, - which is a requirement enforced by `pymc.distributions.logprob.joint_logp` - """ - (value,) = values - - base_var_shapes = [base_var.shape[axis] for base_var in base_vars] - - # We don't need the graph to be constant, just to have RandomVariables removed - base_var_shapes = constant_fold(base_var_shapes, raise_not_constant=False) - - split_values = at.split( - value, - splits_size=[base_var_shape for base_var_shape in base_var_shapes], - n_splits=len(base_vars), - axis=axis, - ) - - logps = [ - logprob(base_var, split_value) for base_var, split_value in zip(base_vars, split_values) - ] - - if len({logp.ndim for logp in logps}) != 1: - raise ValueError( - "Joined logps have different number of dimensions, this can happen when " - "joining univariate and multivariate distributions", - ) - - base_vars_ndim_supp = split_values[0].ndim - logps[0].ndim - join_logprob = at.concatenate( - [ - at.atleast_1d(logprob(base_var, split_value)) - for base_var, split_value in zip(base_vars, split_values) - ], - axis=axis - base_vars_ndim_supp, - ) - - return join_logprob diff --git a/pymc/distributions/mixture.py b/pymc/distributions/mixture.py index f19713bccc8..f62c534b9a5 100644 --- a/pymc/distributions/mixture.py +++ b/pymc/distributions/mixture.py @@ -17,8 +17,6 @@ import aesara.tensor as at import numpy as np -from aeppl.logprob import _logcdf, _logprob -from aeppl.transforms import IntervalTransform from aesara.graph.basic import Node, equal_computations from aesara.tensor import TensorVariable from aesara.tensor.random.op import RandomVariable @@ -35,6 +33,8 @@ from pymc.distributions.logprob import ignore_logprob, logcdf, logp from pymc.distributions.shape_utils import _change_dist_size, change_dist_size from pymc.distributions.transforms import _default_transform +from pymc.logprob.abstract import _logcdf, _logprob +from pymc.logprob.transforms import IntervalTransform from pymc.util import check_dist_not_registered from pymc.vartypes import continuous_types, discrete_types @@ -254,7 +254,7 @@ def rv_op(cls, weights, *components, size=None): assert weights_ndim_batch == 0 # Component RVs terms are accounted by the Mixture logprob, so they can be - # safely ignored by Aeppl + # safely ignored in the logprob graph components = [ignore_logprob(component) for component in components] # Create a OpFromGraph that encapsulates the random generating process diff --git a/pymc/distributions/multivariate.py b/pymc/distributions/multivariate.py index 7c9df80685e..4ffcc019a69 100644 --- a/pymc/distributions/multivariate.py +++ b/pymc/distributions/multivariate.py @@ -25,7 +25,6 @@ import numpy as np import scipy -from aeppl.logprob import _logprob from aesara.graph.basic import Apply, Constant, Variable from aesara.graph.op import Op from aesara.raise_op import Assert @@ -69,6 +68,7 @@ to_tuple, ) from pymc.distributions.transforms import Interval, ZeroSumTransform, _default_transform +from pymc.logprob.abstract import _logprob from pymc.math import kron_diag, kron_dot from pymc.util import check_dist_not_registered diff --git a/pymc/distributions/simulator.py b/pymc/distributions/simulator.py index 9cc4d86db20..e180350d364 100644 --- a/pymc/distributions/simulator.py +++ b/pymc/distributions/simulator.py @@ -18,7 +18,6 @@ import aesara.tensor as at import numpy as np -from aeppl.logprob import _logprob from aesara.graph.op import Apply, Op from 
aesara.tensor.random.op import RandomVariable
 from aesara.tensor.var import TensorVariable

@@ -26,6 +25,7 @@

 from pymc.aesaraf import floatX
 from pymc.distributions.distribution import Distribution, _moment
+from pymc.logprob.abstract import _logprob

 __all__ = ["Simulator"]

diff --git a/pymc/distributions/timeseries.py b/pymc/distributions/timeseries.py
index b1578d74a2a..3aed94a4d8d 100644
--- a/pymc/distributions/timeseries.py
+++ b/pymc/distributions/timeseries.py
@@ -21,7 +21,6 @@
 import aesara.tensor as at
 import numpy as np

-from aeppl.logprob import _logprob
 from aesara.graph.basic import Node, clone_replace
 from aesara.tensor import TensorVariable
 from aesara.tensor.random.op import RandomVariable
@@ -43,6 +42,7 @@
     get_support_shape_1d,
 )
 from pymc.exceptions import NotConstantValueError
+from pymc.logprob.abstract import _logprob
 from pymc.util import check_dist_not_registered

 __all__ = [
@@ -180,7 +180,7 @@ def rv_op(cls, init_dist, innovation_dist, steps, size=None):
             innovation_dist.type(),
             steps.type(),
         )
-        # Aeppl can only infer the logp of dimshuffled variables, if the dimshuffle is
+        # We can only infer the logp of dimshuffled variables, if the dimshuffle is
         # done directly on top of a RandomVariable. Because of this we dimshuffle the
         # distributions and only then concatenate them, instead of the other way around.
         # shape = (B, 1, S)
@@ -225,15 +225,15 @@ def random_walk_moment(op, rv, init_dist, innovation_dist, steps):

 @_logprob.register(RandomWalkRV)
 def random_walk_logp(op, values, *inputs, **kwargs):
-    # Although Aeppl can derive the logprob of random walks, it does not collapse
-    # what PyMC considers the core dimension of steps. We do it manually here.
+    # Although we can derive the logprob of random walks, it does not collapse
+    # what we consider the core dimension of steps. We do it manually here.
(value,) = values # Recreate RV and obtain inner graph rv_node = op.make_node(*inputs) rv = clone_replace( op.inner_outputs, replace={u: v for u, v in zip(op.inner_inputs, rv_node.inputs)} )[op.default_output] - # Obtain logp via Aeppl of inner graph and collapse steps dimension + # Obtain logp of the inner graph and collapse steps dimension return logp(rv, value).sum(axis=-1) @@ -561,7 +561,7 @@ def dist( ) init_dist = Normal.dist(0, 100, shape=(*sigma.shape, ar_order)) - # Tell Aeppl to ignore init_dist, as it will be accounted for in the logp term + # We can ignore init_dist, as it will be accounted for in the logp term init_dist = ignore_logprob(init_dist) return super().dist([rhos, sigma, init_dist, steps, ar_order, constant], **kwargs) @@ -780,7 +780,7 @@ def dist(cls, omega, alpha_1, beta_1, initial_vol, *, steps=None, **kwargs): initial_vol = at.as_tensor_variable(initial_vol) init_dist = Normal.dist(0, initial_vol) - # Tell Aeppl to ignore init_dist, as it will be accounted for in the logp term + # We can ignore init_dist, as it will be accounted for in the logp term init_dist = ignore_logprob(init_dist) return super().dist([omega, alpha_1, beta_1, initial_vol, init_dist, steps], **kwargs) @@ -966,7 +966,7 @@ def dist(cls, dt, sde_fn, sde_pars, *, init_dist=None, steps=None, **kwargs): UserWarning, ) init_dist = Normal.dist(0, 100, shape=sde_pars[0].shape) - # Tell Aeppl to ignore init_dist, as it will be accounted for in the logp term + # We can ignore init_dist, as it will be accounted for in the logp term init_dist = ignore_logprob(init_dist) return super().dist([init_dist, steps, sde_pars, dt, sde_fn], **kwargs) diff --git a/pymc/distributions/transforms.py b/pymc/distributions/transforms.py index aaa7936fe91..a693441e625 100644 --- a/pymc/distributions/transforms.py +++ b/pymc/distributions/transforms.py @@ -16,7 +16,14 @@ import aesara.tensor as at import numpy as np -from aeppl.transforms import ( +from aesara.graph import Op +from aesara.tensor import TensorVariable + +# ignore mypy error because it somehow considers that +# "numpy.core.numeric has no attribute normalize_axis_tuple" +from numpy.core.numeric import normalize_axis_tuple # type: ignore + +from pymc.logprob.transforms import ( CircularTransform, IntervalTransform, LogOddsTransform, @@ -24,12 +31,6 @@ RVTransform, SimplexTransform, ) -from aesara.graph import Op -from aesara.tensor import TensorVariable - -# ignore mypy error because it somehow considers that -# "numpy.core.numeric has no attribute normalize_axis_tuple" -from numpy.core.numeric import normalize_axis_tuple # type: ignore __all__ = [ "RVTransform", @@ -207,17 +208,17 @@ def log_jac_det(self, value, *inputs): simplex = SimplexTransform() simplex.__doc__ = """ -Instantiation of :class:`aeppl.transforms.SimplexTransform` +Instantiation of :class:`pymc.logprob.transforms.SimplexTransform` for use in the ``transform`` argument of a random variable.""" logodds = LogOddsTransform() logodds.__doc__ = """ -Instantiation of :class:`aeppl.transforms.LogOddsTransform` +Instantiation of :class:`pymc.logprob.transforms.LogOddsTransform` for use in the ``transform`` argument of a random variable.""" class Interval(IntervalTransform): - """Wrapper around :class:`aeppl.transforms.IntervalTransform` for use in the + """Wrapper around :class:`pymc.logprob.transforms.IntervalTransform` for use in the ``transform`` argument of a random variable. 
Parameters @@ -380,7 +381,7 @@ def extend_axis_rev(array, axis): log = LogTransform() log.__doc__ = """ -Instantiation of :class:`aeppl.transforms.LogTransform` +Instantiation of :class:`pymc.logprob.transforms.LogTransform` for use in the ``transform`` argument of a random variable.""" univariate_sum_to_1 = SumTo1(ndim_supp=0) @@ -403,5 +404,5 @@ def extend_axis_rev(array, axis): circular = CircularTransform() circular.__doc__ = """ -Instantiation of :class:`aeppl.transforms.CircularTransform` +Instantiation of :class:`pymc.logprob.transforms.CircularTransform` for use in the ``transform`` argument of a random variable.""" diff --git a/pymc/distributions/truncated.py b/pymc/distributions/truncated.py index d187597a89b..288da18c821 100644 --- a/pymc/distributions/truncated.py +++ b/pymc/distributions/truncated.py @@ -4,8 +4,6 @@ import aesara.tensor as at import numpy as np -from aeppl.abstract import MeasurableVariable -from aeppl.logprob import _logcdf, _logprob, icdf, logcdf from aesara import scan from aesara.graph import Op from aesara.graph.basic import Node @@ -26,6 +24,7 @@ from pymc.distributions.shape_utils import _change_dist_size, change_dist_size, to_tuple from pymc.distributions.transforms import _default_transform from pymc.exceptions import TruncationError +from pymc.logprob.abstract import MeasurableVariable, _logcdf, _logprob, icdf, logcdf from pymc.math import logdiffexp from pymc.util import check_dist_not_registered diff --git a/pymc/initial_point.py b/pymc/initial_point.py index f2feb8185ff..a0b2514ddcc 100644 --- a/pymc/initial_point.py +++ b/pymc/initial_point.py @@ -20,12 +20,12 @@ import aesara.tensor as at import numpy as np -from aeppl.transforms import RVTransform from aesara.graph.basic import Variable from aesara.graph.fg import FunctionGraph from aesara.tensor.var import TensorVariable from pymc.aesaraf import compile_pymc, find_rng_nodes, replace_rng_nodes, reseed_rngs +from pymc.logprob.transforms import RVTransform from pymc.util import get_transformed_name, get_untransformed_name, is_transformed_name StartDict = Dict[Union[Variable, str], Union[np.ndarray, Variable, str]] diff --git a/pymc/logprob/LICENSE_AEPPL.txt b/pymc/logprob/LICENSE_AEPPL.txt new file mode 100644 index 00000000000..b7d994cfdc1 --- /dev/null +++ b/pymc/logprob/LICENSE_AEPPL.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021-2022 aesara-devs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
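
Since the transforms diff above repeatedly points at `pymc.logprob.transforms`, a short sketch of the `RVTransform` interface those instantiations expose may help. This assumes the `forward`/`backward`/`log_jac_det` method names shown in this patch:

```python
import aesara.tensor as at
from pymc.logprob.transforms import LogTransform

tr = LogTransform()
x = at.dscalar("x")

y = tr.forward(x)            # log(x): maps the positive reals to an unconstrained space
x_back = tr.backward(y)      # exp(log(x)) recovers x: the inverse mapping
log_jac = tr.log_jac_det(y)  # log|det J| of the backward map

print(x_back.eval({x: 2.0}))   # 2.0
print(log_jac.eval({x: 2.0}))  # ~0.6931 == log(2)
```

The `log_jac_det` term is what `TransformValuesRewrite` adds to a transformed variable's logp so the density stays correctly normalized on the unconstrained space.
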
diff --git a/pymc/logprob/__init__.py b/pymc/logprob/__init__.py new file mode 100644 index 00000000000..a71498ae464 --- /dev/null +++ b/pymc/logprob/__init__.py @@ -0,0 +1,52 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from pymc.logprob.abstract import logprob # isort: split + +from pymc.logprob.joint_logprob import factorized_joint_logprob, joint_logprob + +# isort: off +# Add rewrites to the DBs +import pymc.logprob.censoring +import pymc.logprob.cumsum +import pymc.logprob.mixture +import pymc.logprob.scan +import pymc.logprob.tensor +import pymc.logprob.transforms + +# isort: on + +__all__ = () diff --git a/pymc/logprob/abstract.py b/pymc/logprob/abstract.py new file mode 100644 index 00000000000..e5818bd4621 --- /dev/null +++ b/pymc/logprob/abstract.py @@ -0,0 +1,249 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import abc + +from copy import copy +from functools import singledispatch +from typing import Callable, List, Sequence, Tuple + +from aesara.graph.basic import Apply, Variable +from aesara.graph.op import Op +from aesara.graph.utils import MetaType +from aesara.tensor import TensorVariable +from aesara.tensor.elemwise import Elemwise +from aesara.tensor.random.op import RandomVariable + + +def logprob(rv_var, *rv_values, **kwargs): + """Create a graph for the log-probability of a ``RandomVariable``.""" + logprob = _logprob(rv_var.owner.op, rv_values, *rv_var.owner.inputs, **kwargs) + + for rv_var in rv_values: + if rv_var.name: + logprob.name = f"{rv_var.name}_logprob" + + return logprob + + +def logcdf(rv_var, rv_value, **kwargs): + """Create a graph for the logcdf of a ``RandomVariable``.""" + logcdf = _logcdf(rv_var.owner.op, rv_value, *rv_var.owner.inputs, name=rv_var.name, **kwargs) + + if rv_var.name: + logcdf.name = f"{rv_var.name}_logcdf" + + return logcdf + + +def icdf(rv, value, **kwargs): + """Create a graph for the inverse CDF of a `RandomVariable`.""" + rv_icdf = _icdf(rv.owner.op, value, *rv.owner.inputs, **kwargs) + if rv.name: + rv_icdf.name = f"{rv.name}_icdf" + return rv_icdf + + +@singledispatch +def _logprob( + op: Op, + values: Sequence[TensorVariable], + *inputs: TensorVariable, + **kwargs, +): + """Create a graph for the log-density/mass of a ``RandomVariable``. + + This function dispatches on the type of ``op``, which should be a subclass + of ``RandomVariable``. If you want to implement new density/mass graphs + for a ``RandomVariable``, register a new function on this dispatcher. + + """ + raise NotImplementedError(f"Logprob method not implemented for {op}") + + +@singledispatch +def _logcdf( + op: Op, + value: TensorVariable, + *inputs: TensorVariable, + **kwargs, +): + """Create a graph for the logcdf of a ``RandomVariable``. + + This function dispatches on the type of ``op``, which should be a subclass + of ``RandomVariable``. If you want to implement new logcdf graphs + for a ``RandomVariable``, register a new function on this dispatcher. + """ + raise NotImplementedError(f"Logcdf method not implemented for {op}") + + +@singledispatch +def _icdf( + op: Op, + value: TensorVariable, + *inputs: TensorVariable, + **kwargs, +): + """Create a graph for the inverse CDF of a `RandomVariable`. 
+ + This function dispatches on the type of `op`, which should be a subclass + of `RandomVariable`. + """ + raise NotImplementedError(f"icdf not implemented for {op}") + + +class MeasurableVariable(abc.ABC): + """A variable that can be assigned a measure/log-probability""" + + +MeasurableVariable.register(RandomVariable) + + +class UnmeasurableMeta(MetaType): + def __new__(cls, name, bases, dict): + if "id_obj" not in dict: + dict["id_obj"] = None + + return super().__new__(cls, name, bases, dict) + + def __eq__(self, other): + if isinstance(other, UnmeasurableMeta): + return hash(self.id_obj) == hash(other.id_obj) + return False + + def __hash__(self): + return hash(self.id_obj) + + +class UnmeasurableVariable(metaclass=UnmeasurableMeta): + """ + id_obj is an attribute, i.e. tuple of length two, of the unmeasurable class object. + e.g. id_obj = (NormalRV, noop_measurable_outputs_fn) + """ + + +def get_measurable_outputs(op: Op, node: Apply) -> List[Variable]: + """Return only the outputs that are measurable.""" + if isinstance(op, MeasurableVariable): + return _get_measurable_outputs(op, node) + else: + return [] + + +@singledispatch +def _get_measurable_outputs(op, node): + return node.outputs + + +@_get_measurable_outputs.register(RandomVariable) +def _get_measurable_outputs_RandomVariable(op, node): + return node.outputs[1:] + + +def noop_measurable_outputs_fn(*args, **kwargs): + return None + + +def assign_custom_measurable_outputs( + node: Apply, + measurable_outputs_fn: Callable = noop_measurable_outputs_fn, + type_prefix: str = "Unmeasurable", +) -> Apply: + """Assign a custom ``_get_measurable_outputs`` dispatch function to a measurable variable instance. + + The node is cloned and a custom `Op` that's a copy of the original node's + `Op` is created. That custom `Op` replaces the old `Op` in the cloned + node, and then a custom dispatch implementation is created for the clone + `Op` in `_get_measurable_outputs`. + + If `measurable_outputs_fn` isn't specified, a no-op is used; the result is + a clone of `node` that will effectively be ignored by + `factorized_joint_logprob`. + + Parameters + ========== + node + The node to recreate with a new cloned `Op`. + measurable_outputs_fn + The function that will be assigned to the new cloned `Op` in the + `_get_measurable_outputs` dispatcher. + The default is a no-op function (i.e. no measurable outputs) + type_prefix + The prefix used for the new type's name. + The default is ``"Unmeasurable"``, which matches the default + ``"measurable_outputs_fn"``. + """ + + new_node = node.clone() + op_type = type(new_node.op) + + if op_type in _get_measurable_outputs.registry.keys() and isinstance(op_type, UnmeasurableMeta): + if _get_measurable_outputs.registry[op_type] != measurable_outputs_fn: + raise ValueError( + f"The type {op_type.__name__} with hash value {hash(op_type)} " + "has already been dispatched a measurable outputs function." + ) + return node + + new_op_dict = op_type.__dict__.copy() + new_op_dict["id_obj"] = (new_node.op, measurable_outputs_fn) + + new_op_type = type( + f"{type_prefix}{op_type.__name__}", (op_type, UnmeasurableVariable), new_op_dict + ) + new_node.op = copy(new_node.op) + new_node.op.__class__ = new_op_type + + _get_measurable_outputs.register(new_op_type)(measurable_outputs_fn) + + return new_node + + +class MeasurableElemwise(Elemwise): + """Base class for Measurable Elemwise variables""" + + valid_scalar_types: Tuple[MetaType, ...] 
= () + + def __init__(self, scalar_op, *args, **kwargs): + if not isinstance(scalar_op, self.valid_scalar_types): + raise TypeError( + f"scalar_op {scalar_op} is not valid for class {self.__class__}. " + f"Acceptable types are {self.valid_scalar_types}" + ) + super().__init__(scalar_op, *args, **kwargs) + + +MeasurableVariable.register(MeasurableElemwise) diff --git a/pymc/logprob/censoring.py b/pymc/logprob/censoring.py new file mode 100644 index 00000000000..8beeb934545 --- /dev/null +++ b/pymc/logprob/censoring.py @@ -0,0 +1,285 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
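Editor's note: the ``abstract.py`` dispatchers above are the extension point for everything that follows; new densities are added by registering on ``_logprob``. A minimal sketch, assuming the standard ``RandomVariable`` input convention ``(rng, size, dtype, *params)``; the ``ExponentialToyRV`` Op is hypothetical, not part of this patch:

.. code-block:: python

    import aesara.tensor as at

    from aesara.tensor.random.op import RandomVariable

    from pymc.logprob.abstract import _logprob, logprob


    class ExponentialToyRV(RandomVariable):
        name = "exp_toy"
        ndim_supp = 0
        ndims_params = [0]
        dtype = "floatX"

        @classmethod
        def rng_fn(cls, rng, lam, size):
            return rng.exponential(scale=1.0 / lam, size=size)


    exp_toy = ExponentialToyRV()


    @_logprob.register(ExponentialToyRV)
    def exp_toy_logprob(op, values, rng, size, dtype, lam, **kwargs):
        (value,) = values
        # log f(x) = log(lam) - lam * x (support checks omitted)
        return at.log(lam) - lam * value


    x_rv = exp_toy(2.0)
    x_vv = at.scalar("x_vv")
    x_logp = logprob(x_rv, x_vv)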
+
+from typing import List, Optional
+
+import aesara.tensor as at
+import numpy as np
+
+from aesara.graph.basic import Node
+from aesara.graph.fg import FunctionGraph
+from aesara.graph.rewriting.basic import node_rewriter
+from aesara.scalar.basic import Ceil, Clip, Floor, RoundHalfToEven
+from aesara.scalar.basic import clip as scalar_clip
+from aesara.tensor.elemwise import Elemwise
+from aesara.tensor.var import TensorConstant
+
+from pymc.logprob.abstract import (
+    MeasurableElemwise,
+    MeasurableVariable,
+    _logcdf,
+    _logprob,
+    assign_custom_measurable_outputs,
+)
+from pymc.logprob.rewriting import measurable_ir_rewrites_db
+from pymc.logprob.utils import CheckParameterValue
+
+
+class MeasurableClip(MeasurableElemwise):
+    """A placeholder used to specify a log-likelihood for a clipped RV sub-graph."""
+
+    valid_scalar_types = (Clip,)
+
+
+measurable_clip = MeasurableClip(scalar_clip)
+
+
+@node_rewriter(tracks=[Elemwise])
+def find_measurable_clips(fgraph: FunctionGraph, node: Node) -> Optional[List[MeasurableClip]]:
+    # TODO: Canonicalize x[x>ub] = ub -> clip(x, x, ub)
+
+    rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None)
+    if rv_map_feature is None:
+        return None  # pragma: no cover
+
+    if isinstance(node.op, MeasurableClip):
+        return None  # pragma: no cover
+
+    if not (isinstance(node.op, Elemwise) and isinstance(node.op.scalar_op, Clip)):
+        return None
+
+    clipped_var = node.outputs[0]
+    base_var, lower_bound, upper_bound = node.inputs
+
+    if not (
+        base_var.owner
+        and isinstance(base_var.owner.op, MeasurableVariable)
+        and base_var not in rv_map_feature.rv_values
+    ):
+        return None
+
+    # Replace bounds by `+-inf` if `y = clip(x, x, ?)` or `y = clip(x, ?, x)`
+    # This is used in `clip_logprob` to generate a more succinct logprob graph
+    # for one-sided clipped random variables
+    lower_bound = lower_bound if (lower_bound is not base_var) else at.constant(-np.inf)
+    upper_bound = upper_bound if (upper_bound is not base_var) else at.constant(np.inf)
+
+    # Make base_var unmeasurable
+    unmeasurable_base_var = assign_custom_measurable_outputs(base_var.owner)
+    clipped_rv_node = measurable_clip.make_node(unmeasurable_base_var, lower_bound, upper_bound)
+    clipped_rv = clipped_rv_node.outputs[0]
+
+    clipped_rv.name = clipped_var.name
+
+    return [clipped_rv]
+
+
+measurable_ir_rewrites_db.register(
+    "find_measurable_clips",
+    find_measurable_clips,
+    0,
+    "basic",
+    "censoring",
+)
+
+
+@_logprob.register(MeasurableClip)
+def clip_logprob(op, values, base_rv, lower_bound, upper_bound, **kwargs):
+    r"""Logprob of a clipped censored distribution.
+
+    The probability is given by
+
+    .. math::
+        \begin{cases}
+            0 & \text{for } x < lower, \\
+            \text{CDF}(lower, dist) & \text{for } x = lower, \\
+            \text{P}(x, dist) & \text{for } lower < x < upper, \\
+            1-\text{CDF}(upper, dist) & \text{for } x = upper, \\
+            0 & \text{for } x > upper,
+        \end{cases}
+
+    """
+    (value,) = values
+
+    base_rv_op = base_rv.owner.op
+    base_rv_inputs = base_rv.owner.inputs
+
+    logprob = _logprob(base_rv_op, (value,), *base_rv_inputs, **kwargs)
+    logcdf = _logcdf(base_rv_op, value, *base_rv_inputs, **kwargs)
+
+    if base_rv_op.name:
+        logprob.name = f"{base_rv_op}_logprob"
+        logcdf.name = f"{base_rv_op}_logcdf"
+
+    is_lower_bounded, is_upper_bounded = False, False
+    if not (isinstance(upper_bound, TensorConstant) and np.all(np.isinf(upper_bound.value))):
+        is_upper_bounded = True
+
+        logccdf = at.log1mexp(logcdf)
+        # For right clipped discrete RVs, we need to add an extra term
+        # corresponding to the pmf at the upper bound
+        if base_rv.dtype.startswith("int"):
+            logccdf = at.logaddexp(logccdf, logprob)
+
+        logprob = at.switch(
+            at.eq(value, upper_bound),
+            logccdf,
+            at.switch(at.gt(value, upper_bound), -np.inf, logprob),
+        )
+    if not (isinstance(lower_bound, TensorConstant) and np.all(np.isneginf(lower_bound.value))):
+        is_lower_bounded = True
+        logprob = at.switch(
+            at.eq(value, lower_bound),
+            logcdf,
+            at.switch(at.lt(value, lower_bound), -np.inf, logprob),
+        )
+
+    if is_lower_bounded and is_upper_bounded:
+        logprob = CheckParameterValue("lower_bound <= upper_bound")(
+            logprob, at.all(at.le(lower_bound, upper_bound))
+        )
+
+    return logprob
+
+
+class MeasurableRound(MeasurableElemwise):
+    """A placeholder used to specify a log-likelihood for a rounded RV sub-graph."""
+
+    valid_scalar_types = (RoundHalfToEven, Floor, Ceil)
+
+
+@node_rewriter(tracks=[Elemwise])
+def find_measurable_roundings(fgraph: FunctionGraph, node: Node) -> Optional[List[MeasurableRound]]:
+
+    rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None)
+    if rv_map_feature is None:
+        return None  # pragma: no cover
+
+    if isinstance(node.op, MeasurableRound):
+        return None  # pragma: no cover
+
+    if not (
+        isinstance(node.op, Elemwise)
+        and isinstance(node.op.scalar_op, MeasurableRound.valid_scalar_types)
+    ):
+        return None
+
+    (rounded_var,) = node.outputs
+    (base_var,) = node.inputs
+
+    if not (
+        base_var.owner
+        and isinstance(base_var.owner.op, MeasurableVariable)
+        and base_var not in rv_map_feature.rv_values
+        # Rounding only makes sense for continuous variables
+        and base_var.dtype.startswith("float")
+    ):
+        return None
+
+    # Make base_var unmeasurable
+    unmeasurable_base_var = assign_custom_measurable_outputs(base_var.owner)
+
+    rounded_op = MeasurableRound(node.op.scalar_op)
+    rounded_rv = rounded_op.make_node(unmeasurable_base_var).default_output()
+    rounded_rv.name = rounded_var.name
+    return [rounded_rv]
+
+
+measurable_ir_rewrites_db.register(
+    "find_measurable_roundings",
+    find_measurable_roundings,
+    0,
+    "basic",
+    "censoring",
+)
+
+
+@_logprob.register(MeasurableRound)
+def round_logprob(op, values, base_rv, **kwargs):
+    r"""Logprob of a rounded censored distribution.
+
+    The probability of a distribution rounded to the nearest integer is given by
+
+    .. math::
+        \begin{cases}
+            \text{CDF}(x+\frac{1}{2}, dist) - \text{CDF}(x-\frac{1}{2}, dist) & \text{for } x \in \mathbb{Z}, \\
+            0 & \text{otherwise},
+        \end{cases}
+
+    The probability of a distribution rounded up is given by
+
+    .. math::
+        \begin{cases}
+            \text{CDF}(x, dist) - \text{CDF}(x-1, dist) & \text{for } x \in \mathbb{Z}, \\
+            0 & \text{otherwise},
+        \end{cases}
+
+    The probability of a distribution rounded down is given by
+
+    .. math::
+        \begin{cases}
+            \text{CDF}(x+1, dist) - \text{CDF}(x, dist) & \text{for } x \in \mathbb{Z}, \\
+            0 & \text{otherwise},
+        \end{cases}
+
+    """
+    (value,) = values
+
+    if isinstance(op.scalar_op, RoundHalfToEven):
+        value = at.round(value)
+        value_upper = value + 0.5
+        value_lower = value - 0.5
+    elif isinstance(op.scalar_op, Floor):
+        value = at.floor(value)
+        value_upper = value + 1.0
+        value_lower = value
+    elif isinstance(op.scalar_op, Ceil):
+        value = at.ceil(value)
+        value_upper = value
+        value_lower = value - 1.0
+    else:
+        raise TypeError(f"Unsupported scalar_op {op.scalar_op}")  # pragma: no cover
+
+    base_rv_op = base_rv.owner.op
+    base_rv_inputs = base_rv.owner.inputs
+
+    logcdf_upper = _logcdf(base_rv_op, value_upper, *base_rv_inputs, **kwargs)
+    logcdf_lower = _logcdf(base_rv_op, value_lower, *base_rv_inputs, **kwargs)
+
+    if base_rv_op.name:
+        logcdf_upper.name = f"{base_rv_op}_logcdf_upper"
+        logcdf_lower.name = f"{base_rv_op}_logcdf_lower"
+
+    # TODO: Figure out better solution to avoid this circular import
+    from pymc.math import logdiffexp
+
+    return logdiffexp(logcdf_upper, logcdf_lower)
diff --git a/pymc/logprob/cumsum.py b/pymc/logprob/cumsum.py
new file mode 100644
index 00000000000..5c070b8b044
--- /dev/null
+++ b/pymc/logprob/cumsum.py
@@ -0,0 +1,128 @@
+# Copyright 2022- The PyMC Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# MIT License
+#
+# Copyright (c) 2021-2022 aesara-devs
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
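Editor's note, before the ``cumsum.py`` body: a usage sketch of the censoring support just added (variable names are illustrative). ``at.clip`` on an otherwise unvalued normal is rewritten to a ``MeasurableClip``, whose logprob places the censored tail mass on the bounds:

.. code-block:: python

    import aesara.tensor as at

    from pymc.logprob import joint_logprob

    x_rv = at.random.normal(0.0, 1.0, name="x")
    cens_x_rv = at.clip(x_rv, -1.0, 1.0)
    cens_x_rv.name = "cens_x"

    cens_x_vv = at.scalar("cens_x")
    logp = joint_logprob({cens_x_rv: cens_x_vv})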
+ +from typing import List, Optional + +import aesara.tensor as at + +from aesara.graph.rewriting.basic import node_rewriter +from aesara.tensor.extra_ops import CumOp + +from pymc.logprob.abstract import ( + MeasurableVariable, + _logprob, + assign_custom_measurable_outputs, + logprob, +) +from pymc.logprob.rewriting import PreserveRVMappings, measurable_ir_rewrites_db + + +class MeasurableCumsum(CumOp): + """A placeholder used to specify a log-likelihood for a cumsum sub-graph.""" + + +MeasurableVariable.register(MeasurableCumsum) + + +@_logprob.register(MeasurableCumsum) +def logprob_cumsum(op, values, base_rv, **kwargs): + """Compute the log-likelihood graph for a `Cumsum`.""" + (value,) = values + + value_diff = at.diff(value, axis=op.axis) + value_diff = at.concatenate( + ( + # Take first element of axis and add a broadcastable dimension so + # that it can be concatenated with the rest of value_diff + at.shape_padaxis( + at.take(value, 0, axis=op.axis), + axis=op.axis, + ), + value_diff, + ), + axis=op.axis, + ) + + cumsum_logp = logprob(base_rv, value_diff) + + return cumsum_logp + + +@node_rewriter([CumOp]) +def find_measurable_cumsums(fgraph, node) -> Optional[List[MeasurableCumsum]]: + r"""Finds `Cumsums`\s for which a `logprob` can be computed.""" + + if not (isinstance(node.op, CumOp) and node.op.mode == "add"): + return None # pragma: no cover + + if isinstance(node.op, MeasurableCumsum): + return None # pragma: no cover + + rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, "preserve_rv_mappings", None) + + if rv_map_feature is None: + return None # pragma: no cover + + rv = node.outputs[0] + + base_rv = node.inputs[0] + if not ( + base_rv.owner + and isinstance(base_rv.owner.op, MeasurableVariable) + and base_rv not in rv_map_feature.rv_values + ): + return None # pragma: no cover + + # Check that cumsum does not mix dimensions + if base_rv.ndim > 1 and node.op.axis is None: + return None + + new_op = MeasurableCumsum(axis=node.op.axis or 0, mode="add") + # Make base_var unmeasurable + unmeasurable_base_rv = assign_custom_measurable_outputs(base_rv.owner) + new_rv = new_op.make_node(unmeasurable_base_rv).default_output() + new_rv.name = rv.name + + return [new_rv] + + +measurable_ir_rewrites_db.register( + "find_measurable_cumsums", + find_measurable_cumsums, + 0, + "basic", + "cumsum", +) diff --git a/pymc/logprob/joint_logprob.py b/pymc/logprob/joint_logprob.py new file mode 100644 index 00000000000..aa23a0268ee --- /dev/null +++ b/pymc/logprob/joint_logprob.py @@ -0,0 +1,254 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
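Editor's note: a matching sketch for the cumsum support above (names illustrative). The logprob of a cumulative sum is the base RV's iid logprob evaluated at the first differences of the value, with the first element kept as-is:

.. code-block:: python

    import aesara.tensor as at

    from pymc.logprob import joint_logprob

    x_rv = at.random.normal(0.0, 1.0, size=5)
    y_rv = at.cumsum(x_rv)

    y_vv = at.vector("y")
    logp = joint_logprob({y_rv: y_vv})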
+# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import warnings + +from collections import deque +from typing import Dict, Optional, Union + +import aesara.tensor as at + +from aesara import config +from aesara.graph.basic import graph_inputs, io_toposort +from aesara.graph.op import compute_test_value +from aesara.graph.rewriting.basic import GraphRewriter, NodeRewriter +from aesara.tensor.var import TensorVariable + +from pymc.logprob.abstract import _logprob, get_measurable_outputs +from pymc.logprob.rewriting import construct_ir_fgraph +from pymc.logprob.utils import rvs_to_value_vars + + +def factorized_joint_logprob( + rv_values: Dict[TensorVariable, TensorVariable], + warn_missing_rvs: bool = True, + ir_rewriter: Optional[GraphRewriter] = None, + extra_rewrites: Optional[Union[GraphRewriter, NodeRewriter]] = None, + **kwargs, +) -> Dict[TensorVariable, TensorVariable]: + r"""Create a map between variables and their log-probabilities such that the + sum is their joint log-probability. + + The `rv_values` dictionary specifies a joint probability graph defined by + pairs of random variables and respective measure-space input parameters + + For example, consider the following + + .. code-block:: python + + import aesara.tensor as at + + sigma2_rv = at.random.invgamma(0.5, 0.5) + Y_rv = at.random.normal(0, at.sqrt(sigma2_rv)) + + This graph for ``Y_rv`` is equivalent to the following hierarchical model: + + .. math:: + + \sigma^2 \sim& \operatorname{InvGamma}(0.5, 0.5) \\ + Y \sim& \operatorname{N}(0, \sigma^2) + + If we create a value variable for ``Y_rv``, i.e. ``y_vv = at.scalar("y")``, + the graph of ``factorized_joint_logprob({Y_rv: y_vv})`` is equivalent to the + conditional probability :math:`\log p(Y = y \mid \sigma^2)`, with a stochastic + ``sigma2_rv``. If we specify a value variable for ``sigma2_rv``, i.e. + ``s_vv = at.scalar("s2")``, then ``factorized_joint_logprob({Y_rv: y_vv, sigma2_rv: s_vv})`` + yields the joint log-probability of the two variables. + + .. math:: + + \log p(Y = y, \sigma^2 = s) = + \log p(Y = y \mid \sigma^2 = s) + \log p(\sigma^2 = s) + + + Parameters + ========== + rv_values + A ``dict`` of variables that maps stochastic elements + (e.g. `RandomVariable`\s) to symbolic `Variable`\s representing their + values in a log-probability. 
+ warn_missing_rvs + When ``True``, issue a warning when a `RandomVariable` is found in + the graph and doesn't have a corresponding value variable specified in + `rv_values`. + ir_rewriter + Rewriter that produces the intermediate representation of Measurable Variables. + extra_rewrites + Extra rewrites to be applied (e.g. reparameterizations, transforms, + etc.) + + Returns + ======= + A ``dict`` that maps each value variable to the log-probability factor derived + from the respective `RandomVariable`. + + """ + fgraph, rv_values, _ = construct_ir_fgraph(rv_values, ir_rewriter=ir_rewriter) + + if extra_rewrites is not None: + extra_rewrites.rewrite(fgraph) + + rv_remapper = fgraph.preserve_rv_mappings + + # This is the updated random-to-value-vars map with the lifted/rewritten + # variables. The rewrites are supposed to produce new + # `MeasurableVariable`s that are amenable to `_logprob`. + updated_rv_values = rv_remapper.rv_values + + # Some rewrites also transform the original value variables. This is the + # updated map from the new value variables to the original ones, which + # we want to use as the keys in the final dictionary output + original_values = rv_remapper.original_values + + # When a `_logprob` has been produced for a `MeasurableVariable` node, all + # other references to it need to be replaced with its value-variable all + # throughout the `_logprob`-produced graphs. The following `dict` + # cumulatively maintains remappings for all the variables/nodes that needed + # to be recreated after replacing `MeasurableVariable`s with their + # value-variables. Since these replacements work in topological order, all + # the necessary value-variable replacements should be present for each + # node. + replacements = updated_rv_values.copy() + + # To avoid cloning the value variables, we map them to themselves in the + # `replacements` `dict` (i.e. entries already existing in `replacements` + # aren't cloned) + replacements.update({v: v for v in rv_values.values()}) + + # Walk the graph from its inputs to its outputs and construct the + # log-probability + q = deque(fgraph.toposort()) + + logprob_vars = {} + + while q: + node = q.popleft() + + outputs = get_measurable_outputs(node.op, node) + + if not outputs: + continue + + if any(o not in updated_rv_values for o in outputs): + if warn_missing_rvs: + warnings.warn( + "Found a random variable that was neither among the observations " + f"nor the conditioned variables: {node.outputs}" + ) + continue + + q_value_vars = [replacements[q_rv_var] for q_rv_var in outputs] + + if not q_value_vars: + continue + + # Replace `RandomVariable`s in the inputs with value variables. + # Also, store the results in the `replacements` map for the nodes + # that follow. 
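+        # E.g. if `Y_rv`'s inputs contain `sigma_rv` and `sigma_rv` is also
+        # valued, the `sigma_rv` input is swapped for `sigma_rv`'s value
+        # variable here, which is what makes each returned factor a
+        # conditional density given the other value variables.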
+ remapped_vars, _ = rvs_to_value_vars( + q_value_vars + list(node.inputs), + initial_replacements=replacements, + ) + q_value_vars = remapped_vars[: len(q_value_vars)] + q_rv_inputs = remapped_vars[len(q_value_vars) :] + + q_logprob_vars = _logprob( + node.op, + q_value_vars, + *q_rv_inputs, + **kwargs, + ) + + if not isinstance(q_logprob_vars, (list, tuple)): + q_logprob_vars = [q_logprob_vars] + + for q_value_var, q_logprob_var in zip(q_value_vars, q_logprob_vars): + + q_value_var = original_values[q_value_var] + + if q_value_var.name: + q_logprob_var.name = f"{q_value_var.name}_logprob" + + if q_value_var in logprob_vars: + raise ValueError( + f"More than one logprob factor was assigned to the value var {q_value_var}" + ) + + logprob_vars[q_value_var] = q_logprob_var + + # Recompute test values for the changes introduced by the + # replacements above. + if config.compute_test_value != "off": + for node in io_toposort(graph_inputs(q_logprob_vars), q_logprob_vars): + compute_test_value(node) + + missing_value_terms = set(original_values.values()) - set(logprob_vars.keys()) + if missing_value_terms: + raise RuntimeError( + f"The logprob terms of the following value variables could not be derived: {missing_value_terms}" + ) + + return logprob_vars + + +def joint_logprob(*args, sum: bool = True, **kwargs) -> Optional[TensorVariable]: + """Create a graph representing the joint log-probability/measure of a graph. + + This function calls `factorized_joint_logprob` and returns the combined + log-probability factors as a single graph. + + Parameters + ---------- + sum: bool + If ``True`` each factor is collapsed to a scalar via ``sum`` before + being joined with the remaining factors. This may be necessary to + avoid incorrect broadcasting among independent factors. + + """ + logprob = factorized_joint_logprob(*args, **kwargs) + if not logprob: + return None + elif len(logprob) == 1: + logprob = tuple(logprob.values())[0] + if sum: + return at.sum(logprob) + else: + return logprob + else: + if sum: + return at.sum([at.sum(factor) for factor in logprob.values()]) + else: + return at.add(*logprob.values()) diff --git a/pymc/logprob/mixture.py b/pymc/logprob/mixture.py new file mode 100644 index 00000000000..973cc46b1d8 --- /dev/null +++ b/pymc/logprob/mixture.py @@ -0,0 +1,467 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
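Editor's note, continuing the docstring example above: the factorized form returns one logprob term per value variable, which the scalar ``joint_logprob`` wrapper then sums. A sketch:

.. code-block:: python

    import aesara.tensor as at

    from pymc.logprob import factorized_joint_logprob

    sigma2_rv = at.random.invgamma(0.5, 0.5)
    Y_rv = at.random.normal(0, at.sqrt(sigma2_rv))

    s_vv = at.scalar("s2")
    y_vv = at.scalar("y")

    logp_terms = factorized_joint_logprob({sigma2_rv: s_vv, Y_rv: y_vv})
    # One factor per value variable: {s_vv: ..., y_vv: ...}
    total_logp = at.sum([at.sum(term) for term in logp_terms.values()])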
+# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from typing import List, Optional, Tuple, Union, cast + +import aesara +import aesara.tensor as at + +from aesara.graph.basic import Apply, Variable +from aesara.graph.fg import FunctionGraph +from aesara.graph.op import Op, compute_test_value +from aesara.graph.rewriting.basic import ( + EquilibriumGraphRewriter, + node_rewriter, + pre_greedy_node_rewriter, +) +from aesara.ifelse import ifelse +from aesara.scalar.basic import Switch +from aesara.tensor.basic import Join, MakeVector +from aesara.tensor.elemwise import Elemwise +from aesara.tensor.random.rewriting import ( + local_dimshuffle_rv_lift, + local_subtensor_rv_lift, +) +from aesara.tensor.shape import shape_tuple +from aesara.tensor.subtensor import ( + as_index_literal, + as_nontensor_scalar, + get_canonical_form_slice, + is_basic_idx, +) +from aesara.tensor.type import TensorType +from aesara.tensor.type_other import NoneConst, NoneTypeT, SliceType +from aesara.tensor.var import TensorVariable + +from pymc.logprob.abstract import ( + MeasurableVariable, + _logprob, + assign_custom_measurable_outputs, + logprob, +) +from pymc.logprob.rewriting import ( + local_lift_DiracDelta, + logprob_rewrites_db, + subtensor_ops, +) +from pymc.logprob.tensor import naive_bcast_rv_lift + + +def is_newaxis(x): + return isinstance(x, type(None)) or isinstance(getattr(x, "type", None), NoneTypeT) + + +def expand_indices( + indices: Tuple[Optional[Union[Variable, slice]], ...], shape: Tuple[TensorVariable] +) -> Tuple[TensorVariable]: + """Convert basic and/or advanced indices into a single, broadcasted advanced indexing operation. + + Parameters + ---------- + indices + The indices to convert. + shape + The shape of the array being indexed. + + """ + n_non_newaxis = sum(1 for idx in indices if not is_newaxis(idx)) + n_missing_dims = len(shape) - n_non_newaxis + full_indices = list(indices) + [slice(None)] * n_missing_dims + + # We need to know if a "subspace" was generated by advanced indices + # bookending basic indices. If so, we move the advanced indexing subspace + # to the "front" of the shape (i.e. left-most indices/last-most + # dimensions). 
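+    # This mirrors NumPy's rule for advanced indexing: e.g. for ``x`` with
+    # shape ``(2, 3, 4)``, ``x[[0, 1], :, [2, 3]]`` has shape ``(2, 3)``,
+    # because the two advanced indices are separated by a basic index and
+    # their broadcasted subspace ``(2,)`` is moved to the front.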
index_types = [is_basic_idx(idx) for idx in full_indices]
+
+    first_adv_idx = len(shape)
+    try:
+        first_adv_idx = index_types.index(False)
+        first_bsc_after_adv_idx = index_types.index(True, first_adv_idx)
+        index_types.index(False, first_bsc_after_adv_idx)
+        moved_subspace = True
+    except ValueError:
+        moved_subspace = False
+
+    n_basic_indices = sum(index_types)
+
+    # The number of dimensions in the subspace created by the advanced indices
+    n_subspace_dims = max(
+        (
+            getattr(idx, "ndim", 0)
+            for idx, is_basic in zip(full_indices, index_types)
+            if not is_basic
+        ),
+        default=0,
+    )
+
+    # The number of dimensions for each expanded index
+    n_output_dims = n_subspace_dims + n_basic_indices
+
+    adv_indices = []
+    shape_copy = list(shape)
+    n_preceding_basics = 0
+    for d, idx in enumerate(full_indices):
+
+        if not is_basic_idx(idx):
+            s = shape_copy.pop(0)
+
+            idx = at.as_tensor(idx)
+
+            if moved_subspace:
+                # The subspace generated by the advanced indices appears as
+                # the upper dimensions in the "expanded" index space, so we
+                # need to add broadcast dimensions for the non-basic indices
+                # to the end of these advanced indices
+                expanded_idx = idx[(Ellipsis,) + (None,) * n_basic_indices]
+            else:
+                # In this case, we need to add broadcast dimensions for the
+                # basic indices that precede and follow the group of advanced
+                # indices; otherwise, a contiguous group of advanced indices
+                # forms a broadcasted set of indices that are iterated over
+                # within the same subspace, which means that all their
+                # corresponding "expanded" indices have exactly the same shape.
+                expanded_idx = idx[(None,) * n_preceding_basics][
+                    (Ellipsis,) + (None,) * (n_basic_indices - n_preceding_basics)
+                ]
+        else:
+            if is_newaxis(idx):
+                n_preceding_basics += 1
+                continue
+
+            s = shape_copy.pop(0)
+
+            if isinstance(idx, slice) or isinstance(getattr(idx, "type", None), SliceType):
+                idx = as_index_literal(idx)
+                idx_slice, _ = get_canonical_form_slice(idx, s)
+                idx = at.arange(idx_slice.start, idx_slice.stop, idx_slice.step)
+
+            if moved_subspace:
+                # Basic indices appear in the lower dimensions
+                # (i.e. right-most) in the output, and are preceded by
+                # the subspace generated by the advanced indices.
+                expanded_idx = idx[(None,) * (n_subspace_dims + n_preceding_basics)][
+                    (Ellipsis,) + (None,) * (n_basic_indices - n_preceding_basics - 1)
+                ]
+            else:
+                # In this case, we need to know when the basic indices have
+                # moved past the contiguous group of advanced indices (in the
+                # "expanded" index space), so that we can properly pad those
+                # dimensions in this basic index's shape.
+                # Don't forget that a single advanced index can introduce an
+                # arbitrary number of dimensions to the expanded index space.
+
+                # If we're currently at a basic index that's past the first
+                # advanced index, then we're necessarily past the group of
+                # advanced indices.
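+                # (The advanced indices form a single contiguous group in
+                # this branch; a split group would have set `moved_subspace`
+                # above.)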
+ n_preceding_dims = ( + n_subspace_dims if d > first_adv_idx else 0 + ) + n_preceding_basics + expanded_idx = idx[(None,) * n_preceding_dims][ + (Ellipsis,) + (None,) * (n_output_dims - n_preceding_dims - 1) + ] + + n_preceding_basics += 1 + + assert expanded_idx.ndim <= n_output_dims + + adv_indices.append(expanded_idx) + + return cast(Tuple[TensorVariable], tuple(at.broadcast_arrays(*adv_indices))) + + +def rv_pull_down(x: TensorVariable, dont_touch_vars=None) -> TensorVariable: + """Pull a ``RandomVariable`` ``Op`` down through a graph, when possible.""" + fgraph = FunctionGraph(outputs=dont_touch_vars or [], clone=False) + + return pre_greedy_node_rewriter( + fgraph, + [ + local_dimshuffle_rv_lift, + local_subtensor_rv_lift, + naive_bcast_rv_lift, + local_lift_DiracDelta, + ], + x, + ) + + +class MixtureRV(Op): + """A placeholder used to specify a log-likelihood for a mixture sub-graph.""" + + __props__ = ("indices_end_idx", "out_dtype", "out_broadcastable") + + def __init__(self, indices_end_idx, out_dtype, out_broadcastable): + super().__init__() + self.indices_end_idx = indices_end_idx + self.out_dtype = out_dtype + self.out_broadcastable = out_broadcastable + + def make_node(self, *inputs): + return Apply(self, list(inputs), [TensorType(self.out_dtype, self.out_broadcastable)()]) + + def perform(self, node, inputs, outputs): + raise NotImplementedError("This is a stand-in Op.") # pragma: no cover + + +MeasurableVariable.register(MixtureRV) + + +def get_stack_mixture_vars( + node: Apply, +) -> Tuple[Optional[List[TensorVariable]], Optional[int]]: + r"""Extract the mixture terms from a `*Subtensor*` applied to stacked `MeasurableVariable`\s.""" + if not isinstance(node.op, subtensor_ops): + return None, None # pragma: no cover + + join_axis = NoneConst + joined_rvs = node.inputs[0] + + # First, make sure that it's some sort of concatenation + if not (joined_rvs.owner and isinstance(joined_rvs.owner.op, (MakeVector, Join))): + # Node is not a compatible join `Op` + return None, join_axis # pragma: no cover + + if isinstance(joined_rvs.owner.op, MakeVector): + mixture_rvs = joined_rvs.owner.inputs + + elif isinstance(joined_rvs.owner.op, Join): + mixture_rvs = joined_rvs.owner.inputs[1:] + join_axis = joined_rvs.owner.inputs[0] + try: + # TODO: Find better solution to avoid this circular dependency + from pymc.aesaraf import constant_fold + + join_axis = int(constant_fold((join_axis,))[0]) + except ValueError: + # TODO: Support symbolic join axes + raise NotImplementedError("Symbolic `Join` axes are not supported in mixtures") + + join_axis = at.as_tensor(join_axis) + + if not all(rv.owner and isinstance(rv.owner.op, MeasurableVariable) for rv in mixture_rvs): + # Currently, all mixture components must be `MeasurableVariable` outputs + # TODO: Allow constants and make them Dirac-deltas + # raise NotImplementedError( + # "All mixture components must be `MeasurableVariable` outputs" + # ) + return None, join_axis + + return mixture_rvs, join_axis + + +@node_rewriter(subtensor_ops) +def mixture_replace(fgraph, node): + r"""Identify mixture sub-graphs and replace them with a place-holder `Op`. + + The basic idea is to find ``stack(mixture_comps)[I_rv]``, where + ``mixture_comps`` is a ``list`` of `MeasurableVariable`\s and ``I_rv`` is a + `MeasurableVariable` with a discrete and finite support. + From these terms, new terms ``Z_rv[i] = mixture_comps[i][i == I_rv]`` are + created for each ``i`` in ``enumerate(mixture_comps)``. 
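+
+    A usage sketch (names illustrative):
+
+    .. code-block:: python
+
+        import aesara.tensor as at
+
+        from pymc.logprob import joint_logprob
+
+        I_rv = at.random.bernoulli(0.5, name="I")
+        X_rv = at.random.normal(-10.0, 0.1, name="X")
+        Y_rv = at.random.normal(10.0, 0.1, name="Y")
+
+        M_rv = at.stack([X_rv, Y_rv])[I_rv]
+        M_rv.name = "M"
+
+        m_vv = at.scalar("m")
+        i_vv = at.lscalar("i")
+        logp = joint_logprob({M_rv: m_vv, I_rv: i_vv})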
+ """ + rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None) + + if rv_map_feature is None: + return None # pragma: no cover + + old_mixture_rv = node.default_output() + + mixture_res, join_axis = get_stack_mixture_vars(node) + + if mixture_res is None or any(rv in rv_map_feature.rv_values for rv in mixture_res): + return None # pragma: no cover + + mixing_indices = node.inputs[1:] + + # We loop through mixture components and collect all the array elements + # that belong to each one (by way of their indices). + mixture_rvs = [] + for i, component_rv in enumerate(mixture_res): + + # We create custom types for the mixture components and assign them + # null `get_measurable_outputs` dispatches so that they aren't + # erroneously encountered in places like `factorized_joint_logprob`. + new_node = assign_custom_measurable_outputs(component_rv.owner) + out_idx = component_rv.owner.outputs.index(component_rv) + new_comp_rv = new_node.outputs[out_idx] + mixture_rvs.append(new_comp_rv) + + # Replace this sub-graph with a `MixtureRV` + mix_op = MixtureRV( + 1 + len(mixing_indices), + old_mixture_rv.dtype, + old_mixture_rv.broadcastable, + ) + new_node = mix_op.make_node(*([join_axis] + mixing_indices + mixture_rvs)) + + new_mixture_rv = new_node.default_output() + + if aesara.config.compute_test_value != "off": + # We can't use `MixtureRV` to compute a test value; instead, we'll use + # the original node's test value. + if not hasattr(old_mixture_rv.tag, "test_value"): + compute_test_value(node) + + new_mixture_rv.tag.test_value = old_mixture_rv.tag.test_value + + if old_mixture_rv.name: + new_mixture_rv.name = f"{old_mixture_rv.name}-mixture" + + return [new_mixture_rv] + + +@node_rewriter((Elemwise,)) +def switch_mixture_replace(fgraph, node): + rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None) + + if rv_map_feature is None: + return None # pragma: no cover + + if not isinstance(node.op.scalar_op, Switch): + return None # pragma: no cover + + old_mixture_rv = node.default_output() + # idx, component_1, component_2 = node.inputs + + mixture_rvs = [] + + for component_rv in node.inputs[1:]: + if not ( + component_rv.owner + and isinstance(component_rv.owner.op, MeasurableVariable) + and component_rv not in rv_map_feature.rv_values + ): + return None + new_node = assign_custom_measurable_outputs(component_rv.owner) + out_idx = component_rv.owner.outputs.index(component_rv) + new_comp_rv = new_node.outputs[out_idx] + mixture_rvs.append(new_comp_rv) + + mix_op = MixtureRV( + 2, + old_mixture_rv.dtype, + old_mixture_rv.broadcastable, + ) + new_node = mix_op.make_node(*([NoneConst, as_nontensor_scalar(node.inputs[0])] + mixture_rvs)) + + new_mixture_rv = new_node.default_output() + + if aesara.config.compute_test_value != "off": + if not hasattr(old_mixture_rv.tag, "test_value"): + compute_test_value(node) + + new_mixture_rv.tag.test_value = old_mixture_rv.tag.test_value + + if old_mixture_rv.name: + new_mixture_rv.name = f"{old_mixture_rv.name}-mixture" + + return [new_mixture_rv] + + +@_logprob.register(MixtureRV) +def logprob_MixtureRV( + op, values, *inputs: Optional[Union[TensorVariable, slice]], name=None, **kwargs +): + (value,) = values + + join_axis = cast(Variable, inputs[0]) + indices = cast(TensorVariable, inputs[1 : op.indices_end_idx]) + comp_rvs = cast(TensorVariable, inputs[op.indices_end_idx :]) + + assert len(indices) > 0 + + if len(indices) > 1 or indices[0].ndim > 0: + if isinstance(join_axis.type, NoneTypeT): + # `join_axis` will be `NoneConst` if the "join" 
was a `MakeVector` + # (i.e. scalar measurable variables were combined to make a + # vector). + # Since some form of advanced indexing is necessarily occurring, we + # need to reformat the MakeVector arguments so that they fit the + # `Join` format expected by the logic below. + join_axis_val = 0 + comp_rvs = [comp[None] for comp in comp_rvs] + original_shape = (len(comp_rvs),) + else: + # TODO: Find better solution to avoid this circular dependency + from pymc.aesaraf import constant_fold + + join_axis_val = constant_fold((join_axis,))[0].item() + original_shape = shape_tuple(comp_rvs[0]) + + bcast_indices = expand_indices(indices, original_shape) + + logp_val = at.empty(bcast_indices[0].shape) + + for m, rv in enumerate(comp_rvs): + idx_m_on_axis = at.nonzero(at.eq(bcast_indices[join_axis_val], m)) + m_indices = tuple( + v[idx_m_on_axis] for i, v in enumerate(bcast_indices) if i != join_axis_val + ) + # Drop superfluous join dimension + rv = rv[0] + # TODO: Do we really need to do this now? + # Could we construct this form earlier and + # do the lifting for everything at once, instead of + # this intentional one-off? + rv_m = rv_pull_down(rv[m_indices] if m_indices else rv) + val_m = value[idx_m_on_axis] + logp_m = logprob(rv_m, val_m) + logp_val = at.set_subtensor(logp_val[idx_m_on_axis], logp_m) + + else: + logp_val = 0.0 + for i, comp_rv in enumerate(comp_rvs): + comp_logp = logprob(comp_rv, value) + logp_val += ifelse( + at.eq(indices[0], i), + comp_logp, + at.zeros_like(value), + ) + + return logp_val + + +logprob_rewrites_db.register( + "mixture_replace", + EquilibriumGraphRewriter( + [mixture_replace, switch_mixture_replace], + max_use_ratio=aesara.config.optdb__max_use_ratio, + ), + 0, + "basic", + "mixture", +) diff --git a/pymc/logprob/rewriting.py b/pymc/logprob/rewriting.py new file mode 100644 index 00000000000..f4712bfc57f --- /dev/null +++ b/pymc/logprob/rewriting.py @@ -0,0 +1,353 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from typing import Dict, Optional, Tuple
+
+import aesara.tensor as at
+
+from aesara.compile.mode import optdb
+from aesara.graph.basic import Variable
+from aesara.graph.features import Feature
+from aesara.graph.fg import FunctionGraph
+from aesara.graph.rewriting.basic import GraphRewriter, node_rewriter
+from aesara.graph.rewriting.db import EquilibriumDB, RewriteDatabaseQuery, SequenceDB
+from aesara.tensor.elemwise import DimShuffle, Elemwise
+from aesara.tensor.extra_ops import BroadcastTo
+from aesara.tensor.random.rewriting import local_subtensor_rv_lift
+from aesara.tensor.rewriting.basic import register_canonicalize, register_useless
+from aesara.tensor.rewriting.shape import ShapeFeature
+from aesara.tensor.subtensor import (
+    AdvancedIncSubtensor,
+    AdvancedIncSubtensor1,
+    AdvancedSubtensor,
+    AdvancedSubtensor1,
+    IncSubtensor,
+    Subtensor,
+)
+from aesara.tensor.var import TensorVariable
+
+from pymc.logprob.abstract import MeasurableVariable
+from pymc.logprob.utils import DiracDelta, indices_from_subtensor
+
+inc_subtensor_ops = (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1)
+subtensor_ops = (AdvancedSubtensor, AdvancedSubtensor1, Subtensor)
+
+
+class NoCallbackEquilibriumDB(EquilibriumDB):
+    r"""This `EquilibriumDB` doesn't hide its exceptions.
+
+    By setting `failure_callback` to ``None`` in the `EquilibriumGraphRewriter`\s
+    that `EquilibriumDB` generates, we're able to directly emit the desired
+    exceptions from within the `NodeRewriter`\s themselves.
+    """
+
+    def query(self, *tags, **kwtags):
+        res = super().query(*tags, **kwtags)
+        res.failure_callback = None
+        return res
+
+
+class PreserveRVMappings(Feature):
+    r"""Keeps track of random variables and their respective value variables
+    during graph rewrites in `rv_values`.
+
+    When a random variable is replaced in a rewrite, this `Feature` automatically
+    updates the `rv_values` mapping, so that the new variable is linked to the
+    original value variable.
+
+    In addition, this `Feature` provides functionality to manually update a
+    random and/or value variable. A mapping from the transformed value
+    variables to the original value variables is kept in `original_values`.
+
+    Likewise, a `measurable_conversions` map is maintained, which holds
+    information about un-valued and un-measurable variables that were replaced
+    with measurable variables. This information can be used to revert these
+    rewrites.
+
+    """
+
+    def __init__(self, rv_values: Dict[TensorVariable, TensorVariable]):
+        """
+        Parameters
+        ==========
+        rv_values
+            Mappings between random variables and their value variables.
+            The keys of this map are what this `Feature` keeps updated.
+            The ``dict`` is updated in-place.
+        """
+        self.rv_values = rv_values
+        self.original_values = {v: v for v in rv_values.values()}
+        self.measurable_conversions: Dict[Variable, Variable] = {}
+
+    def on_attach(self, fgraph):
+        if hasattr(fgraph, "preserve_rv_mappings"):
+            raise ValueError(f"{fgraph} already has the `PreserveRVMappings` feature attached.")
+
+        fgraph.preserve_rv_mappings = self
+
+    def update_rv_maps(
+        self,
+        old_rv: TensorVariable,
+        new_value: TensorVariable,
+        new_rv: Optional[TensorVariable] = None,
+    ):
+        """Update mappings for a random variable.
+ + It also creates/updates a map from new value variables to their + original value variables. + + Parameters + ========== + old_rv + The random variable whose mappings will be updated. + new_value + The new value variable that will replace the current one assigned + to `old_rv`. + new_rv + When non-``None``, `old_rv` will also be replaced with `new_rv` in + the mappings, as well. + """ + old_value = self.rv_values.pop(old_rv) + original_value = self.original_values.pop(old_value) + + if new_rv is None: + new_rv = old_rv + + self.rv_values[new_rv] = new_value + self.original_values[new_value] = original_value + + def on_change_input(self, fgraph, node, i, r, new_r, reason=None): + """ + Whenever a node is replaced during rewrite, we check if it had a value + variable associated with it and map it to the new node. + """ + r_value_var = self.rv_values.pop(r, None) + if r_value_var is not None: + self.rv_values[new_r] = r_value_var + elif ( + new_r not in self.rv_values + and r.owner + and new_r.owner + and not isinstance(r.owner.op, MeasurableVariable) + and isinstance(new_r.owner.op, MeasurableVariable) + ): + self.measurable_conversions[r] = new_r + + +@register_canonicalize +@node_rewriter((Elemwise, BroadcastTo, DimShuffle) + subtensor_ops) +def local_lift_DiracDelta(fgraph, node): + r"""Lift basic `Op`\s through `DiracDelta`\s.""" + + if len(node.outputs) > 1: + return + + # Only handle scalar `Elemwise` `Op`s + if isinstance(node.op, Elemwise) and len(node.inputs) != 1: + return + + dd_inp = node.inputs[0] + + if dd_inp.owner is None or not isinstance(dd_inp.owner.op, DiracDelta): + return + + dd_val = dd_inp.owner.inputs[0] + + new_value_node = node.op.make_node(dd_val, *node.inputs[1:]) + new_node = dd_inp.owner.op.make_node(new_value_node.outputs[0]) + return new_node.outputs + + +@register_useless +@node_rewriter((DiracDelta,)) +def local_remove_DiracDelta(fgraph, node): + r"""Remove `DiracDelta`\s.""" + dd_val = node.inputs[0] + return [dd_val] + + +@node_rewriter(inc_subtensor_ops) +def incsubtensor_rv_replace(fgraph, node): + r"""Replace `*IncSubtensor*` `Op`\s and their value variables for log-probability calculations. + + This is used to derive the log-probability graph for ``Y[idx] = data``, where + ``Y`` is a `RandomVariable`, ``idx`` indices, and ``data`` some arbitrary data. + + To compute the log-probability of a statement like ``Y[idx] = data``, we must + first realize that our objective is equivalent to computing ``logprob(Y, z)``, + where ``z = at.set_subtensor(y[idx], data)`` and ``y`` is the value variable + for ``Y``. + + In other words, the log-probability for an `*IncSubtensor*` is the log-probability + of the underlying `RandomVariable` evaluated at ``data`` for the indices + given by ``idx`` and at the value variable for ``~idx``. + + This provides a means of specifying "missing data", for instance. 
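+
+    A usage sketch (names illustrative):
+
+    .. code-block:: python
+
+        import aesara.tensor as at
+
+        from pymc.logprob import joint_logprob
+
+        Y_rv = at.random.normal(0.0, 1.0, size=3, name="Y")
+        # The first entry is "observed" to be 0.3; the rest are free
+        Y_obs_rv = at.set_subtensor(Y_rv[0], 0.3)
+
+        y_vv = at.vector("y")
+        # Internally this becomes logprob(Y, at.set_subtensor(y[0], 0.3))
+        logp = joint_logprob({Y_obs_rv: y_vv})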
+ """ + rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None) + + if rv_map_feature is None: + return None # pragma: no cover + + if not isinstance(node.op, inc_subtensor_ops): + return None # pragma: no cover + + rv_var = node.outputs[0] + if rv_var not in rv_map_feature.rv_values: + return None # pragma: no cover + + base_rv_var = node.inputs[0] + + if not ( + base_rv_var.owner + and isinstance(base_rv_var.owner.op, MeasurableVariable) + and base_rv_var not in rv_map_feature.rv_values + ): + return None # pragma: no cover + + data = node.inputs[1] + idx = indices_from_subtensor(getattr(node.op, "idx_list", None), node.inputs[2:]) + + # Create a new value variable with the indices `idx` set to `data` + value_var = rv_map_feature.rv_values[rv_var] + new_value_var = at.set_subtensor(value_var[idx], data) + rv_map_feature.update_rv_maps(rv_var, new_value_var, base_rv_var) + + # Return the `RandomVariable` being indexed + return [base_rv_var] + + +logprob_rewrites_db = SequenceDB() +logprob_rewrites_db.name = "logprob_rewrites_db" +logprob_rewrites_db.register("pre-canonicalize", optdb.query("+canonicalize"), -10, "basic") + +# These rewrites convert un-measurable variables into their measurable forms, +# but they need to be reapplied, because some of the measurable forms require +# their inputs to be measurable. +measurable_ir_rewrites_db = NoCallbackEquilibriumDB() +measurable_ir_rewrites_db.name = "measurable_ir_rewrites_db" + +logprob_rewrites_db.register("measurable_ir_rewrites", measurable_ir_rewrites_db, -10, "basic") + +# These rewrites push random/measurable variables "down", making them closer to +# (or eventually) the graph outputs. Often this is done by lifting other `Op`s +# "up" through the random/measurable variables and into their inputs. +measurable_ir_rewrites_db.register("subtensor_lift", local_subtensor_rv_lift, -5, "basic") +measurable_ir_rewrites_db.register("incsubtensor_lift", incsubtensor_rv_replace, -5, "basic") + +logprob_rewrites_db.register("post-canonicalize", optdb.query("+canonicalize"), 10, "basic") + + +def construct_ir_fgraph( + rv_values: Dict[Variable, Variable], + ir_rewriter: Optional[GraphRewriter] = None, +) -> Tuple[FunctionGraph, Dict[Variable, Variable], Dict[Variable, Variable]]: + r"""Construct a `FunctionGraph` in measurable IR form for the keys in `rv_values`. + + A custom IR rewriter can be specified. By default, + `logprob_rewrites_db.query(RewriteDatabaseQuery(include=["basic"]))` is used. + + Our measurable IR takes the form of an Aesara graph that is more-or-less + equivalent to a given Aesara graph (i.e. the keys of `rv_values`) but + contains `Op`s that are subclasses of the `MeasurableVariable` type in + place of ones that do not inherit from `MeasurableVariable` in the original + graph but are nevertheless measurable. + + `MeasurableVariable`\s are mapped to log-probabilities, so this IR is how + non-trivial log-probabilities are constructed, especially when the + "measurability" of a term depends on the measurability of its inputs + (e.g. a mixture). + + In some cases, entire sub-graphs in the original graph are replaced with a + single measurable node. In other cases, the relevant nodes are already + measurable and there is no difference between the resulting measurable IR + graph and the original. 
In general, some changes will be present,
+    because--at the very least--canonicalization is always performed and the
+    measurable IR includes manipulations that are not applicable outside of
+    the context of measurability/log-probabilities.
+
+    For instance, some `Op`s will be lifted through `MeasurableVariable`\s in
+    this IR, and the resulting graphs will not be computationally sound,
+    because they wouldn't produce independent samples when the original graph
+    would. See https://github.com/aesara-devs/aeppl/pull/78.
+
+    Returns
+    -------
+    A `FunctionGraph` of the measurable IR, a copy of `rv_values` containing
+    the new, cloned versions of the original variables in `rv_values`, and
+    a ``dict`` mapping all the original variables to their cloned values in
+    the `FunctionGraph`.
+    """
+
+    # Since we're going to clone the entire graph, we need to keep a map from
+    # the old nodes to the new ones; otherwise, we won't be able to use
+    # `rv_values`.
+    # We start the `dict` with mappings from the value variables to themselves,
+    # to prevent them from being cloned.
+    memo = {v: v for v in rv_values.values()}
+
+    # We add `ShapeFeature` because it will get rid of references to the old
+    # `RandomVariable`s that have been lifted; otherwise, it will be difficult
+    # to give good warnings when an unaccounted-for `RandomVariable` is
+    # encountered
+    fgraph = FunctionGraph(
+        outputs=list(rv_values.keys()),
+        clone=True,
+        memo=memo,
+        copy_orphans=False,
+        copy_inputs=False,
+        features=[ShapeFeature()],
+    )
+
+    # Update `rv_values` so that it uses the new cloned variables
+    rv_values = {memo[k]: v for k, v in rv_values.items()}
+
+    # This `Feature` preserves the relationships between the original
+    # random variables (i.e. keys in `rv_values`) and the new ones
+    # produced when `Op`s are lifted through them.
+    rv_remapper = PreserveRVMappings(rv_values)
+    fgraph.attach_feature(rv_remapper)
+
+    if ir_rewriter is None:
+        ir_rewriter = logprob_rewrites_db.query(RewriteDatabaseQuery(include=["basic"]))
+    ir_rewriter.rewrite(fgraph)
+
+    if rv_remapper.measurable_conversions:
+        # Undo un-valued measurable IR rewrites
+        new_to_old = tuple((v, k) for k, v in rv_remapper.measurable_conversions.items())
+        fgraph.replace_all(new_to_old)
+
+    return fgraph, rv_values, memo
diff --git a/pymc/logprob/scan.py b/pymc/logprob/scan.py
new file mode 100644
index 00000000000..ec9b05cf6fc
--- /dev/null
+++ b/pymc/logprob/scan.py
@@ -0,0 +1,548 @@
+# Copyright 2022- The PyMC Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# MIT License
+#
+# Copyright (c) 2021-2022 aesara-devs
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from copy import copy
+from typing import Callable, Dict, Iterable, List, Tuple, cast
+
+import aesara
+import aesara.tensor as at
+import numpy as np
+
+from aesara.graph.basic import Variable
+from aesara.graph.fg import FunctionGraph
+from aesara.graph.op import compute_test_value
+from aesara.graph.rewriting.basic import node_rewriter
+from aesara.graph.rewriting.db import RewriteDatabaseQuery
+from aesara.scan.op import Scan
+from aesara.scan.rewriting import scan_eqopt1, scan_eqopt2
+from aesara.scan.utils import ScanArgs
+from aesara.tensor.random.type import RandomType
+from aesara.tensor.subtensor import Subtensor, indices_from_subtensor
+from aesara.tensor.var import TensorVariable
+from aesara.updates import OrderedUpdates
+
+from pymc.logprob.abstract import MeasurableVariable, _get_measurable_outputs, _logprob
+from pymc.logprob.joint_logprob import factorized_joint_logprob
+from pymc.logprob.rewriting import (
+    inc_subtensor_ops,
+    logprob_rewrites_db,
+    measurable_ir_rewrites_db,
+)
+
+
+class MeasurableScan(Scan):
+    """A placeholder used to specify a log-likelihood for a scan sub-graph."""
+
+
+MeasurableVariable.register(MeasurableScan)
+
+
+def convert_outer_out_to_in(
+    input_scan_args: ScanArgs,
+    outer_out_vars: Iterable[TensorVariable],
+    new_outer_input_vars: Dict[TensorVariable, TensorVariable],
+    inner_out_fn: Callable[[Dict[TensorVariable, TensorVariable]], Iterable[TensorVariable]],
+) -> ScanArgs:
+    r"""Convert outer-graph outputs into outer-graph inputs.
+
+    Parameters
+    ==========
+    input_scan_args:
+        The source `Scan` arguments.
+    outer_out_vars:
+        The outer-graph output variables that are to be converted into
+        outer-graph inputs.
+    new_outer_input_vars:
+        The variables used for the new outer-graph inputs computed for
+        `outer_out_vars`.
+    inner_out_fn:
+        A function that takes the remapped outer-out variables and produces new
+        inner-graph outputs. This can be used to transform the
+        `outer_out_vars`\s' corresponding inner-graph outputs into something
+        else entirely, like log-probabilities.
+
+    Returns
+    =======
+    A `ScanArgs` object for a `Scan` in which `outer_out_vars` have been
+    converted to outer-graph inputs.
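+
+    Schematically (an illustrative sketch; ``step`` and ``y0`` are
+    hypothetical), this turns a recursion such as::
+
+        ys, _ = aesara.scan(fn=step, outputs_info=[y0], n_steps=10)
+
+    into a `Scan` that instead *consumes* candidate values for ``ys`` as an
+    input sequence and emits whatever `inner_out_fn` computes from them
+    (e.g. log-probabilities).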
+    """
+
+    output_scan_args = copy(input_scan_args)
+    inner_outs_to_new_inner_ins = {}
+
+    # Map inner-outputs to outer-outputs
+    old_inner_outs_to_outer_outs = {}
+
+    for oo_var in outer_out_vars:
+
+        var_info = output_scan_args.find_among_fields(
+            oo_var, field_filter=lambda x: x.startswith("outer_out")
+        )
+
+        assert var_info is not None
+        assert oo_var in new_outer_input_vars
+
+        io_var = output_scan_args.get_alt_field(var_info, "inner_out")
+        old_inner_outs_to_outer_outs[io_var] = oo_var
+
+    # In this loop, we gather information about the new inner-inputs that have
+    # been created and what their corresponding inner-outputs were, and we
+    # update the outer and inner-inputs to reflect the addition of new
+    # inner-inputs.
+    for old_inner_out_var, oo_var in old_inner_outs_to_outer_outs.items():
+
+        # Couldn't one do the same with `var_info`?
+        inner_out_info = output_scan_args.find_among_fields(
+            old_inner_out_var, field_filter=lambda x: x.startswith("inner_out")
+        )
+
+        output_scan_args.remove_from_fields(old_inner_out_var, rm_dependents=False)
+
+        # Remove the old outer-output variable.
+        # Not sure if this really matters, since we don't use the outer-outputs
+        # when building a new `Scan`, but doing it keeps the `ScanArgs` object
+        # consistent.
+        output_scan_args.remove_from_fields(oo_var, rm_dependents=False)
+
+        # Use the index for the specific inner-graph sub-collection to which this
+        # variable belongs (e.g. index `1` among the inner-graph sit-sot terms)
+        var_idx = inner_out_info.index
+
+        # The old inner-output variable becomes a new inner-input
+        new_inner_in_var = old_inner_out_var.clone()
+        if new_inner_in_var.name:
+            new_inner_in_var.name = f"{new_inner_in_var.name}_vv"
+
+        inner_outs_to_new_inner_ins[old_inner_out_var] = new_inner_in_var
+
+        # We want to remove elements from both lists and tuples, because the
+        # members of `ScanArgs` could switch from being `list`s to `tuple`s
+        # soon
+        def remove(x, i):
+            return x[:i] + x[i + 1 :]
+
+        # If we're replacing a [m|s]it-sot, then we need to add a new nit-sot
+        add_nit_sot = False
+        if inner_out_info.name.endswith("mit_sot"):
+            inner_in_mit_sot_var = cast(
+                Tuple[int, ...], tuple(output_scan_args.inner_in_mit_sot[var_idx])
+            )
+            new_inner_in_seqs = inner_in_mit_sot_var + (new_inner_in_var,)
+            new_inner_in_mit_sot = remove(output_scan_args.inner_in_mit_sot, var_idx)
+            new_outer_in_mit_sot = remove(output_scan_args.outer_in_mit_sot, var_idx)
+            new_inner_in_sit_sot = tuple(output_scan_args.inner_in_sit_sot)
+            new_outer_in_sit_sot = tuple(output_scan_args.outer_in_sit_sot)
+            add_nit_sot = True
+        elif inner_out_info.name.endswith("sit_sot"):
+            new_inner_in_seqs = (output_scan_args.inner_in_sit_sot[var_idx],) + (new_inner_in_var,)
+            new_inner_in_sit_sot = remove(output_scan_args.inner_in_sit_sot, var_idx)
+            new_outer_in_sit_sot = remove(output_scan_args.outer_in_sit_sot, var_idx)
+            new_inner_in_mit_sot = tuple(output_scan_args.inner_in_mit_sot)
+            new_outer_in_mit_sot = tuple(output_scan_args.outer_in_mit_sot)
+            add_nit_sot = True
+        else:
+            new_inner_in_seqs = (new_inner_in_var,)
+            new_inner_in_mit_sot = tuple(output_scan_args.inner_in_mit_sot)
+            new_outer_in_mit_sot = tuple(output_scan_args.outer_in_mit_sot)
+            new_inner_in_sit_sot = tuple(output_scan_args.inner_in_sit_sot)
+            new_outer_in_sit_sot = tuple(output_scan_args.outer_in_sit_sot)
+
+        output_scan_args.inner_in_mit_sot = list(new_inner_in_mit_sot)
+        output_scan_args.inner_in_sit_sot = list(new_inner_in_sit_sot)
+        output_scan_args.outer_in_mit_sot = list(new_outer_in_mit_sot)
+        output_scan_args.outer_in_sit_sot = list(new_outer_in_sit_sot)
+
+        if inner_out_info.name.endswith("mit_sot"):
+            mit_sot_var_taps = cast(
+                Tuple[int, ...], tuple(output_scan_args.mit_sot_in_slices[var_idx])
+            )
+            taps = mit_sot_var_taps + (0,)
+            new_mit_sot_in_slices = remove(output_scan_args.mit_sot_in_slices, var_idx)
+        elif inner_out_info.name.endswith("sit_sot"):
+            taps = (-1, 0)
+            new_mit_sot_in_slices = tuple(output_scan_args.mit_sot_in_slices)
+        else:
+            taps = (0,)
+            new_mit_sot_in_slices = tuple(output_scan_args.mit_sot_in_slices)
+
+        output_scan_args.mit_sot_in_slices = list(new_mit_sot_in_slices)
+
+        taps, new_inner_in_seqs = zip(*sorted(zip(taps, new_inner_in_seqs), key=lambda x: x[0]))
+
+        new_inner_in_seqs = tuple(output_scan_args.inner_in_seqs) + tuple(
+            reversed(new_inner_in_seqs)
+        )
+
+        output_scan_args.inner_in_seqs = list(new_inner_in_seqs)
+
+        slice_seqs = zip(-np.asarray(taps), [n if n < 0 else None for n in reversed(taps)])
+
+        # XXX: If the caller passes the variables output by `aesara.scan`, it's
+        # likely that this will fail, because those variables can sometimes be
+        # slices of the actual outer-inputs (e.g. `out[1:]` instead of `out`
+        # when `taps=[-1]`).
+        var_slices = [new_outer_input_vars[oo_var][b:e] for b, e in slice_seqs]
+        n_steps = at.min([at.shape(n)[0] for n in var_slices])
+
+        output_scan_args.n_steps = n_steps
+
+        new_outer_in_seqs = tuple(output_scan_args.outer_in_seqs) + tuple(
+            v[:n_steps] for v in var_slices
+        )
+
+        output_scan_args.outer_in_seqs = list(new_outer_in_seqs)
+
+        if add_nit_sot:
+            new_outer_in_nit_sot = tuple(output_scan_args.outer_in_nit_sot) + (n_steps,)
+        else:
+            new_outer_in_nit_sot = tuple(output_scan_args.outer_in_nit_sot)
+
+        output_scan_args.outer_in_nit_sot = list(new_outer_in_nit_sot)
+
+    # Now, we can add new inner-outputs for the custom calculations.
+    # We don't need to create corresponding outer-outputs, because `Scan` will
+    # do that when we call `Scan.make_node`. All we need is a consistent set of
+    # outer-inputs and an inner-graph specification, which we should have in
+    # `output_scan_args`.
+    remapped_io_to_ii = inner_outs_to_new_inner_ins
+    new_inner_out_nit_sot = tuple(output_scan_args.inner_out_nit_sot) + tuple(
+        inner_out_fn(remapped_io_to_ii)
+    )
+
+    output_scan_args.inner_out_nit_sot = list(new_inner_out_nit_sot)
+
+    return output_scan_args
+
+
+def get_random_outer_outputs(
+    scan_args: ScanArgs,
+) -> List[Tuple[int, TensorVariable, TensorVariable]]:
+    """Get the `MeasurableVariable` outputs of a `Scan` (well, its `ScanArgs`).
+
+    Returns
+    =======
+    A list of tuples containing the index of each outer-output variable, the
+    outer-output variable itself, and the corresponding inner-output variable
+    that is an instance of `MeasurableVariable`.
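+
+    For example, a return value of ``[(0, outer_out, inner_out)]`` would mean
+    that only the first outer-output is measurable.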
+    """
+    rv_vars = []
+    for n, oo_var in enumerate(
+        [o for o in scan_args.outer_outputs if not isinstance(o.type, RandomType)]
+    ):
+        oo_info = scan_args.find_among_fields(oo_var)
+        io_type = oo_info.name[(oo_info.name.index("_", 6) + 1) :]
+        inner_out_type = f"inner_out_{io_type}"
+        io_var = getattr(scan_args, inner_out_type)[oo_info.index]
+        if io_var.owner and isinstance(io_var.owner.op, MeasurableVariable):
+            rv_vars.append((n, oo_var, io_var))
+    return rv_vars
+
+
+def construct_scan(scan_args: ScanArgs, **kwargs) -> Tuple[List[TensorVariable], OrderedUpdates]:
+    scan_op = Scan(scan_args.inner_inputs, scan_args.inner_outputs, scan_args.info, **kwargs)
+    node = scan_op.make_node(*scan_args.outer_inputs)
+    updates = OrderedUpdates(zip(scan_args.outer_in_shared, scan_args.outer_out_shared))
+    return node.outputs, updates
+
+
+@_logprob.register(MeasurableScan)
+def logprob_ScanRV(op, values, *inputs, name=None, **kwargs):
+
+    new_node = op.make_node(*inputs)
+    scan_args = ScanArgs.from_node(new_node)
+    rv_outer_outs = get_random_outer_outputs(scan_args)
+
+    var_indices, rv_vars, io_vars = zip(*rv_outer_outs)
+    value_map = {_rv: _val for _rv, _val in zip(rv_vars, values)}
+
+    def create_inner_out_logp(value_map: Dict[TensorVariable, TensorVariable]) -> TensorVariable:
+        """Create a log-likelihood inner-output for a `Scan`."""
+        logp_parts = factorized_joint_logprob(value_map, warn_missing_rvs=False)
+        return logp_parts.values()
+
+    logp_scan_args = convert_outer_out_to_in(
+        scan_args,
+        rv_vars,
+        value_map,
+        inner_out_fn=create_inner_out_logp,
+    )
+
+    # Remove the shared variables corresponding to replaced terms.
+
+    # TODO FIXME: This is a really dirty approach, because it effectively
+    # assumes that all sampling is being removed, and, thus, all shared updates
+    # relating to `RandomType`s. Instead, we should be more precise and only
+    # remove the `RandomType`s associated with `values`.
+    logp_scan_args.outer_in_shared = [
+        i for i in logp_scan_args.outer_in_shared if not isinstance(i.type, RandomType)
+    ]
+    logp_scan_args.inner_in_shared = [
+        i for i in logp_scan_args.inner_in_shared if not isinstance(i.type, RandomType)
+    ]
+    logp_scan_args.inner_out_shared = [
+        i for i in logp_scan_args.inner_out_shared if not isinstance(i.type, RandomType)
+    ]
+    # XXX TODO: Remove this properly
+    # logp_scan_args.outer_out_shared = []
+
+    logp_scan_out, updates = construct_scan(logp_scan_args, mode=op.mode)
+
+    # Automatically pick up updates so that we don't have to pass them around
+    for key, value in updates.items():
+        key.default_update = value
+
+    return logp_scan_out
+
+
+@node_rewriter([Scan])
+def find_measurable_scans(fgraph, node):
+    r"""Finds `Scan`\s for which a `logprob` can be computed.
+
+    This will convert said `Scan`\s into `MeasurableScan`\s. It also updates
+    random variable and value variable mappings that have been specified for
+    parts of a `Scan`'s outputs (e.g. everything except the initial values).
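+
+    For example (an illustrative sketch)::
+
+        from aesara.tensor.random.utils import RandomStream
+
+        srng = RandomStream(seed=2023)
+        ys, _ = aesara.scan(
+            fn=lambda y_tm1: srng.normal(y_tm1, 1.0),
+            outputs_info=[at.zeros(())],
+            n_steps=10,
+        )
+
+    Here ``ys`` is really a `Subtensor` of the full `Scan` output (the initial
+    value is dropped), which is precisely the situation this rewrite accounts
+    for below.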
+    """
+
+    if not isinstance(node.op, Scan):
+        return None
+
+    if isinstance(node.op, MeasurableScan):
+        return None
+
+    if not hasattr(fgraph, "shape_feature"):
+        return None  # pragma: no cover
+
+    rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None)
+
+    if rv_map_feature is None:
+        return None  # pragma: no cover
+
+    curr_scanargs = ScanArgs.from_node(node)
+
+    # Find the un-output `MeasurableVariable`s created in the inner-graph
+    clients: Dict[Variable, List[Variable]] = {}
+
+    local_fgraph_topo = aesara.graph.basic.io_toposort(
+        curr_scanargs.inner_inputs,
+        [o for o in curr_scanargs.inner_outputs if not isinstance(o.type, RandomType)],
+        clients=clients,
+    )
+    for n in local_fgraph_topo:
+        if isinstance(n.op, MeasurableVariable):
+            non_output_node_clients = [
+                c for c in clients[n] if c not in curr_scanargs.inner_outputs
+            ]
+
+            if non_output_node_clients:
+                # This node is a `MeasurableVariable`, but it has clients that
+                # are not inner-graph outputs.
+                # TODO: Why can't we make this a `MeasurableScan`?
+                return None
+
+    if not any(out in rv_map_feature.rv_values for out in node.outputs):
+        # We need to remap user inputs that have been specified in terms of
+        # `Subtensor`s of this `Scan`'s node's outputs.
+        #
+        # For example, the output that the user got was something like
+        # `out[1:]` for `outputs_info = [{"initial": x0, "taps": [-1]}]`, so
+        # they likely passed `{out[1:]: x_1T_vv}` to `joint_logprob`.
+        # Since `out[1:]` isn't really the output of a `Scan`, but a
+        # `Subtensor` of the output `out` of a `Scan`, we need to account for
+        # that.
+
+        # Get any `Subtensor` outputs that have been applied to outputs of this
+        # `Scan` (and get the corresponding indices of the outputs from this
+        # `Scan`)
+        output_clients: List[Tuple[Variable, int]] = sum(
+            [
+                [
+                    # This is expected to work for `Subtensor` `Op`s,
+                    # because they only ever have one output
+                    (cl.default_output(), i)
+                    for cl, _ in fgraph.get_clients(out)
+                    if isinstance(cl.op, Subtensor)
+                ]
+                for i, out in enumerate(node.outputs)
+            ],
+            [],
+        )
+
+        # The first items in these tuples are the *user-specified* measurable
+        # variables (i.e. `Subtensor`s of the outputs of this `Scan`), the
+        # second items are the value variables mapped to them, and the third
+        # items are the indices of the corresponding outputs of this `Scan`
+        # node.
+        indirect_rv_vars = [
+            (out, rv_map_feature.rv_values[out], out_idx)
+            for out, out_idx in output_clients
+            if out in rv_map_feature.rv_values
+        ]
+
+        if not indirect_rv_vars:
+            return None
+
+        # We need this for the `clone` in the loop that follows
+        if aesara.config.compute_test_value != "off":
+            compute_test_value(node)
+
+        # We're going to replace the user's random variable/value variable
+        # mappings with ones that map directly to outputs of this `Scan`.
+        for rv_var, val_var, out_idx in indirect_rv_vars:
+
+            # The full/un-`Subtensor`ed `Scan` output that we need to use
+            full_out = node.outputs[out_idx]
+
+            assert rv_var.owner.inputs[0] == full_out
+
+            # A new value variable that spans the full output.
+            # We don't want the old graph to appear in the new log-probability
+            # graph, so we use the shape feature to (hopefully) get the shape
+            # without the entire `Scan` itself.
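+            # For instance, with a single `-1` tap, the user's value variable
+            # only spans `t = 1 ... T`, while the value variable created here
+            # spans the full `t = 0 ... T` range of the output.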
+            full_out_shape = tuple(
+                fgraph.shape_feature.get_shape(full_out, i) for i in range(full_out.ndim)
+            )
+            new_val_var = at.empty(full_out_shape, dtype=full_out.dtype)
+
+            # Set the parts of this new value variable that applied to the
+            # user-specified value variable to the user's value variable
+            subtensor_indices = indices_from_subtensor(
+                rv_var.owner.inputs[1:], rv_var.owner.op.idx_list
+            )
+            # E.g. for a single `-1` tap, `s_0T[1:] = s_1T`, where `s_0T` is
+            # `new_val_var` and `s_1T` is the user-specified value variable
+            # that only spans times `t=1` to `t=T`.
+            new_val_var = at.set_subtensor(new_val_var[subtensor_indices], val_var)
+
+            # This is the outer-input that sets `s_0T[i] = taps[i]`, where `i`
+            # is a tap index (e.g. a tap of `-1` maps to index `0` in a vector
+            # of the entire series).
+            var_info = curr_scanargs.find_among_fields(full_out)
+            alt_type = var_info.name[(var_info.name.index("_", 6) + 1) :]
+            outer_input_var = getattr(curr_scanargs, f"outer_in_{alt_type}")[var_info.index]
+
+            # These outer-inputs are created by `aesara.scan.utils.expand_empty`,
+            # and are expected to consist of only a single `set_subtensor` call.
+            # That's why we can simply replace the first argument of the node.
+            assert isinstance(outer_input_var.owner.op, inc_subtensor_ops)
+
+            # We're going to set those values on our `new_val_var` so that it can
+            # serve as a complete replacement for the old input `outer_input_var`.
+            new_val_var = outer_input_var.owner.clone_with_new_inputs(
+                [new_val_var] + outer_input_var.owner.inputs[1:]
+            ).default_output()
+
+            # Replace the mapping
+            rv_map_feature.update_rv_maps(rv_var, new_val_var, full_out)
+
+    op = MeasurableScan(
+        curr_scanargs.inner_inputs,
+        curr_scanargs.inner_outputs,
+        curr_scanargs.info,
+        mode=node.op.mode,
+    )
+    new_node = op.make_node(*curr_scanargs.outer_inputs)
+
+    return dict(zip(node.outputs, new_node.outputs))
+
+
+@node_rewriter([Scan])
+def add_opts_to_inner_graphs(fgraph, node):
+    """Update the `Mode`(s) used to compile the inner-graph of a `Scan` `Op`.
+
+    This is how we add the measurable IR rewrites to the "body"
+    (i.e. inner-graph) of a `Scan` loop.
+    """
+
+    if not isinstance(node.op, Scan):
+        return None
+
+    # Avoid unnecessarily re-applying this rewrite
+    if getattr(node.op.mode, "had_logprob_rewrites", False):
+        return None
+
+    inner_fgraph = FunctionGraph(
+        node.op.inner_inputs,
+        node.op.inner_outputs,
+        clone=True,
+        copy_inputs=False,
+        copy_orphans=False,
+    )
+
+    logprob_rewrites_db.query(RewriteDatabaseQuery(include=["basic"])).rewrite(inner_fgraph)
+
+    new_outputs = list(inner_fgraph.outputs)
+
+    # TODO FIXME: This is pretty hackish.
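+    # The copied `Mode` below only serves as a marker that lets this rewrite
+    # recognize inner-graphs it has already processed; the extra attribute has
+    # no effect on compilation itself.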
+ new_mode = copy(node.op.mode) + new_mode.had_logprob_rewrites = True + + op = Scan(node.op.inner_inputs, new_outputs, node.op.info, mode=new_mode) + new_node = op.make_node(*node.inputs) + + return dict(zip(node.outputs, new_node.outputs)) + + +@_get_measurable_outputs.register(MeasurableScan) +def _get_measurable_outputs_MeasurableScan(op, node): + # TODO: This should probably use `get_random_outer_outputs` + # scan_args = ScanArgs.from_node(node) + # rv_outer_outs = get_random_outer_outputs(scan_args) + return [o for o in node.outputs if not isinstance(o.type, RandomType)] + + +measurable_ir_rewrites_db.register( + "add_opts_to_inner_graphs", + add_opts_to_inner_graphs, + # out2in( + # add_opts_to_inner_graphs, name="add_opts_to_inner_graphs", ignore_newtrees=True + # ), + -100, + "basic", + "scan", +) + +measurable_ir_rewrites_db.register( + "find_measurable_scans", + find_measurable_scans, + 0, + "basic", + "scan", +) + +# Add scan canonicalizations that aren't in the canonicalization DB +logprob_rewrites_db.register("scan_eqopt1", scan_eqopt1, -9, "basic", "scan") +logprob_rewrites_db.register("scan_eqopt2", scan_eqopt2, -9, "basic", "scan") diff --git a/pymc/logprob/tensor.py b/pymc/logprob/tensor.py new file mode 100644 index 00000000000..f6efe0fba43 --- /dev/null +++ b/pymc/logprob/tensor.py @@ -0,0 +1,331 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from typing import List, Optional, Union + +import aesara + +from aesara import tensor as at +from aesara.graph.op import compute_test_value +from aesara.graph.rewriting.basic import node_rewriter +from aesara.tensor.basic import Join, MakeVector +from aesara.tensor.elemwise import DimShuffle +from aesara.tensor.extra_ops import BroadcastTo +from aesara.tensor.random.op import RandomVariable +from aesara.tensor.random.rewriting import local_dimshuffle_rv_lift, local_rv_size_lift + +from pymc.logprob.abstract import ( + MeasurableVariable, + _logprob, + assign_custom_measurable_outputs, + logprob, +) +from pymc.logprob.rewriting import PreserveRVMappings, measurable_ir_rewrites_db + + +@node_rewriter([BroadcastTo]) +def naive_bcast_rv_lift(fgraph, node): + """Lift a ``BroadcastTo`` through a ``RandomVariable`` ``Op``. + + XXX: This implementation simply broadcasts the ``RandomVariable``'s + parameters, which won't always work (e.g. multivariate distributions). + + TODO: Instead, it should use ``RandomVariable.ndim_supp``--and the like--to + determine which dimensions of each parameter need to be broadcasted. + Also, this doesn't need to remove ``size`` to perform the lifting, like it + currently does. + """ + + if not ( + isinstance(node.op, BroadcastTo) + and node.inputs[0].owner + and isinstance(node.inputs[0].owner.op, RandomVariable) + ): + return None # pragma: no cover + + bcast_shape = node.inputs[1:] + + rv_var = node.inputs[0] + rv_node = rv_var.owner + + if hasattr(fgraph, "dont_touch_vars") and rv_var in fgraph.dont_touch_vars: + return None # pragma: no cover + + # Do not replace RV if it is associated with a value variable + rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, "preserve_rv_mappings", None) + if rv_map_feature is not None and rv_var in rv_map_feature.rv_values: + return None + + if not bcast_shape: + # The `BroadcastTo` is broadcasting a scalar to a scalar (i.e. 
doing nothing)
+        assert rv_var.ndim == 0
+        return [rv_var]
+
+    size_lift_res = local_rv_size_lift.transform(fgraph, rv_node)
+    if size_lift_res is None:
+        lifted_node = rv_node
+    else:
+        _, lifted_rv = size_lift_res
+        lifted_node = lifted_rv.owner
+
+    rng, size, dtype, *dist_params = lifted_node.inputs
+
+    new_dist_params = [
+        at.broadcast_to(
+            param,
+            at.broadcast_shape(tuple(param.shape), tuple(bcast_shape), arrays_are_shapes=True),
+        )
+        for param in dist_params
+    ]
+    bcasted_node = lifted_node.op.make_node(rng, size, dtype, *new_dist_params)
+
+    if aesara.config.compute_test_value != "off":
+        compute_test_value(bcasted_node)
+
+    return [bcasted_node.outputs[1]]
+
+
+class MeasurableMakeVector(MakeVector):
+    """A placeholder used to specify a log-likelihood for a `MakeVector` sub-graph."""
+
+
+MeasurableVariable.register(MeasurableMakeVector)
+
+
+@_logprob.register(MeasurableMakeVector)
+def logprob_make_vector(op, values, *base_vars, **kwargs):
+    """Compute the log-likelihood graph for a `MeasurableMakeVector`."""
+    (value,) = values
+
+    return at.stack([logprob(base_var, value[i]) for i, base_var in enumerate(base_vars)])
+
+
+class MeasurableJoin(Join):
+    """A placeholder used to specify a log-likelihood for a join sub-graph."""
+
+
+MeasurableVariable.register(MeasurableJoin)
+
+
+@_logprob.register(MeasurableJoin)
+def logprob_join(op, values, axis, *base_vars, **kwargs):
+    """Compute the log-likelihood graph for a `MeasurableJoin`."""
+    (value,) = values
+
+    base_var_shapes = [base_var.shape[axis] for base_var in base_vars]
+
+    # TODO: Find a better way to avoid this circular dependency
+    from pymc.aesaraf import constant_fold
+
+    # We don't need the graph to be constant, just to have RandomVariables removed
+    base_var_shapes = constant_fold(base_var_shapes, raise_not_constant=False)
+
+    split_values = at.split(
+        value,
+        splits_size=base_var_shapes,
+        n_splits=len(base_vars),
+        axis=axis,
+    )
+
+    logps = [
+        logprob(base_var, split_value) for base_var, split_value in zip(base_vars, split_values)
+    ]
+
+    if len({logp.ndim for logp in logps}) != 1:
+        raise ValueError(
+            "Joined logps have different numbers of dimensions; this can happen "
+            "when joining univariate and multivariate distributions",
+        )
+
+    base_vars_ndim_supp = split_values[0].ndim - logps[0].ndim
+    join_logprob = at.concatenate(
+        [
+            at.atleast_1d(logprob(base_var, split_value))
+            for base_var, split_value in zip(base_vars, split_values)
+        ],
+        axis=axis - base_vars_ndim_supp,
+    )
+
+    return join_logprob
+
+
+@node_rewriter([MakeVector, Join])
+def find_measurable_stacks(
+    fgraph, node
+) -> Optional[List[Union[MeasurableMakeVector, MeasurableJoin]]]:
+    r"""Finds `Join`\s and `MakeVector`\s for which a `logprob` can be computed."""
+
+    if isinstance(node.op, (MeasurableMakeVector, MeasurableJoin)):
+        return None  # pragma: no cover
+
+    rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, "preserve_rv_mappings", None)
+
+    if rv_map_feature is None:
+        return None  # pragma: no cover
+
+    stack_out = node.outputs[0]
+
+    is_join = isinstance(node.op, Join)
+
+    if is_join:
+        axis, *base_vars = node.inputs
+    else:
+        base_vars = node.inputs
+
+    if not all(
+        base_var.owner
+        and isinstance(base_var.owner.op, MeasurableVariable)
+        and base_var not in rv_map_feature.rv_values
+        for base_var in base_vars
+    ):
+        return None  # pragma: no cover
+
+    # Make base_vars unmeasurable
+    base_vars = [assign_custom_measurable_outputs(base_var.owner) for base_var in base_vars]
+
+    if is_join:
+        measurable_stack = MeasurableJoin()(axis,
*base_vars) + else: + measurable_stack = MeasurableMakeVector(node.op.dtype)(*base_vars) + + measurable_stack.name = stack_out.name + + return [measurable_stack] + + +class MeasurableDimShuffle(DimShuffle): + """A placeholder used to specify a log-likelihood for a dimshuffle sub-graph.""" + + # Need to get the absolute path of `c_func_file`, otherwise it tries to + # find it locally and fails when a new `Op` is initialized + c_func_file = DimShuffle.get_path(DimShuffle.c_func_file) + + +MeasurableVariable.register(MeasurableDimShuffle) + + +@_logprob.register(MeasurableDimShuffle) +def logprob_dimshuffle(op, values, base_var, **kwargs): + """Compute the log-likelihood graph for a `MeasurableDimShuffle`.""" + (value,) = values + + # Reverse the effects of dimshuffle on the value variable + # First, drop any augmented dimensions and reinsert any dropped dimensions + undo_ds: List[Union[int, str]] = [i for i, o in enumerate(op.new_order) if o != "x"] + dropped_dims = tuple(sorted(set(op.transposition) - set(op.shuffle))) + for dropped_dim in dropped_dims: + undo_ds.insert(dropped_dim, "x") + value = value.dimshuffle(undo_ds) + + # Then, unshuffle remaining dims + original_shuffle = list(op.shuffle) + for dropped_dim in dropped_dims: + original_shuffle.insert(dropped_dim, dropped_dim) + undo_ds = [original_shuffle.index(i) for i in range(len(original_shuffle))] + value = value.dimshuffle(undo_ds) + + raw_logp = logprob(base_var, value) + + # Re-apply original dimshuffle, ignoring any support dimensions consumed by + # the logprob function. This assumes that support dimensions are always in + # the rightmost positions, and all we need to do is to discard the highest + # indexes in the original dimshuffle order. Otherwise, there is no way of + # knowing which dimensions were consumed by the logprob function. + redo_ds = [o for o in op.new_order if o == "x" or o < raw_logp.ndim] + return raw_logp.dimshuffle(redo_ds) + + +@node_rewriter([DimShuffle]) +def find_measurable_dimshuffles(fgraph, node) -> Optional[List[MeasurableDimShuffle]]: + r"""Finds `Dimshuffle`\s for which a `logprob` can be computed.""" + + if isinstance(node.op, MeasurableDimShuffle): + return None # pragma: no cover + + rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, "preserve_rv_mappings", None) + + if rv_map_feature is None: + return None # pragma: no cover + + base_var = node.inputs[0] + + # We can only apply this rewrite directly to `RandomVariable`s, as those are + # the only `Op`s for which we always know the support axis. Other measurable + # variables can have arbitrary support axes (e.g., if they contain separate + # `MeasurableDimShuffle`s). Most measurable variables with `DimShuffle`s + # should still be supported as long as the `DimShuffle`s can be merged/ + # lifted towards the base RandomVariable. + # TODO: If we include the support axis as meta information in each + # intermediate MeasurableVariable, we can lift this restriction. 
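+    # For example, a transposed `RandomVariable` can be handled, because its
+    # support axes are known from `ndim_supp`, whereas a `DimShuffle` of some
+    # other measurable variable is rejected below.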
+ if not ( + base_var.owner + and isinstance(base_var.owner.op, RandomVariable) + and base_var not in rv_map_feature.rv_values + ): + return None # pragma: no cover + + # Make base_vars unmeasurable + base_var = assign_custom_measurable_outputs(base_var.owner) + + measurable_dimshuffle = MeasurableDimShuffle(node.op.input_broadcastable, node.op.new_order)( + base_var + ) + measurable_dimshuffle.name = node.outputs[0].name + + return [measurable_dimshuffle] + + +measurable_ir_rewrites_db.register( + "dimshuffle_lift", local_dimshuffle_rv_lift, -5, "basic", "tensor" +) + + +# We register this later than `dimshuffle_lift` so that it is only applied as a fallback +measurable_ir_rewrites_db.register( + "find_measurable_dimshuffles", find_measurable_dimshuffles, 0, "basic", "tensor" +) + + +measurable_ir_rewrites_db.register("broadcast_to_lift", naive_bcast_rv_lift, -5, "basic", "tensor") + + +measurable_ir_rewrites_db.register( + "find_measurable_stacks", + find_measurable_stacks, + 0, + "basic", + "tensor", +) diff --git a/pymc/logprob/transforms.py b/pymc/logprob/transforms.py new file mode 100644 index 00000000000..fb1dc19e8dd --- /dev/null +++ b/pymc/logprob/transforms.py @@ -0,0 +1,727 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import abc + +from copy import copy +from functools import partial, singledispatch +from typing import Callable, Dict, List, Optional, Tuple, Union + +import aesara.tensor as at + +from aesara.gradient import DisconnectedType, jacobian +from aesara.graph.basic import Apply, Node, Variable +from aesara.graph.features import AlreadyThere, Feature +from aesara.graph.fg import FunctionGraph +from aesara.graph.op import Op +from aesara.graph.rewriting.basic import GraphRewriter, in2out, node_rewriter +from aesara.scalar import Add, Exp, Log, Mul +from aesara.tensor.elemwise import Elemwise +from aesara.tensor.rewriting.basic import ( + register_specialize, + register_stabilize, + register_useless, +) +from aesara.tensor.var import TensorVariable + +from pymc.logprob.abstract import ( + MeasurableElemwise, + MeasurableVariable, + _get_measurable_outputs, + _logprob, + assign_custom_measurable_outputs, + logprob, +) +from pymc.logprob.rewriting import PreserveRVMappings, measurable_ir_rewrites_db +from pymc.logprob.utils import walk_model + + +@singledispatch +def _default_transformed_rv( + op: Op, + node: Node, +) -> Optional[Apply]: + """Create a node for a transformed log-probability of a `MeasurableVariable`. + + This function dispatches on the type of `op`. If you want to implement + new transforms for a `MeasurableVariable`, register a function on this + dispatcher. + + """ + return None + + +class TransformedVariable(Op): + """A no-op that identifies a transform and its un-transformed input.""" + + view_map = {0: [0]} + + def make_node(self, tran_value: TensorVariable, value: TensorVariable): + return Apply(self, [tran_value, value], [tran_value.type()]) + + def perform(self, node, inputs, outputs): + raise NotImplementedError("These `Op`s should be removed from graphs used for computation.") + + def connection_pattern(self, node): + return [[True], [False]] + + def infer_shape(self, fgraph, node, input_shapes): + return [input_shapes[0]] + + def grad(self, args, g_outs): + return g_outs[0], DisconnectedType()() + + +transformed_variable = TransformedVariable() + + +@register_specialize +@register_stabilize +@register_useless +@node_rewriter([TransformedVariable]) +def remove_TransformedVariables(fgraph, node): + if isinstance(node.op, TransformedVariable): + return [node.inputs[0]] + + +class RVTransform(abc.ABC): + @abc.abstractmethod + def forward(self, value: TensorVariable, *inputs: Variable) -> TensorVariable: + """Apply the transformation.""" + + @abc.abstractmethod + def backward(self, value: TensorVariable, *inputs: Variable) -> TensorVariable: + """Invert the transformation.""" + + def log_jac_det(self, value: TensorVariable, *inputs) -> TensorVariable: + """Construct the log of the absolute value of the Jacobian determinant.""" + # jac = at.reshape( + # gradient(at.sum(self.backward(value, *inputs)), [value]), value.shape + # ) + # return at.log(at.abs(jac)) + phi_inv = self.backward(value, *inputs) + return at.log(at.abs(at.nlinalg.det(at.atleast_2d(jacobian(phi_inv, [value])[0])))) + + +class DefaultTransformSentinel: + pass + + +DEFAULT_TRANSFORM = DefaultTransformSentinel() + + +@node_rewriter(tracks=None) +def transform_values(fgraph: FunctionGraph, node: Node) -> Optional[List[Node]]: + """Apply transforms to value variables. + + It is assumed that the input value variables correspond to forward + transformations, usually chosen in such a way that the values are + unconstrained on the real line. 
+
+    For example, if ``Y = halfnormal(...)``, we assume the respective value
+    variable is specified on the log scale and back-transform it to obtain
+    ``Y`` on the natural scale.
+    """
+
+    rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None)
+    values_to_transforms = getattr(fgraph, "values_to_transforms", None)
+
+    if rv_map_feature is None or values_to_transforms is None:
+        return None  # pragma: no cover
+
+    try:
+        rv_var = node.default_output()
+        rv_var_out_idx = node.outputs.index(rv_var)
+    except ValueError:
+        return None
+
+    value_var = rv_map_feature.rv_values.get(rv_var, None)
+    if value_var is None:
+        return None
+
+    transform = values_to_transforms.get(value_var, None)
+
+    if transform is None:
+        return None
+    elif transform is DEFAULT_TRANSFORM:
+        trans_node = _default_transformed_rv(node.op, node)
+        if trans_node is None:
+            return None
+        transform = trans_node.op.transform
+    else:
+        new_op = _create_transformed_rv_op(node.op, transform)
+        # Create a new `Apply` node and outputs
+        trans_node = node.clone()
+        trans_node.op = new_op
+        trans_node.outputs[rv_var_out_idx].name = node.outputs[rv_var_out_idx].name
+
+    # We now assume that the old value variable represents the *transformed space*.
+    # This means that we need to replace all instances of the old value variable
+    # with "inversely/un-" transformed versions of itself.
+    new_value_var = transformed_variable(
+        transform.backward(value_var, *trans_node.inputs), value_var
+    )
+    if value_var.name and getattr(transform, "name", None):
+        new_value_var.name = f"{value_var.name}_{transform.name}"
+
+    rv_map_feature.update_rv_maps(rv_var, new_value_var, trans_node.outputs[rv_var_out_idx])
+
+    return trans_node.outputs
+
+
+class TransformValuesMapping(Feature):
+    r"""A `Feature` that maintains a map between value variables and their transforms."""
+
+    def __init__(self, values_to_transforms):
+        self.values_to_transforms = values_to_transforms
+
+    def on_attach(self, fgraph):
+        if hasattr(fgraph, "values_to_transforms"):
+            raise AlreadyThere()
+
+        fgraph.values_to_transforms = self.values_to_transforms
+
+
+class TransformValuesRewrite(GraphRewriter):
+    r"""Transforms value variables according to a map and/or per-`RandomVariable` defaults."""
+
+    default_transform_rewrite = in2out(transform_values, ignore_newtrees=True)
+
+    def __init__(
+        self,
+        values_to_transforms: Dict[
+            TensorVariable, Union[RVTransform, DefaultTransformSentinel, None]
+        ],
+    ):
+        """
+        Parameters
+        ==========
+        values_to_transforms
+            Mapping between value variables and their transformations. Each
+            value variable can be assigned one of `RVTransform`,
+            ``DEFAULT_TRANSFORM``, or ``None``. If a transform is not specified
+            for a given value variable, it will not be transformed.
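+
+        For example (an illustrative sketch, where ``sigma_vv`` is the value
+        variable of a positive random variable)::
+
+            TransformValuesRewrite({sigma_vv: LogTransform()})
+
+        computes the log-probability with respect to the logarithm of the
+        original variable.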
+
+        """
+
+        self.values_to_transforms = values_to_transforms
+
+    def add_requirements(self, fgraph):
+        values_transforms_feature = TransformValuesMapping(self.values_to_transforms)
+        fgraph.attach_feature(values_transforms_feature)
+
+    def apply(self, fgraph: FunctionGraph):
+        return self.default_transform_rewrite.rewrite(fgraph)
+
+
+class MeasurableTransform(MeasurableElemwise):
+    """A placeholder used to specify a log-likelihood for a transformed measurable variable."""
+
+    valid_scalar_types = (Exp, Log, Add, Mul)
+
+    # Cannot use `transform` as the name, because it would clash with the
+    # property added by `TransformValuesRewrite`
+    transform_elemwise: RVTransform
+    measurable_input_idx: int
+
+    def __init__(self, *args, transform: RVTransform, measurable_input_idx: int, **kwargs):
+        self.transform_elemwise = transform
+        self.measurable_input_idx = measurable_input_idx
+        super().__init__(*args, **kwargs)
+
+
+@_get_measurable_outputs.register(MeasurableTransform)
+def _get_measurable_outputs_Transform(op, node):
+    return [node.default_output()]
+
+
+@_logprob.register(MeasurableTransform)
+def measurable_transform_logprob(op: MeasurableTransform, values, *inputs, **kwargs):
+    """Compute the log-probability graph for a `MeasurableTransform`."""
+    # TODO: Could other rewrites affect the order of inputs?
+    (value,) = values
+    other_inputs = list(inputs)
+    measurable_input = other_inputs.pop(op.measurable_input_idx)
+
+    # The value variable must still be back-transformed to be on the natural
+    # support of the respective measurable input.
+    backward_value = op.transform_elemwise.backward(value, *other_inputs)
+    input_logprob = logprob(measurable_input, backward_value, **kwargs)
+
+    jacobian = op.transform_elemwise.log_jac_det(value, *other_inputs)
+
+    return input_logprob + jacobian
+
+
+@node_rewriter([Elemwise])
+def find_measurable_transforms(fgraph: FunctionGraph, node: Node) -> Optional[List[Node]]:
+    """Find measurable transformations from `Elemwise` operators."""
+    scalar_op = node.op.scalar_op
+    if not isinstance(scalar_op, MeasurableTransform.valid_scalar_types):
+        return None
+
+    # Node was already converted
+    if isinstance(node.op, MeasurableVariable):
+        return None  # pragma: no cover
+
+    rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, "preserve_rv_mappings", None)
+    if rv_map_feature is None:
+        return None  # pragma: no cover
+
+    # Check that we have a single source of measurement
+    measurable_inputs = [
+        inp
+        for idx, inp in enumerate(node.inputs)
+        if inp.owner
+        and isinstance(inp.owner.op, MeasurableVariable)
+        and inp not in rv_map_feature.rv_values
+    ]
+
+    if len(measurable_inputs) != 1:
+        return None
+
+    measurable_input: TensorVariable = measurable_inputs[0]
+
+    # Do not apply the rewrite to discrete variables
+    if measurable_input.type.dtype.startswith("int"):
+        return None
+
+    # Check that the other inputs are not potentially measurable, in which case
+    # this rewrite would be invalid
+    other_inputs = tuple(inp for inp in node.inputs if inp is not measurable_input)
+    if any(
+        ancestor_node
+        for ancestor_node in walk_model(
+            other_inputs,
+            walk_past_rvs=False,
+            stop_at_vars=set(rv_map_feature.rv_values),
+        )
+        if (
+            ancestor_node.owner
+            and isinstance(ancestor_node.owner.op, MeasurableVariable)
+            and ancestor_node not in rv_map_feature.rv_values
+        )
+    ):
+        return None
+
+    # Make the base measure's outputs unmeasurable.
+    # This seems to be the only thing preventing nested rewrites from being erased.
+    measurable_input =
assign_custom_measurable_outputs(measurable_input.owner) + + measurable_input_idx = 0 + transform_inputs: Tuple[TensorVariable, ...] = (measurable_input,) + transform: RVTransform + if isinstance(scalar_op, Exp): + transform = ExpTransform() + elif isinstance(scalar_op, Log): + transform = LogTransform() + elif isinstance(scalar_op, Add): + transform_inputs = (measurable_input, at.add(*other_inputs)) + transform = LocTransform( + transform_args_fn=lambda *inputs: inputs[-1], + ) + else: + transform_inputs = (measurable_input, at.mul(*other_inputs)) + transform = ScaleTransform( + transform_args_fn=lambda *inputs: inputs[-1], + ) + + transform_op = MeasurableTransform( + scalar_op=scalar_op, + transform=transform, + measurable_input_idx=measurable_input_idx, + ) + transform_out = transform_op.make_node(*transform_inputs).default_output() + transform_out.name = node.outputs[0].name + + return [transform_out] + + +measurable_ir_rewrites_db.register( + "find_measurable_transforms", + find_measurable_transforms, + 0, + "basic", + "transform", +) + + +class LocTransform(RVTransform): + name = "loc" + + def __init__(self, transform_args_fn): + self.transform_args_fn = transform_args_fn + + def forward(self, value, *inputs): + loc = self.transform_args_fn(*inputs) + return value + loc + + def backward(self, value, *inputs): + loc = self.transform_args_fn(*inputs) + return value - loc + + def log_jac_det(self, value, *inputs): + return at.zeros_like(value) + + +class ScaleTransform(RVTransform): + name = "scale" + + def __init__(self, transform_args_fn): + self.transform_args_fn = transform_args_fn + + def forward(self, value, *inputs): + scale = self.transform_args_fn(*inputs) + return value * scale + + def backward(self, value, *inputs): + scale = self.transform_args_fn(*inputs) + return value / scale + + def log_jac_det(self, value, *inputs): + scale = self.transform_args_fn(*inputs) + return -at.log(at.abs(scale)) + + +class LogTransform(RVTransform): + name = "log" + + def forward(self, value, *inputs): + return at.log(value) + + def backward(self, value, *inputs): + return at.exp(value) + + def log_jac_det(self, value, *inputs): + return value + + +class ExpTransform(RVTransform): + name = "exp" + + def forward(self, value, *inputs): + return at.exp(value) + + def backward(self, value, *inputs): + return at.log(value) + + def log_jac_det(self, value, *inputs): + return -at.log(value) + + +class IntervalTransform(RVTransform): + name = "interval" + + def __init__(self, args_fn: Callable[..., Tuple[Optional[Variable], Optional[Variable]]]): + """ + + Parameters + ========== + args_fn + Function that expects inputs of RandomVariable and returns the lower + and upper bounds for the interval transformation. If one of these is + None, the RV is considered to be unbounded on the respective edge. 
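+
+        For example, the uniform distribution is registered below with::
+
+            IntervalTransform(lambda *inputs: (inputs[3], inputs[4]))
+
+        where ``inputs[3]`` and ``inputs[4]`` are its lower and upper bounds.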
+ """ + self.args_fn = args_fn + + def forward(self, value, *inputs): + a, b = self.args_fn(*inputs) + + if a is not None and b is not None: + return at.log(value - a) - at.log(b - value) + elif a is not None: + return at.log(value - a) + elif b is not None: + return at.log(b - value) + else: + raise ValueError("Both edges of IntervalTransform cannot be None") + + def backward(self, value, *inputs): + a, b = self.args_fn(*inputs) + + if a is not None and b is not None: + sigmoid_x = at.sigmoid(value) + return sigmoid_x * b + (1 - sigmoid_x) * a + elif a is not None: + return at.exp(value) + a + elif b is not None: + return b - at.exp(value) + else: + raise ValueError("Both edges of IntervalTransform cannot be None") + + def log_jac_det(self, value, *inputs): + a, b = self.args_fn(*inputs) + + if a is not None and b is not None: + s = at.softplus(-value) + return at.log(b - a) - 2 * s - value + elif a is None and b is None: + raise ValueError("Both edges of IntervalTransform cannot be None") + else: + return value + + +class LogOddsTransform(RVTransform): + name = "logodds" + + def backward(self, value, *inputs): + return at.expit(value) + + def forward(self, value, *inputs): + return at.log(value / (1 - value)) + + def log_jac_det(self, value, *inputs): + sigmoid_value = at.sigmoid(value) + return at.log(sigmoid_value) + at.log1p(-sigmoid_value) + + +class SimplexTransform(RVTransform): + name = "simplex" + + def forward(self, value, *inputs): + log_value = at.log(value) + shift = at.sum(log_value, -1, keepdims=True) / value.shape[-1] + return log_value[..., :-1] - shift + + def backward(self, value, *inputs): + value = at.concatenate([value, -at.sum(value, -1, keepdims=True)], axis=-1) + exp_value_max = at.exp(value - at.max(value, -1, keepdims=True)) + return exp_value_max / at.sum(exp_value_max, -1, keepdims=True) + + def log_jac_det(self, value, *inputs): + N = value.shape[-1] + 1 + sum_value = at.sum(value, -1, keepdims=True) + value_sum_expanded = value + sum_value + value_sum_expanded = at.concatenate([value_sum_expanded, at.zeros(sum_value.shape)], -1) + logsumexp_value_expanded = at.logsumexp(value_sum_expanded, -1, keepdims=True) + res = at.log(N) + (N * sum_value) - (N * logsumexp_value_expanded) + return at.sum(res, -1) + + +class CircularTransform(RVTransform): + name = "circular" + + def backward(self, value, *inputs): + return at.arctan2(at.sin(value), at.cos(value)) + + def forward(self, value, *inputs): + return at.as_tensor_variable(value) + + def log_jac_det(self, value, *inputs): + return at.zeros(value.shape) + + +class ChainedTransform(RVTransform): + name = "chain" + + def __init__(self, transform_list, base_op): + self.transform_list = transform_list + self.base_op = base_op + + def forward(self, value, *inputs): + for transform in self.transform_list: + value = transform.forward(value, *inputs) + return value + + def backward(self, value, *inputs): + for transform in reversed(self.transform_list): + value = transform.backward(value, *inputs) + return value + + def log_jac_det(self, value, *inputs): + value = at.as_tensor_variable(value) + det_list = [] + ndim0 = value.ndim + for transform in reversed(self.transform_list): + det_ = transform.log_jac_det(value, *inputs) + det_list.append(det_) + ndim0 = min(ndim0, det_.ndim) + value = transform.backward(value, *inputs) + # match the shape of the smallest jacobian_det + det = 0.0 + for det_ in det_list: + if det_.ndim > ndim0: + det += det_.sum(axis=-1) + else: + det += det_ + return det + + +def 
_create_transformed_rv_op(
+    rv_op: Op,
+    transform: RVTransform,
+    *,
+    default: bool = False,
+    cls_dict_extra: Optional[Dict] = None,
+) -> Op:
+    """Create a new transformed variable instance given a base `RandomVariable` `Op`.
+
+    This will essentially copy the `type` of the given `Op` instance, create a
+    copy of said `Op` instance and change its `type` to the new one.
+
+    In the end, we have an `Op` instance that will map to an `RVTransform` while
+    also behaving exactly as it did before.
+
+    Parameters
+    ==========
+    rv_op
+        The `RandomVariable` for which we want to construct a `TransformedRV`.
+    transform
+        The `RVTransform` for `rv_op`.
+    default
+        If ``False``, do not make `transform` the default transform for `rv_op`.
+    cls_dict_extra
+        Additional class members to add to the constructed `TransformedRV`.
+
+    """
+
+    trans_name = getattr(transform, "name", "transformed")
+    rv_op_type = type(rv_op)
+    rv_type_name = rv_op_type.__name__
+    cls_dict = rv_op_type.__dict__.copy()
+    rv_name = cls_dict.get("name", "")
+    if rv_name:
+        cls_dict["name"] = f"{rv_name}_{trans_name}"
+    cls_dict["transform"] = transform
+
+    if cls_dict_extra is not None:
+        cls_dict.update(cls_dict_extra)
+
+    new_op_type = type(f"Transformed{rv_type_name}", (rv_op_type,), cls_dict)
+
+    MeasurableVariable.register(new_op_type)
+
+    @_logprob.register(new_op_type)
+    def transformed_logprob(op, values, *inputs, use_jacobian=True, **kwargs):
+        """Compute the log-likelihood graph for a `TransformedRV`.
+
+        We assume that the value variable was back-transformed to be on the natural
+        support of the respective random variable.
+        """
+        (value,) = values
+
+        logprob = _logprob(rv_op, values, *inputs, **kwargs)
+
+        if use_jacobian:
+            assert isinstance(value.owner.op, TransformedVariable)
+            original_forward_value = value.owner.inputs[1]
+            jacobian = op.transform.log_jac_det(original_forward_value, *inputs)
+            logprob += jacobian
+
+        return logprob
+
+    transform_op = rv_op_type if default else new_op_type
+
+    @_default_transformed_rv.register(transform_op)
+    def class_transformed_rv(op, node):
+        new_op = new_op_type()
+        res = new_op.make_node(*node.inputs)
+        res.outputs[1].name = node.outputs[1].name
+        return res
+
+    new_op = copy(rv_op)
+    new_op.__class__ = new_op_type
+
+    return new_op
+
+
+create_default_transformed_rv_op = partial(_create_transformed_rv_op, default=True)
+
+
+TransformedUniformRV = create_default_transformed_rv_op(
+    at.random.uniform,
+    # inputs[3] = lower; inputs[4] = upper
+    IntervalTransform(lambda *inputs: (inputs[3], inputs[4])),
+)
+TransformedParetoRV = create_default_transformed_rv_op(
+    at.random.pareto,
+    # inputs[3] = alpha
+    IntervalTransform(lambda *inputs: (inputs[3], None)),
+)
+TransformedTriangularRV = create_default_transformed_rv_op(
+    at.random.triangular,
+    # inputs[3] = lower; inputs[5] = upper
+    IntervalTransform(lambda *inputs: (inputs[3], inputs[5])),
+)
+TransformedHalfNormalRV = create_default_transformed_rv_op(
+    at.random.halfnormal,
+    # inputs[3] = loc
+    IntervalTransform(lambda *inputs: (inputs[3], None)),
+)
+TransformedWaldRV = create_default_transformed_rv_op(
+    at.random.wald,
+    LogTransform(),
+)
+TransformedExponentialRV = create_default_transformed_rv_op(
+    at.random.exponential,
+    LogTransform(),
+)
+TransformedLognormalRV = create_default_transformed_rv_op(
+    at.random.lognormal,
+    LogTransform(),
+)
+TransformedHalfCauchyRV = create_default_transformed_rv_op(
+    at.random.halfcauchy,
+    LogTransform(),
+)
+TransformedGammaRV = create_default_transformed_rv_op(
at.random.gamma, + LogTransform(), +) +TransformedInvGammaRV = create_default_transformed_rv_op( + at.random.invgamma, + LogTransform(), +) +TransformedChiSquareRV = create_default_transformed_rv_op( + at.random.chisquare, + LogTransform(), +) +TransformedWeibullRV = create_default_transformed_rv_op( + at.random.weibull, + LogTransform(), +) +TransformedBetaRV = create_default_transformed_rv_op( + at.random.beta, + LogOddsTransform(), +) +TransformedVonMisesRV = create_default_transformed_rv_op( + at.random.vonmises, + CircularTransform(), +) +TransformedDirichletRV = create_default_transformed_rv_op( + at.random.dirichlet, + SimplexTransform(), +) diff --git a/pymc/logprob/utils.py b/pymc/logprob/utils.py new file mode 100644 index 00000000000..4f7e0058c94 --- /dev/null +++ b/pymc/logprob/utils.py @@ -0,0 +1,254 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import warnings + +from typing import Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple + +import numpy as np + +from aesara import tensor as at +from aesara.graph import Apply, Op +from aesara.graph.basic import Constant, clone_get_equiv, graph_inputs, walk +from aesara.graph.fg import FunctionGraph +from aesara.link.c.type import CType +from aesara.raise_op import CheckAndRaise +from aesara.tensor.var import TensorVariable + +from pymc.logprob.abstract import MeasurableVariable, _logprob + + +def walk_model( + graphs: Iterable[TensorVariable], + walk_past_rvs: bool = False, + stop_at_vars: Optional[Set[TensorVariable]] = None, + expand_fn: Callable[[TensorVariable], List[TensorVariable]] = lambda var: [], +) -> Generator[TensorVariable, None, None]: + """Walk model graphs and yield their nodes. + + By default, these walks will not go past ``MeasurableVariable`` nodes. + + Parameters + ========== + graphs + The graphs to walk. 
+ walk_past_rvs + If ``True``, the walk will not terminate at ``MeasurableVariable``s. + stop_at_vars + A list of variables at which the walk will terminate. + expand_fn + A function that returns the next variable(s) to be traversed. + """ + if stop_at_vars is None: + stop_at_vars = set() + + def expand(var: TensorVariable, stop_at_vars=stop_at_vars) -> List[TensorVariable]: + new_vars = expand_fn(var) + + if ( + var.owner + and (walk_past_rvs or not isinstance(var.owner.op, MeasurableVariable)) + and (var not in stop_at_vars) + ): + new_vars.extend(reversed(var.owner.inputs)) + + return new_vars + + yield from walk(graphs, expand, False) + + +def replace_rvs_in_graphs( + graphs: Iterable[TensorVariable], + replacement_fn: Callable[ + [TensorVariable, Dict[TensorVariable, TensorVariable]], + Dict[TensorVariable, TensorVariable], + ], + initial_replacements: Optional[Dict[TensorVariable, TensorVariable]] = None, + **kwargs, +) -> Tuple[TensorVariable, Dict[TensorVariable, TensorVariable]]: + """Replace random variables in graphs. + + This will *not* recompute test values. + + Parameters + ========== + graphs + The graphs in which random variables are to be replaced. + + Returns + ======= + A ``tuple`` containing the transformed graphs and a ``dict`` of the + replacements that were made. + """ + replacements = {} + if initial_replacements: + replacements.update(initial_replacements) + + def expand_replace(var: TensorVariable) -> List[TensorVariable]: + new_nodes: List[TensorVariable] = [] + if var.owner and isinstance(var.owner.op, MeasurableVariable): + new_nodes.extend(replacement_fn(var, replacements)) + return new_nodes + + for var in walk_model(graphs, expand_fn=expand_replace, **kwargs): + pass + + if replacements: + inputs = [i for i in graph_inputs(graphs) if not isinstance(i, Constant)] + equiv = {k: k for k in replacements.keys()} + equiv = clone_get_equiv(inputs, graphs, False, False, equiv) + + fg = FunctionGraph( + [equiv[i] for i in inputs], + [equiv[o] for o in graphs], + clone=False, + ) + + fg.replace_all(replacements.items(), import_missing=True) + + graphs = list(fg.outputs) + + return graphs, replacements + + +def rvs_to_value_vars( + graphs: Iterable[TensorVariable], + initial_replacements: Optional[Dict[TensorVariable, TensorVariable]] = None, + **kwargs, +) -> Tuple[TensorVariable, Dict[TensorVariable, TensorVariable]]: + """Replace random variables in graphs with their value variables. + + This will *not* recompute test values in the resulting graphs. + + Parameters + ========== + graphs + The graphs in which to perform the replacements. + initial_replacements + A ``dict`` containing the initial replacements to be made. 
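+
+    Example
+    =======
+    A minimal sketch of typical usage (variable names here are illustrative;
+    the same pattern appears in the tests)::
+
+        x_rv = at.random.normal(0, 1)
+        x_vv = x_rv.clone()
+        # ``res`` computes ``x_vv + 1``; no `RandomVariable` is left in it.
+        (res,), _ = rvs_to_value_vars([x_rv + 1], initial_replacements={x_rv: x_vv})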
+ + """ + + def replace_fn(var, replacements): + rv_value_var = replacements.get(var, None) + if rv_value_var is not None: + replacements[var] = rv_value_var + # In case the value variable is itself a graph, we walk it for + # potential replacements + return [rv_value_var] + return [] + + return replace_rvs_in_graphs(graphs, replace_fn, initial_replacements, **kwargs) + + +def convert_indices(indices, entry): + if indices and isinstance(entry, CType): + rval = indices.pop(0) + return rval + elif isinstance(entry, slice): + return slice( + convert_indices(indices, entry.start), + convert_indices(indices, entry.stop), + convert_indices(indices, entry.step), + ) + else: + return entry + + +def indices_from_subtensor(idx_list, indices): + """Compute a useable index tuple from the inputs of a ``*Subtensor**`` ``Op``.""" + return tuple( + tuple(convert_indices(list(indices), idx) for idx in idx_list) if idx_list else indices + ) + + +class ParameterValueError(ValueError): + """Exception for invalid parameters values in logprob graphs""" + + +class CheckParameterValue(CheckAndRaise): + """Implements a parameter value check in a logprob graph. + + Raises `ParameterValueError` if the check is not True. + """ + + def __init__(self, msg=""): + super().__init__(ParameterValueError, msg) + + def __str__(self): + return f"Check{{{self.msg}}}" + + +class DiracDelta(Op): + """An `Op` that represents a Dirac-delta distribution.""" + + __props__ = ("rtol", "atol") + + def __init__(self, rtol=1e-5, atol=1e-8): + self.rtol = rtol + self.atol = atol + + def make_node(self, x): + x = at.as_tensor(x) + return Apply(self, [x], [x.type()]) + + def do_constant_folding(self, fgraph, node): + # Without this, the `Op` would be removed from the graph during + # canonicalization + return False + + def perform(self, node, inp, out): + (x,) = inp + (z,) = out + warnings.warn("DiracDelta is a dummy Op that shouldn't be used in a compiled graph") + z[0] = x + + def infer_shape(self, fgraph, node, input_shapes): + return input_shapes + + +MeasurableVariable.register(DiracDelta) + + +dirac_delta = DiracDelta() + + +@_logprob.register(DiracDelta) +def diracdelta_logprob(op, values, *inputs, **kwargs): + (values,) = values + (const_value,) = inputs + values, const_value = at.broadcast_arrays(values, const_value) + return at.switch(at.isclose(values, const_value, rtol=op.rtol, atol=op.atol), 0.0, -np.inf) diff --git a/pymc/sampling/jax.py b/pymc/sampling/jax.py index 880e6c5cdb1..9a8b437b54c 100644 --- a/pymc/sampling/jax.py +++ b/pymc/sampling/jax.py @@ -20,7 +20,6 @@ import jax import numpy as np -from aeppl.logprob import CheckParameterValue from aesara.compile import SharedVariable, Supervisor, mode from aesara.graph.basic import clone_replace, graph_inputs from aesara.graph.fg import FunctionGraph @@ -32,6 +31,7 @@ from pymc import Model, modelcontext from pymc.backends.arviz import find_constants, find_observations +from pymc.logprob.utils import CheckParameterValue from pymc.util import RandomSeed, _get_seeds_per_chain, get_default_varnames warnings.warn("This module is experimental.") diff --git a/pymc/tests/distributions/test_continuous.py b/pymc/tests/distributions/test_continuous.py index 3a6de9ed203..7cd9e7219af 100644 --- a/pymc/tests/distributions/test_continuous.py +++ b/pymc/tests/distributions/test_continuous.py @@ -22,15 +22,15 @@ import scipy.special as sp import scipy.stats as st -from aeppl.logprob import ParameterValueError from aesara.compile.mode import Mode import pymc as pm from pymc.aesaraf import 
floatX from pymc.distributions import logcdf, logp -from pymc.distributions.continuous import get_tau_sigma, interpolated +from pymc.distributions.continuous import Normal, get_tau_sigma, interpolated from pymc.distributions.dist_math import clipped_beta_rvs +from pymc.logprob.utils import ParameterValueError from pymc.tests.distributions.util import ( BaseTestDistributionRandom, Circ, @@ -49,6 +49,7 @@ seeded_scipy_distribution_builder, ) from pymc.tests.helpers import select_by_precision +from pymc.tests.logprob.utils import create_aesara_params, scipy_logprob_tester try: from polyagamma import polyagamma_cdf, polyagamma_pdf, random_polyagamma @@ -2248,3 +2249,22 @@ def dist(cls, **kwargs): extra_args={"rng": aesara.shared(rng)}, ref_rand=ref_rand, ) + + +class TestICDF: + @pytest.mark.parametrize( + "dist_params, obs, size", + [ + ((0, 1), np.array([-0.5, 0, 0.3, 0.5, 1, 1.5], dtype=np.float64), ()), + ((-1, 20), np.array([-0.5, 0, 0.3, 0.5, 1, 1.5], dtype=np.float64), ()), + ((-1, 20), np.array([-0.5, 0, 0.3, 0.5, 1, 1.5], dtype=np.float64), (2, 3)), + ], + ) + def test_normal_icdf(self, dist_params, obs, size): + + dist_params_at, obs_at, size_at = create_aesara_params(dist_params, obs, size) + dist_params = dict(zip(dist_params_at, dist_params)) + + x = Normal.dist(*dist_params_at, size=size_at) + + scipy_logprob_tester(x, obs, dist_params, test_fn=st.norm.ppf, test="icdf") diff --git a/pymc/tests/distributions/test_discrete.py b/pymc/tests/distributions/test_discrete.py index 7f326c79fde..30cac3cbb7b 100644 --- a/pymc/tests/distributions/test_discrete.py +++ b/pymc/tests/distributions/test_discrete.py @@ -23,7 +23,6 @@ import scipy.special as sp import scipy.stats as st -from aeppl.logprob import ParameterValueError from aesara.compile.mode import Mode from aesara.tensor import TensorVariable @@ -31,7 +30,8 @@ from pymc.aesaraf import floatX from pymc.distributions import logcdf, logp -from pymc.distributions.discrete import _OrderedLogistic, _OrderedProbit +from pymc.distributions.discrete import Geometric, _OrderedLogistic, _OrderedProbit +from pymc.logprob.utils import ParameterValueError from pymc.tests.distributions.util import ( BaseTestDistributionRandom, Bool, @@ -56,6 +56,7 @@ seeded_numpy_distribution_builder, seeded_scipy_distribution_builder, ) +from pymc.tests.logprob.utils import create_aesara_params, scipy_logprob_tester from pymc.vartypes import discrete_types @@ -1147,3 +1148,30 @@ def test_shape_inputs(self, eta, cutpoints, sigma, expected): ) p = categorical.owner.inputs[3].eval() assert p.shape == expected + + +class TestICDF: + @pytest.mark.parametrize( + "dist_params, obs, size", + [ + ((0.1,), np.array([-0.5, 0, 0.1, 0.5, 0.9, 1.0, 1.5], dtype=np.int64), ()), + ((0.5,), np.array([-0.5, 0, 0.1, 0.5, 0.9, 1.0, 1.5], dtype=np.int64), (3, 2)), + ( + (np.array([0.0, 0.2, 0.5, 1.0]),), + np.array([0.7, 0.7, 0.7, 0.7], dtype=np.int64), + (), + ), + ], + ) + def test_geometric_icdf(self, dist_params, obs, size): + + dist_params_at, obs_at, size_at = create_aesara_params(dist_params, obs, size) + dist_params = dict(zip(dist_params_at, dist_params)) + + x = Geometric.dist(*dist_params_at, size=size_at) + + def scipy_geom_icdf(value, p): + # Scipy ppf returns floats + return st.geom.ppf(value, p).astype(value.dtype) + + scipy_logprob_tester(x, obs, dist_params, test_fn=scipy_geom_icdf, test="icdf") diff --git a/pymc/tests/distributions/test_dist_math.py b/pymc/tests/distributions/test_dist_math.py index 528d4b3ea67..cecfa07853f 100644 --- 
a/pymc/tests/distributions/test_dist_math.py +++ b/pymc/tests/distributions/test_dist_math.py @@ -18,7 +18,6 @@ import pytest import scipy.special -from aeppl.logprob import ParameterValueError from aesara import config, function from aesara.tensor.random.basic import multinomial from scipy import interpolate, stats @@ -37,6 +36,7 @@ incomplete_beta, multigammaln, ) +from pymc.logprob.utils import ParameterValueError from pymc.tests.checks import close_to from pymc.tests.helpers import verify_grad diff --git a/pymc/tests/distributions/test_distribution.py b/pymc/tests/distributions/test_distribution.py index ed0afeefe56..80b91cb3604 100644 --- a/pymc/tests/distributions/test_distribution.py +++ b/pymc/tests/distributions/test_distribution.py @@ -21,7 +21,6 @@ import pytest import scipy.stats as st -from aeppl.abstract import get_measurable_outputs from aesara.tensor import TensorVariable import pymc as pm @@ -29,6 +28,7 @@ from pymc.distributions import DiracDelta, Flat, MvNormal, MvStudentT, logp from pymc.distributions.distribution import SymbolicRandomVariable, _moment, moment from pymc.distributions.shape_utils import change_dist_size, to_tuple +from pymc.logprob.abstract import get_measurable_outputs from pymc.tests.distributions.util import assert_moment_is_expected from pymc.util import _FutureWarningValidatingScratchpad @@ -352,7 +352,7 @@ class TestSymbolicRV(SymbolicRandomVariable): logp(x, 0) class TestInlinedSymbolicRV(SymbolicRandomVariable): - inline_aeppl = True + inline_logprob = True x_inline = TestInlinedSymbolicRV([], [Flat.dist()], ndim_supp=0)() assert np.isclose(logp(x_inline, 0).eval(), 0) diff --git a/pymc/tests/distributions/test_logprob.py b/pymc/tests/distributions/test_logprob.py index 8ca6ee97f43..560055b04d7 100644 --- a/pymc/tests/distributions/test_logprob.py +++ b/pymc/tests/distributions/test_logprob.py @@ -19,7 +19,6 @@ import pytest import scipy.stats.distributions as sp -from aeppl.abstract import get_measurable_outputs from aesara.graph.basic import ancestors from aesara.tensor.random.op import RandomVariable from aesara.tensor.subtensor import ( @@ -50,6 +49,7 @@ logcdf, logp, ) +from pymc.logprob.abstract import get_measurable_outputs from pymc.model import Model, Potential from pymc.tests.helpers import assert_no_rvs, select_by_precision diff --git a/pymc/tests/distributions/test_mixture.py b/pymc/tests/distributions/test_mixture.py index 94df80f8955..50d66ef7cb5 100644 --- a/pymc/tests/distributions/test_mixture.py +++ b/pymc/tests/distributions/test_mixture.py @@ -21,7 +21,6 @@ import pytest import scipy.stats as st -from aeppl.transforms import IntervalTransform, LogTransform, SimplexTransform from aesara import tensor as at from aesara.tensor import TensorVariable from aesara.tensor.random.op import RandomVariable @@ -53,6 +52,7 @@ from pymc.distributions.mixture import MixtureTransformWarning from pymc.distributions.shape_utils import change_dist_size, to_tuple from pymc.distributions.transforms import _default_transform +from pymc.logprob.transforms import IntervalTransform, LogTransform, SimplexTransform from pymc.math import expand_packed_triangular from pymc.model import Model from pymc.sampling.forward import ( diff --git a/pymc/tests/distributions/test_multivariate.py b/pymc/tests/distributions/test_multivariate.py index d2d1f3cea59..5ab9dc6b318 100644 --- a/pymc/tests/distributions/test_multivariate.py +++ b/pymc/tests/distributions/test_multivariate.py @@ -25,7 +25,6 @@ import scipy.special as sp import scipy.stats as st -from 
aeppl.logprob import ParameterValueError from aesara.tensor import TensorVariable from aesara.tensor.random.utils import broadcast_params @@ -40,6 +39,7 @@ quaddist_matrix, ) from pymc.distributions.shape_utils import change_dist_size, to_tuple +from pymc.logprob.utils import ParameterValueError from pymc.math import kronecker from pymc.sampling.forward import draw from pymc.tests.distributions.util import ( diff --git a/pymc/tests/distributions/test_truncated.py b/pymc/tests/distributions/test_truncated.py index fee242658f6..c089e24e55f 100644 --- a/pymc/tests/distributions/test_truncated.py +++ b/pymc/tests/distributions/test_truncated.py @@ -4,8 +4,6 @@ import pytest import scipy -from aeppl.logprob import ParameterValueError, _icdf -from aeppl.transforms import IntervalTransform from aesara.tensor.random.basic import GeometricRV, NormalRV from pymc import Censored, Model, draw, find_MAP, logp @@ -14,6 +12,9 @@ from pymc.distributions.transforms import _default_transform from pymc.distributions.truncated import Truncated, TruncatedRV, _truncated from pymc.exceptions import TruncationError +from pymc.logprob.abstract import _icdf +from pymc.logprob.transforms import IntervalTransform +from pymc.logprob.utils import ParameterValueError from pymc.tests.distributions.util import assert_moment_is_expected diff --git a/pymc/tests/distributions/util.py b/pymc/tests/distributions/util.py index 0a501da4e8c..35bc8dcadad 100644 --- a/pymc/tests/distributions/util.py +++ b/pymc/tests/distributions/util.py @@ -13,7 +13,6 @@ import scipy.special as sp import scipy.stats as st -from aeppl.logprob import ParameterValueError from aesara.compile.mode import Mode import pymc as pm @@ -23,6 +22,7 @@ from pymc.distributions.logprob import _joint_logp from pymc.distributions.shape_utils import change_dist_size from pymc.initial_point import make_initial_point_fn +from pymc.logprob.utils import ParameterValueError from pymc.tests.helpers import SeededTest, select_by_precision diff --git a/pymc/tests/logprob/__init__.py b/pymc/tests/logprob/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pymc/tests/logprob/test_abstract.py b/pymc/tests/logprob/test_abstract.py new file mode 100644 index 00000000000..684cc3319a8 --- /dev/null +++ b/pymc/tests/logprob/test_abstract.py @@ -0,0 +1,149 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import re + +import aesara.tensor as at +import pytest + +from aesara.scalar import Exp, exp +from aesara.tensor.random.basic import NormalRV + +from pymc.logprob.abstract import ( + MeasurableElemwise, + MeasurableVariable, + UnmeasurableVariable, + _get_measurable_outputs, + assign_custom_measurable_outputs, + noop_measurable_outputs_fn, +) + + +def assert_equal_hash(classA, classB): + assert hash(classA) == hash(classA.id_obj) + assert hash(classB) == hash(classB.id_obj) + assert classA == classB + assert hash(classA) == hash(classB) + + +@pytest.mark.parametrize( + "op, id_obj, class_dict", + [ + (None, None, UnmeasurableVariable.__dict__), + (None, (1, 2), UnmeasurableVariable.__dict__), + ( + NormalRV, + (NormalRV, noop_measurable_outputs_fn), + UnmeasurableVariable.__dict__, + ), + ], +) +def test_unmeasurable_variable_class(op, id_obj, class_dict): + A_dict = class_dict.copy() + B_dict = class_dict.copy() + + if id_obj is not None: + A_dict["id_obj"] = id_obj + B_dict["id_obj"] = id_obj + + if op is None: + parent_classes = (UnmeasurableVariable,) + else: + parent_classes = (op, UnmeasurableVariable) + + A = type("A", parent_classes, A_dict) + B = type("B", parent_classes, B_dict) + + assert_equal_hash(A, B) + + +def test_unmeasurable_meta_hash_reassignment(): + A_dict = UnmeasurableVariable.__dict__.copy() + B_dict = UnmeasurableVariable.__dict__.copy() + + A_dict["id_obj"] = (1, 2) + B_dict["id_obj"] = (1, 3) + + A = type("A", (UnmeasurableVariable,), A_dict) + B = type("B", (UnmeasurableVariable,), B_dict) + + assert A != B + assert hash(A) != hash(B) + + A.id_obj = (1, 3) + + assert_equal_hash(A, B) + + +def test_assign_custom_measurable_outputs(): + srng = at.random.RandomStream(seed=2320) + + X_rv = srng.normal(-10.0, 0.1, name="X") + Y_rv = srng.normal(10.0, 0.1, name="Y") + + # manually checking assign_custom_measurable_outputs + unmeasurable_X = assign_custom_measurable_outputs(X_rv.owner).op + unmeasurable_Y = assign_custom_measurable_outputs(Y_rv.owner).op + + assert_equal_hash(unmeasurable_X.__class__, unmeasurable_Y.__class__) + assert unmeasurable_X.__class__.__name__.startswith("Unmeasurable") + assert unmeasurable_X.__class__ in _get_measurable_outputs.registry + + # passing unmeasurable_X into assign_custom_measurable_outputs does nothing + + unmeas_X_rv = unmeasurable_X(-5, 0.1, name="unmeas_X") + + unmeasurable_X2_node = 
assign_custom_measurable_outputs(unmeas_X_rv.owner) + unmeasurable_X2 = unmeasurable_X2_node.op + + assert unmeasurable_X2_node == unmeas_X_rv.owner + assert_equal_hash(unmeasurable_X.__class__, unmeasurable_X2.__class__) + + with pytest.raises(ValueError): + assign_custom_measurable_outputs(unmeas_X_rv.owner, lambda x: x) + + +def test_measurable_elemwise(): + # Default does not accept any scalar_op + with pytest.raises(TypeError, match=re.escape("scalar_op exp is not valid")): + MeasurableElemwise(exp) + + class TestMeasurableElemwise(MeasurableElemwise): + valid_scalar_types = (Exp,) + + measurable_exp_op = TestMeasurableElemwise(scalar_op=exp) + measurable_exp = measurable_exp_op(0.0) + assert isinstance(measurable_exp.owner.op, MeasurableVariable) diff --git a/pymc/tests/logprob/test_censoring.py b/pymc/tests/logprob/test_censoring.py new file mode 100644 index 00000000000..2353e8c0b31 --- /dev/null +++ b/pymc/tests/logprob/test_censoring.py @@ -0,0 +1,254 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
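+
+# The tests below check the censoring identity that the clip rewrite encodes:
+# for a continuous RV clipped to ``[lb, ub]``, the censored tail mass collapses
+# onto the bounds, i.e. ``logcdf(base, lb)`` at the lower bound,
+# ``logsf(base, ub)`` at the upper bound, and ``logpdf(base, v)`` strictly in
+# between (discrete RVs combine the pmf with the cdf/sf at the bounds).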
+ +import aesara +import aesara.tensor as at +import numpy as np +import pytest +import scipy as sp +import scipy.stats as st + +from pymc.logprob import factorized_joint_logprob, joint_logprob +from pymc.logprob.transforms import LogTransform, TransformValuesRewrite +from pymc.tests.helpers import assert_no_rvs + + +@aesara.config.change_flags(compute_test_value="raise") +def test_continuous_rv_clip(): + x_rv = at.random.normal(0.5, 1) + cens_x_rv = at.clip(x_rv, -2, 2) + + cens_x_vv = cens_x_rv.clone() + cens_x_vv.tag.test_value = 0 + + logp = joint_logprob({cens_x_rv: cens_x_vv}) + assert_no_rvs(logp) + + logp_fn = aesara.function([cens_x_vv], logp) + ref_scipy = st.norm(0.5, 1) + + assert logp_fn(-3) == -np.inf + assert logp_fn(3) == -np.inf + + assert np.isclose(logp_fn(-2), ref_scipy.logcdf(-2)) + assert np.isclose(logp_fn(2), ref_scipy.logsf(2)) + assert np.isclose(logp_fn(0), ref_scipy.logpdf(0)) + + +def test_discrete_rv_clip(): + x_rv = at.random.poisson(2) + cens_x_rv = at.clip(x_rv, 1, 4) + + cens_x_vv = cens_x_rv.clone() + + logp = joint_logprob({cens_x_rv: cens_x_vv}) + assert_no_rvs(logp) + + logp_fn = aesara.function([cens_x_vv], logp) + ref_scipy = st.poisson(2) + + assert logp_fn(0) == -np.inf + assert logp_fn(5) == -np.inf + + assert np.isclose(logp_fn(1), ref_scipy.logcdf(1)) + assert np.isclose(logp_fn(4), np.logaddexp(ref_scipy.logsf(4), ref_scipy.logpmf(4))) + assert np.isclose(logp_fn(2), ref_scipy.logpmf(2)) + + +def test_one_sided_clip(): + x_rv = at.random.normal(0, 1) + lb_cens_x_rv = at.clip(x_rv, -1, x_rv) + ub_cens_x_rv = at.clip(x_rv, x_rv, 1) + + lb_cens_x_vv = lb_cens_x_rv.clone() + ub_cens_x_vv = ub_cens_x_rv.clone() + + lb_logp = joint_logprob({lb_cens_x_rv: lb_cens_x_vv}) + ub_logp = joint_logprob({ub_cens_x_rv: ub_cens_x_vv}) + assert_no_rvs(lb_logp) + assert_no_rvs(ub_logp) + + logp_fn = aesara.function([lb_cens_x_vv, ub_cens_x_vv], [lb_logp, ub_logp]) + ref_scipy = st.norm(0, 1) + + assert np.all(np.array(logp_fn(-2, 2)) == -np.inf) + assert np.all(np.array(logp_fn(2, -2)) != -np.inf) + np.testing.assert_almost_equal(logp_fn(-1, 1), ref_scipy.logcdf(-1)) + np.testing.assert_almost_equal(logp_fn(1, -1), ref_scipy.logpdf(-1)) + + +def test_useless_clip(): + x_rv = at.random.normal(0.5, 1, size=3) + cens_x_rv = at.clip(x_rv, x_rv, x_rv) + + cens_x_vv = cens_x_rv.clone() + + logp = joint_logprob({cens_x_rv: cens_x_vv}, sum=False) + assert_no_rvs(logp) + + logp_fn = aesara.function([cens_x_vv], logp) + ref_scipy = st.norm(0.5, 1) + + np.testing.assert_allclose(logp_fn([-2, 0, 2]), ref_scipy.logpdf([-2, 0, 2])) + + +def test_random_clip(): + lb_rv = at.random.normal(0, 1, size=2) + x_rv = at.random.normal(0, 2) + cens_x_rv = at.clip(x_rv, lb_rv, [1, 1]) + + lb_vv = lb_rv.clone() + cens_x_vv = cens_x_rv.clone() + logp = joint_logprob({cens_x_rv: cens_x_vv, lb_rv: lb_vv}, sum=False) + assert_no_rvs(logp) + + logp_fn = aesara.function([lb_vv, cens_x_vv], logp) + res = logp_fn([0, -1], [-1, -1]) + assert res[0] == -np.inf + assert res[1] != -np.inf + + +def test_broadcasted_clip_constant(): + lb_rv = at.random.uniform(0, 1) + x_rv = at.random.normal(0, 2) + cens_x_rv = at.clip(x_rv, lb_rv, [1, 1]) + + lb_vv = lb_rv.clone() + cens_x_vv = cens_x_rv.clone() + + logp = joint_logprob({cens_x_rv: cens_x_vv, lb_rv: lb_vv}) + assert_no_rvs(logp) + + +def test_broadcasted_clip_random(): + lb_rv = at.random.normal(0, 1) + x_rv = at.random.normal(0, 2, size=2) + cens_x_rv = at.clip(x_rv, lb_rv, 1) + + lb_vv = lb_rv.clone() + cens_x_vv = cens_x_rv.clone() + + logp = 
joint_logprob({cens_x_rv: cens_x_vv, lb_rv: lb_vv}) + assert_no_rvs(logp) + + +def test_fail_base_and_clip_have_values(): + """Test failure when both base_rv and clipped_rv are given value vars""" + x_rv = at.random.normal(0, 1) + cens_x_rv = at.clip(x_rv, x_rv, 1) + cens_x_rv.name = "cens_x" + + x_vv = x_rv.clone() + cens_x_vv = cens_x_rv.clone() + with pytest.raises(RuntimeError, match="could not be derived: {cens_x}"): + factorized_joint_logprob({cens_x_rv: cens_x_vv, x_rv: x_vv}) + + +def test_fail_multiple_clip_single_base(): + """Test failure when multiple clipped_rvs share a single base_rv""" + base_rv = at.random.normal(0, 1) + cens_rv1 = at.clip(base_rv, -1, 1) + cens_rv1.name = "cens1" + cens_rv2 = at.clip(base_rv, -1, 1) + cens_rv2.name = "cens2" + + cens_vv1 = cens_rv1.clone() + cens_vv2 = cens_rv2.clone() + with pytest.raises(RuntimeError, match="could not be derived: {cens2}"): + factorized_joint_logprob({cens_rv1: cens_vv1, cens_rv2: cens_vv2}) + + +def test_deterministic_clipping(): + x_rv = at.random.normal(0, 1) + clip = at.clip(x_rv, 0, 0) + y_rv = at.random.normal(clip, 1) + + x_vv = x_rv.clone() + y_vv = y_rv.clone() + logp = joint_logprob({x_rv: x_vv, y_rv: y_vv}) + assert_no_rvs(logp) + + logp_fn = aesara.function([x_vv, y_vv], logp) + assert np.isclose( + logp_fn(-1, 1), + st.norm(0, 1).logpdf(-1) + st.norm(0, 1).logpdf(1), + ) + + +def test_clip_transform(): + x_rv = at.random.normal(0.5, 1) + cens_x_rv = at.clip(x_rv, 0, x_rv) + + cens_x_vv = cens_x_rv.clone() + + transform = TransformValuesRewrite({cens_x_vv: LogTransform()}) + logp = joint_logprob({cens_x_rv: cens_x_vv}, extra_rewrites=transform) + + cens_x_vv_testval = -1 + obs_logp = logp.eval({cens_x_vv: cens_x_vv_testval}) + exp_logp = sp.stats.norm(0.5, 1).logpdf(np.exp(cens_x_vv_testval)) + cens_x_vv_testval + + assert np.isclose(obs_logp, exp_logp) + + +@pytest.mark.parametrize("rounding_op", (at.round, at.floor, at.ceil)) +def test_rounding(rounding_op): + loc = 1 + scale = 2 + test_value = np.arange(-3, 4) + + x = at.random.normal(loc, scale, size=test_value.shape, name="x") + xr = rounding_op(x) + xr.name = "xr" + + xr_vv = xr.clone() + logp = joint_logprob({xr: xr_vv}, sum=False) + assert logp is not None + + x_sp = st.norm(loc, scale) + if rounding_op == at.round: + expected_logp = np.log(x_sp.cdf(test_value + 0.5) - x_sp.cdf(test_value - 0.5)) + elif rounding_op == at.floor: + expected_logp = np.log(x_sp.cdf(test_value + 1.0) - x_sp.cdf(test_value)) + elif rounding_op == at.ceil: + expected_logp = np.log(x_sp.cdf(test_value) - x_sp.cdf(test_value - 1.0)) + else: + raise NotImplementedError() + + assert np.allclose( + logp.eval({xr_vv: test_value}), + expected_logp, + ) diff --git a/pymc/tests/logprob/test_composite_logprob.py b/pymc/tests/logprob/test_composite_logprob.py new file mode 100644 index 00000000000..85ac92e7df1 --- /dev/null +++ b/pymc/tests/logprob/test_composite_logprob.py @@ -0,0 +1,213 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import aesara +import aesara.tensor as at +import numpy as np +import scipy.stats as st + +from pymc.logprob import joint_logprob +from pymc.logprob.censoring import MeasurableClip +from pymc.logprob.rewriting import construct_ir_fgraph +from pymc.tests.helpers import assert_no_rvs + + +def test_scalar_clipped_mixture(): + x = at.clip(at.random.normal(loc=1), 0.5, 1.5) + x.name = "x" + y = at.random.beta(1, 2, name="y") + + comps = at.stack([x, y]) + comps.name = "comps" + idxs = at.random.bernoulli(0.4, name="idxs") + mix = comps[idxs] + mix.name = "mix" + + mix_vv = mix.clone() + mix_vv.name = "mix_val" + idxs_vv = idxs.clone() + idxs_vv.name = "idxs_val" + + logp = joint_logprob({idxs: idxs_vv, mix: mix_vv}) + + logp_fn = aesara.function([idxs_vv, mix_vv], logp) + assert logp_fn(0, 0.4) == -np.inf + assert np.isclose(logp_fn(0, 0.5), st.norm.logcdf(0.5, 1) + np.log(0.6)) + assert np.isclose(logp_fn(0, 1.3), st.norm.logpdf(1.3, 1) + np.log(0.6)) + assert np.isclose(logp_fn(1, 0.4), st.beta.logpdf(0.4, 1, 2) + np.log(0.4)) + + +def test_nested_scalar_mixtures(): + x = at.random.normal(loc=-50, name="x") + y = at.random.normal(loc=50, name="y") + comps1 = at.stack([x, y]) + comps1.name = "comps1" + idxs1 = at.random.bernoulli(0.5, name="idxs1") + mix1 = comps1[idxs1] + mix1.name = "mix1" + + w = at.random.normal(loc=-100, name="w") + z = at.random.normal(loc=100, name="z") + comps2 = at.stack([w, z]) + comps2.name = "comps2" + idxs2 = at.random.bernoulli(0.5, name="idxs2") + mix2 = comps2[idxs2] + mix2.name = "mix2" + + comps12 = at.stack([mix1, mix2]) + comps12.name = "comps12" + idxs12 = at.random.bernoulli(0.5, name="idxs12") + mix12 = comps12[idxs12] + mix12.name = "mix12" + + idxs1_vv = idxs1.clone() + idxs2_vv = idxs2.clone() + idxs12_vv = idxs12.clone() + mix12_vv = mix12.clone() + + logp = joint_logprob({idxs1: idxs1_vv, idxs2: idxs2_vv, idxs12: idxs12_vv, mix12: mix12_vv}) + logp_fn = aesara.function([idxs1_vv, idxs2_vv, idxs12_vv, mix12_vv], logp) + + expected_mu_logpdf = st.norm.logpdf(0) + np.log(0.5) * 3 + assert np.isclose(logp_fn(0, 0, 0, -50), expected_mu_logpdf) + assert np.isclose(logp_fn(0, 1, 0, -50), expected_mu_logpdf) + assert np.isclose(logp_fn(1, 0, 0, 50), expected_mu_logpdf) + assert np.isclose(logp_fn(1, 1, 0, 50), expected_mu_logpdf) + assert np.isclose(logp_fn(0, 0, 1, -100), expected_mu_logpdf) + assert np.isclose(logp_fn(0, 1, 1, 100), 
expected_mu_logpdf) + assert np.isclose(logp_fn(1, 0, 1, -100), expected_mu_logpdf) + assert np.isclose(logp_fn(1, 1, 1, 100), expected_mu_logpdf) + + assert np.isclose(logp_fn(0, 0, 0, 50), st.norm.logpdf(100) + np.log(0.5) * 3) + assert np.isclose(logp_fn(0, 0, 1, 50), st.norm.logpdf(150) + np.log(0.5) * 3) + + +def test_unvalued_ir_reversion(): + """Make sure that un-valued IR rewrites are reverted.""" + x_rv = at.random.normal() + y_rv = at.clip(x_rv, 0, 1) + z_rv = at.random.normal(y_rv, 1, name="z") + z_vv = z_rv.clone() + + # Only the `z_rv` is "valued", so `y_rv` doesn't need to be converted into + # measurable IR. + rv_values = {z_rv: z_vv} + + z_fgraph, _, memo = construct_ir_fgraph(rv_values) + + assert memo[y_rv] in z_fgraph.preserve_rv_mappings.measurable_conversions + + measurable_y_rv = z_fgraph.preserve_rv_mappings.measurable_conversions[memo[y_rv]] + assert isinstance(measurable_y_rv.owner.op, MeasurableClip) + + # `construct_ir_fgraph` should've reverted the un-valued measurable IR + # change + assert measurable_y_rv not in z_fgraph + + +def test_shifted_cumsum(): + x = at.random.normal(size=(5,), name="x") + y = 5 + at.cumsum(x) + y.name = "y" + + y_vv = y.clone() + logp = joint_logprob({y: y_vv}) + assert np.isclose( + logp.eval({y_vv: np.arange(5) + 1 + 5}), + st.norm.logpdf(1) * 5, + ) + + +def test_double_log_transform_rv(): + base_rv = at.random.normal(0, 1) + y_rv = at.log(at.log(base_rv)) + y_rv.name = "y" + + y_vv = y_rv.clone() + logp = joint_logprob({y_rv: y_vv}, sum=False) + logp_fn = aesara.function([y_vv], logp) + + log_log_y_val = np.asarray(0.5) + log_y_val = np.exp(log_log_y_val) + y_val = np.exp(log_y_val) + np.testing.assert_allclose( + logp_fn(log_log_y_val), + st.norm().logpdf(y_val) + log_y_val + log_log_y_val, + ) + + +def test_affine_transform_rv(): + loc = at.scalar("loc") + scale = at.vector("scale") + rv_size = 3 + + y_rv = loc + at.random.normal(0, 1, size=rv_size, name="base_rv") * scale + y_rv.name = "y" + y_vv = y_rv.clone() + + logp = joint_logprob({y_rv: y_vv}, sum=False) + assert_no_rvs(logp) + logp_fn = aesara.function([loc, scale, y_vv], logp) + + loc_test_val = 4.0 + scale_test_val = np.full(rv_size, 0.5) + y_test_val = np.full(rv_size, 1.0) + + np.testing.assert_allclose( + logp_fn(loc_test_val, scale_test_val, y_test_val), + st.norm(loc_test_val, scale_test_val).logpdf(y_test_val), + ) + + +def test_affine_log_transform_rv(): + a, b = at.scalars("a", "b") + base_rv = at.random.lognormal(0, 1, name="base_rv", size=(1, 2)) + y_rv = a + at.log(base_rv) * b + y_rv.name = "y" + + y_vv = y_rv.clone() + + logp = joint_logprob({y_rv: y_vv}, sum=False) + logp_fn = aesara.function([a, b, y_vv], logp) + + a_val = -1.5 + b_val = 3.0 + y_val = [[0.1, 0.1]] + + assert np.allclose( + logp_fn(a_val, b_val, y_val), + st.norm(a_val, b_val).logpdf(y_val), + ) diff --git a/pymc/tests/logprob/test_cumsum.py b/pymc/tests/logprob/test_cumsum.py new file mode 100644 index 00000000000..e1229a29a16 --- /dev/null +++ b/pymc/tests/logprob/test_cumsum.py @@ -0,0 +1,118 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import aesara +import aesara.tensor as at +import numpy as np +import pytest +import scipy.stats as st + +from pymc.logprob import joint_logprob +from pymc.tests.helpers import assert_no_rvs + + +@pytest.mark.parametrize( + "size, axis", + [ + (10, None), + (10, 0), + ((2, 10), 0), + ((2, 10), 1), + ((3, 2, 10), 0), + ((3, 2, 10), 1), + ((3, 2, 10), 2), + ], +) +def test_normal_cumsum(size, axis): + rv = at.random.normal(0, 1, size=size).cumsum(axis) + vv = rv.clone() + logp = joint_logprob({rv: vv}) + assert_no_rvs(logp) + + assert np.isclose( + st.norm(0, 1).logpdf(np.ones(size)).sum(), + logp.eval({vv: np.ones(size).cumsum(axis)}), + ) + + +@pytest.mark.parametrize( + "size, axis", + [ + (10, None), + (10, 0), + ((2, 10), 0), + ((2, 10), 1), + ((3, 2, 10), 0), + ((3, 2, 10), 1), + ((3, 2, 10), 2), + ], +) +def test_bernoulli_cumsum(size, axis): + rv = at.random.bernoulli(0.9, size=size).cumsum(axis) + vv = rv.clone() + logp = joint_logprob({rv: vv}) + assert_no_rvs(logp) + + assert np.isclose( + st.bernoulli(0.9).logpmf(np.ones(size)).sum(), + logp.eval({vv: np.ones(size, int).cumsum(axis)}), + ) + + +def test_destructive_cumsum_fails(): + """Test that a cumsum that mixes dimensions fails""" + x_rv = at.random.normal(size=(2, 2, 2)).cumsum() + x_vv = x_rv.clone() + with pytest.raises(RuntimeError, match="could not be derived"): + joint_logprob({x_rv: x_vv}) + + +def test_deterministic_cumsum(): + """Test that deterministic cumsum is not affected""" + x_rv = at.random.normal(1, 1, size=5) + cumsum_x_rv = at.cumsum(x_rv) + y_rv = at.random.normal(cumsum_x_rv, 1) + + x_vv = x_rv.clone() + y_vv = y_rv.clone() + logp = joint_logprob({x_rv: x_vv, y_rv: y_vv}) + assert_no_rvs(logp) + + logp_fn = aesara.function([x_vv, y_vv], logp) + assert np.isclose( + logp_fn(np.ones(5), np.arange(5) + 1), + st.norm(1, 1).logpdf(1) * 10, + ) diff --git a/pymc/tests/logprob/test_joint_logprob.py b/pymc/tests/logprob/test_joint_logprob.py new file mode 100644 index 00000000000..82d697f3dd2 --- /dev/null +++ b/pymc/tests/logprob/test_joint_logprob.py @@ -0,0 +1,311 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import warnings + +import aesara +import aesara.tensor as at +import numpy as np +import pytest +import scipy.stats.distributions as sp + +from aesara.graph.basic import ancestors, equal_computations +from aesara.tensor.subtensor import ( + AdvancedIncSubtensor, + AdvancedIncSubtensor1, + AdvancedSubtensor, + AdvancedSubtensor1, + IncSubtensor, + Subtensor, +) + +from pymc.logprob.abstract import logprob +from pymc.logprob.joint_logprob import factorized_joint_logprob, joint_logprob +from pymc.logprob.utils import rvs_to_value_vars, walk_model +from pymc.tests.helpers import assert_no_rvs + + +def test_joint_logprob_basic(): + # A simple check for when `joint_logprob` is the same as `logprob` + a = at.random.uniform(0.0, 1.0) + a.name = "a" + a_value_var = a.clone() + + a_logp = joint_logprob({a: a_value_var}, sum=False) + a_logp_exp = logprob(a, a_value_var) + + assert equal_computations([a_logp], [a_logp_exp]) + + # Let's try a hierarchical model + sigma = at.random.invgamma(0.5, 0.5) + Y = at.random.normal(0.0, sigma) + + sigma_value_var = sigma.clone() + y_value_var = Y.clone() + + total_ll = joint_logprob({Y: y_value_var, sigma: sigma_value_var}, sum=False) + + # We need to replace the reference to `sigma` in `Y` with its value + # variable + ll_Y = logprob(Y, y_value_var) + (ll_Y,), _ = rvs_to_value_vars( + [ll_Y], + initial_replacements={sigma: sigma_value_var}, + ) + total_ll_exp = logprob(sigma, sigma_value_var) + ll_Y + + assert equal_computations([total_ll], [total_ll_exp]) + + # Now, make sure we can compute a joint log-probability for a hierarchical + # model with some non-`RandomVariable` nodes + c = at.random.normal() + c.name = "c" + b_l = c * a + 2.0 + b = at.random.uniform(b_l, b_l + 1.0) + b.name = "b" + + b_value_var = b.clone() + c_value_var = c.clone() + + b_logp = joint_logprob({a: a_value_var, b: b_value_var, c: c_value_var}) + + # There shouldn't be any `RandomVariable`s in the resulting graph + assert_no_rvs(b_logp) + + res_ancestors = 
list(walk_model((b_logp,), walk_past_rvs=True))
+    assert b_value_var in res_ancestors
+    assert c_value_var in res_ancestors
+    assert a_value_var in res_ancestors
+
+
+def test_joint_logprob_multi_obs():
+
+    a = at.random.uniform(0.0, 1.0)
+    b = at.random.normal(0.0, 1.0)
+
+    a_val = a.clone()
+    b_val = b.clone()
+
+    logp = joint_logprob({a: a_val, b: b_val}, sum=False)
+    logp_exp = logprob(a, a_val) + logprob(b, b_val)
+
+    assert equal_computations([logp], [logp_exp])
+
+    x = at.random.normal(0, 1)
+    y = at.random.normal(x, 1)
+
+    x_val = x.clone()
+    y_val = y.clone()
+
+    logp = joint_logprob({x: x_val, y: y_val})
+    exp_logp = joint_logprob({x: x_val, y: y_val})
+
+    assert equal_computations([logp], [exp_logp])
+
+
+def test_joint_logprob_diff_dims():
+    M = at.matrix("M")
+    x = at.random.normal(0, 1, size=M.shape[1], name="X")
+    y = at.random.normal(M.dot(x), 1, name="Y")
+
+    x_vv = x.clone()
+    x_vv.name = "x"
+    y_vv = y.clone()
+    y_vv.name = "y"
+
+    logp = joint_logprob({x: x_vv, y: y_vv})
+
+    M_val = np.random.normal(size=(10, 3))
+    x_val = np.random.normal(size=(3,))
+    y_val = np.random.normal(size=(10,))
+
+    point = {M: M_val, x_vv: x_val, y_vv: y_val}
+    logp_val = logp.eval(point)
+
+    exp_logp_val = (
+        sp.norm.logpdf(x_val, 0, 1).sum() + sp.norm.logpdf(y_val, M_val.dot(x_val), 1).sum()
+    )
+    assert exp_logp_val == pytest.approx(logp_val)
+
+
+@pytest.mark.parametrize(
+    "indices, size",
+    [
+        (slice(0, 2), 5),
+        (np.r_[True, True, False, False, True], 5),
+        (np.r_[0, 1, 4], 5),
+        ((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)),
+    ],
+)
+def test_joint_logprob_incsubtensor(indices, size):
+    """Make sure we can compute a joint log-probability for ``Y[idx] = data`` where ``Y`` is univariate."""
+
+    rng = np.random.RandomState(232)
+    mu = np.power(10, np.arange(np.prod(size))).reshape(size)
+    sigma = 0.001
+    data = rng.normal(mu[indices], 1.0)
+    y_val = rng.normal(mu, sigma, size=size)
+
+    Y_base_rv = at.random.normal(mu, sigma, size=size)
+    Y_rv = at.set_subtensor(Y_base_rv[indices], data)
+    Y_rv.name = "Y"
+    y_value_var = Y_rv.clone()
+    y_value_var.name = "y"
+
+    assert isinstance(Y_rv.owner.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1))
+
+    Y_rv_logp = joint_logprob({Y_rv: y_value_var}, sum=False)
+
+    obs_logps = Y_rv_logp.eval({y_value_var: y_val})
+
+    y_val_idx = y_val.copy()
+    y_val_idx[indices] = data
+    exp_obs_logps = sp.norm.logpdf(y_val_idx, mu, sigma)
+
+    np.testing.assert_almost_equal(obs_logps, exp_obs_logps)
+
+
+def test_incsubtensor_original_values_output_dict():
+    """
+    Test that the original un-incsubtensor value variable appears as the key of
+    the logprob factor
+    """
+
+    base_rv = at.random.normal(0, 1, size=2)
+    rv = at.set_subtensor(base_rv[0], 5)
+    vv = rv.clone()
+
+    logp_dict = factorized_joint_logprob({rv: vv})
+    assert vv in logp_dict
+
+
+def test_joint_logprob_subtensor():
+    """Make sure we can compute a joint log-probability for ``Y[I]`` where ``Y`` and ``I`` are random variables."""
+
+    size = 5
+
+    mu_base = np.power(10, np.arange(np.prod(size))).reshape(size)
+    mu = np.stack([mu_base, -mu_base])
+    sigma = 0.001
+    rng = aesara.shared(np.random.RandomState(232), borrow=True)
+
+    A_rv = at.random.normal(mu, sigma, rng=rng)
+    A_rv.name = "A"
+
+    p = 0.5
+
+    I_rv = at.random.bernoulli(p, size=size, rng=rng)
+    I_rv.name = "I"
+
+    A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1] :]]
+
+    assert isinstance(A_idx.owner.op, (Subtensor, AdvancedSubtensor, AdvancedSubtensor1))
+
+    A_idx_value_var = A_idx.type()
+    A_idx_value_var.name = "A_idx_value"
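+    # For every column, ``A_idx`` holds the row of ``A_rv`` selected by ``I_rv``,
+    # so the joint logprob has to combine a normal factor (for the selected
+    # entries) with a bernoulli factor (for the indices), as checked below.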
+ + I_value_var = I_rv.type() + I_value_var.name = "I_value" + + A_idx_logp = joint_logprob({A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False) + + logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp) + + # The compiled graph should not contain any `RandomVariables` + assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0]) + + decimals = 6 if aesara.config.floatX == "float64" else 4 + + test_val_rng = np.random.RandomState(3238) + + for i in range(10): + bern_sp = sp.bernoulli(p) + I_value = bern_sp.rvs(size=size, random_state=test_val_rng).astype(I_rv.dtype) + + norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1] :]], sigma) + A_idx_value = norm_sp.rvs(random_state=test_val_rng).astype(A_idx.dtype) + + exp_obs_logps = norm_sp.logpdf(A_idx_value) + exp_obs_logps += bern_sp.logpmf(I_value) + + logp_vals = logp_vals_fn(A_idx_value, I_value) + + np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals) + + +def test_persist_inputs(): + """Make sure we don't unnecessarily clone variables.""" + x = at.scalar("x") + beta_rv = at.random.normal(0, 1, name="beta") + Y_rv = at.random.normal(beta_rv * x, 1, name="y") + + beta_vv = beta_rv.type() + y_vv = Y_rv.clone() + + logp = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv}) + + assert x in ancestors([logp]) + + # Make sure we don't clone value variables when they're graphs. + y_vv_2 = y_vv * 2 + logp_2 = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv_2}) + + assert y_vv_2 in ancestors([logp_2]) + + +def test_warn_random_not_found(): + x_rv = at.random.normal(name="x") + y_rv = at.random.normal(x_rv, 1, name="y") + + y_vv = y_rv.clone() + + with pytest.warns(UserWarning): + factorized_joint_logprob({y_rv: y_vv}) + + with warnings.catch_warnings(): + warnings.simplefilter("error") + factorized_joint_logprob({y_rv: y_vv}, warn_missing_rvs=False) + + +def test_multiple_rvs_to_same_value_raises(): + x_rv1 = at.random.normal(name="x1") + x_rv2 = at.random.normal(name="x2") + x = x_rv1.type() + x.name = "x" + + msg = "More than one logprob factor was assigned to the value var x" + with pytest.raises(ValueError, match=msg): + joint_logprob({x_rv1: x, x_rv2: x}) diff --git a/pymc/tests/logprob/test_mixture.py b/pymc/tests/logprob/test_mixture.py new file mode 100644 index 00000000000..4731717347d --- /dev/null +++ b/pymc/tests/logprob/test_mixture.py @@ -0,0 +1,792 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import aesara +import aesara.tensor as at +import numpy as np +import pytest +import scipy.stats.distributions as sp + +from aesara.graph.basic import Variable, equal_computations +from aesara.tensor.random.basic import CategoricalRV +from aesara.tensor.shape import shape_tuple +from aesara.tensor.subtensor import as_index_constant + +from pymc.logprob.joint_logprob import factorized_joint_logprob, joint_logprob +from pymc.logprob.mixture import MixtureRV, expand_indices +from pymc.logprob.rewriting import construct_ir_fgraph +from pymc.logprob.utils import dirac_delta +from pymc.tests.helpers import assert_no_rvs +from pymc.tests.logprob.utils import scipy_logprob + + +def test_mixture_basics(): + srng = at.random.RandomStream(29833) + + def create_mix_model(size, axis): + X_rv = srng.normal(0, 1, size=size, name="X") + Y_rv = srng.gamma(0.5, 0.5, size=size, name="Y") + + p_at = at.scalar("p") + p_at.tag.test_value = 0.5 + + I_rv = srng.bernoulli(p_at, size=size, name="I") + i_vv = I_rv.clone() + i_vv.name = "i" + + if isinstance(axis, Variable): + M_rv = at.join(axis, X_rv, Y_rv)[I_rv] + else: + M_rv = at.stack([X_rv, Y_rv], axis=axis)[I_rv] + + M_rv.name = "M" + m_vv = M_rv.clone() + m_vv.name = "m" + + return locals() + + env = create_mix_model(None, 0) + X_rv = env["X_rv"] + I_rv = env["I_rv"] + i_vv = env["i_vv"] + M_rv = env["M_rv"] + m_vv = env["m_vv"] + + x_vv = X_rv.clone() + x_vv.name = "x" + + with pytest.raises(RuntimeError, match="could not be derived: {m}"): + factorized_joint_logprob({M_rv: m_vv, I_rv: i_vv, X_rv: x_vv}) + + with pytest.raises(NotImplementedError): + axis_at = at.lscalar("axis") + axis_at.tag.test_value = 0 + env = create_mix_model((2,), axis_at) + I_rv = env["I_rv"] + i_vv = env["i_vv"] + M_rv = env["M_rv"] + m_vv = env["m_vv"] + joint_logprob({M_rv: m_vv, I_rv: i_vv}) + + +@aesara.config.change_flags(compute_test_value="warn") +@pytest.mark.parametrize( + "op_constructor", + [ + lambda _I, _X, _Y: at.stack([_X, _Y])[_I], + lambda _I, _X, _Y: at.switch(_I, _X, _Y), + ], +) +def test_compute_test_value(op_constructor): + + srng = at.random.RandomStream(29833) + + X_rv = srng.normal(0, 1, name="X") + Y_rv = srng.gamma(0.5, 0.5, name="Y") + + p_at = at.scalar("p") + p_at.tag.test_value = 0.3 + + I_rv = srng.bernoulli(p_at, name="I") + + i_vv = I_rv.clone() + i_vv.name = "i" + + M_rv = op_constructor(I_rv, X_rv, Y_rv) + M_rv.name = "M" + + m_vv 
= M_rv.clone() + m_vv.name = "m" + + del M_rv.tag.test_value + + M_logp = joint_logprob({M_rv: m_vv, I_rv: i_vv}, sum=False) + + assert isinstance(M_logp.tag.test_value, np.ndarray) + + +@pytest.mark.parametrize( + "p_val, size", + [ + (np.array(0.0, dtype=aesara.config.floatX), ()), + (np.array(1.0, dtype=aesara.config.floatX), ()), + (np.array(0.0, dtype=aesara.config.floatX), (2,)), + (np.array(1.0, dtype=aesara.config.floatX), (2, 1)), + (np.array(1.0, dtype=aesara.config.floatX), (2, 3)), + (np.array([0.1, 0.9], dtype=aesara.config.floatX), (2, 3)), + ], +) +def test_hetero_mixture_binomial(p_val, size): + srng = at.random.RandomStream(29833) + + X_rv = srng.normal(0, 1, size=size, name="X") + Y_rv = srng.gamma(0.5, 0.5, size=size, name="Y") + + if np.ndim(p_val) == 0: + p_at = at.scalar("p") + p_at.tag.test_value = p_val + I_rv = srng.bernoulli(p_at, size=size, name="I") + p_val_1 = p_val + else: + p_at = at.vector("p") + p_at.tag.test_value = np.array(p_val, dtype=aesara.config.floatX) + I_rv = srng.categorical(p_at, size=size, name="I") + p_val_1 = p_val[1] + + i_vv = I_rv.clone() + i_vv.name = "i" + + M_rv = at.stack([X_rv, Y_rv])[I_rv] + M_rv.name = "M" + + m_vv = M_rv.clone() + m_vv.name = "m" + + M_logp = joint_logprob({M_rv: m_vv, I_rv: i_vv}, sum=False) + + M_logp_fn = aesara.function([p_at, m_vv, i_vv], M_logp) + + assert_no_rvs(M_logp_fn.maker.fgraph.outputs[0]) + + decimals = 6 if aesara.config.floatX == "float64" else 4 + + test_val_rng = np.random.RandomState(3238) + + bern_sp = sp.bernoulli(p_val_1) + norm_sp = sp.norm(loc=0, scale=1) + gamma_sp = sp.gamma(0.5, scale=1.0 / 0.5) + + for i in range(10): + i_val = bern_sp.rvs(size=size, random_state=test_val_rng) + x_val = norm_sp.rvs(size=size, random_state=test_val_rng) + y_val = gamma_sp.rvs(size=size, random_state=test_val_rng) + + component_logps = np.stack([norm_sp.logpdf(x_val), gamma_sp.logpdf(y_val)])[i_val] + exp_obs_logps = component_logps + bern_sp.logpmf(i_val) + + m_val = np.stack([x_val, y_val])[i_val] + logp_vals = M_logp_fn(p_val, m_val, i_val) + + np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals) + + +@pytest.mark.parametrize( + "X_args, Y_args, Z_args, p_val, comp_size, idx_size, extra_indices, join_axis", + [ + # Scalar mixture components, scalar index + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (), + (), + (), + 0, + ), + # Scalar mixture components, vector index + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (), + (6,), + (), + 0, + ), + ( + ( + np.array([0, -100], dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array([0.5, 1], dtype=aesara.config.floatX), + np.array([0.5, 1], dtype=aesara.config.floatX), + ), + ( + np.array([100, 1000], dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([[0.1, 0.5, 0.4], [0.4, 0.1, 0.5]], dtype=aesara.config.floatX), + (2,), + (2,), + (), + 0, + ), + ( + ( + 
np.array([0, -100], dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array([0.5, 1], dtype=aesara.config.floatX), + np.array([0.5, 1], dtype=aesara.config.floatX), + ), + ( + np.array([100, 1000], dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([[0.1, 0.5, 0.4], [0.4, 0.1, 0.5]], dtype=aesara.config.floatX), + None, + None, + (), + 0, + ), + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (), + (), + (), + 0, + ), + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (2,), + (2,), + (), + 0, + ), + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (2, 3), + (2, 3), + (), + 0, + ), + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (2, 3), + (), + (), + 0, + ), + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (3,), + (3,), + (slice(None),), + 1, + ), + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (5,), + (5,), + (np.arange(5),), + 0, + ), + ( + ( + np.array(0, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + ( + np.array(0.5, dtype=aesara.config.floatX), + np.array(0.5, dtype=aesara.config.floatX), + ), + ( + np.array(100, dtype=aesara.config.floatX), + np.array(1, dtype=aesara.config.floatX), + ), + np.array([0.1, 0.5, 0.4], dtype=aesara.config.floatX), + (5,), + (5,), + (np.arange(5), None), + 0, + ), + ], +) +def test_hetero_mixture_categorical( + X_args, Y_args, Z_args, p_val, comp_size, idx_size, extra_indices, join_axis +): + srng = at.random.RandomStream(29833) + + X_rv = srng.normal(*X_args, size=comp_size, name="X") + Y_rv = srng.gamma(*Y_args, size=comp_size, name="Y") + Z_rv = srng.normal(*Z_args, size=comp_size, name="Z") + + p_at = at.as_tensor(p_val).type() + p_at.name = "p" + 
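# Attach a test value so the graph can be built under aesara's compute_test_value modes +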
p_at.tag.test_value = np.array(p_val, dtype=aesara.config.floatX) + I_rv = srng.categorical(p_at, size=idx_size, name="I") + + i_vv = I_rv.clone() + i_vv.name = "i" + + indices_at = list(extra_indices) + indices_at.insert(join_axis, I_rv) + indices_at = tuple(indices_at) + + M_rv = at.stack([X_rv, Y_rv, Z_rv], axis=join_axis)[indices_at] + M_rv.name = "M" + + m_vv = M_rv.clone() + m_vv.name = "m" + + logp_parts = factorized_joint_logprob({M_rv: m_vv, I_rv: i_vv}, sum=False) + + I_logp_fn = aesara.function([p_at, i_vv], logp_parts[i_vv]) + M_logp_fn = aesara.function([m_vv, i_vv], logp_parts[m_vv]) + + assert_no_rvs(I_logp_fn.maker.fgraph.outputs[0]) + assert_no_rvs(M_logp_fn.maker.fgraph.outputs[0]) + + decimals = 6 if aesara.config.floatX == "float64" else 4 + + test_val_rng = np.random.RandomState(3238) + + norm_1_sp = sp.norm(loc=X_args[0], scale=X_args[1]) + gamma_sp = sp.gamma(Y_args[0], scale=1 / Y_args[1]) + norm_2_sp = sp.norm(loc=Z_args[0], scale=Z_args[1]) + + for i in range(10): + i_val = CategoricalRV.rng_fn(test_val_rng, p_val, idx_size) + + indices_val = list(extra_indices) + indices_val.insert(join_axis, i_val) + indices_val = tuple(indices_val) + + x_val = norm_1_sp.rvs(size=comp_size, random_state=test_val_rng) + y_val = gamma_sp.rvs(size=comp_size, random_state=test_val_rng) + z_val = norm_2_sp.rvs(size=comp_size, random_state=test_val_rng) + + component_logps = np.stack( + [norm_1_sp.logpdf(x_val), gamma_sp.logpdf(y_val), norm_2_sp.logpdf(z_val)], + axis=join_axis, + )[indices_val] + index_logps = scipy_logprob(i_val, p_val) + exp_obs_logps = component_logps + index_logps[(Ellipsis,) + (None,) * join_axis] + + m_val = np.stack([x_val, y_val, z_val], axis=join_axis)[indices_val] + + I_logp_vals = I_logp_fn(p_val, i_val) + M_logp_vals = M_logp_fn(m_val, i_val) + + logp_vals = M_logp_vals + I_logp_vals[(Ellipsis,) + (None,) * join_axis] + + np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals) + + +@pytest.mark.parametrize( + "A_parts, indices", + [ + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (np.array([[0, 1], [2, 2]]), slice(2, 3)), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (slice(2, 3), np.array([[0, 1], [2, 2]])), + ), + ( + ( + np.random.normal(size=(5, 4, 3)), + np.random.normal(size=(5, 4, 3)), + np.random.normal(size=(5, 4, 3)), + ), + ( + np.array([[0], [2], [1]]), + slice(None), + np.array([2, 1]), + slice(2, 3), + ), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (slice(2, 3), np.array([0, 1, 2])), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (np.array([[0, 1], [2, 2]]), np.array([[0, 1], [2, 2]])), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + ( + np.array([[0, 1], [2, 2]]), + np.array([[0, 1], [2, 2]]), + np.array([[0, 1], [2, 2]]), + ), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (np.array([[0, 1], [2, 2]]), np.array([[0, 1], [2, 2]]), 1), + ), + ( + ( + np.random.normal(size=(5, 4, 3)), + np.random.normal(size=(5, 4, 3)), + ), + (slice(0, 2),), + ), + ( + ( + np.random.normal(size=(5, 4, 3)), + np.random.normal(size=(5, 4, 3)), + ), + (slice(0, 2), np.random.randint(3, size=(2, 3))), + ), + ], +) +def 
test_expand_indices_basic(A_parts, indices): + A = at.stack(A_parts) + at_indices = [as_index_constant(idx) for idx in indices] + full_indices = expand_indices(at_indices, shape_tuple(A)) + assert len(full_indices) == A.ndim + exp_res = A[indices].eval() + res = A[full_indices].eval() + assert np.array_equal(res, exp_res) + + +@pytest.mark.parametrize( + "A_parts, indices", + [ + ( + ( + np.random.normal(size=(6, 5, 4, 3)), + np.random.normal(size=(6, 5, 4, 3)), + np.random.normal(size=(6, 5, 4, 3)), + ), + ( + slice(None), + np.array([[0], [2], [1]]), + slice(None), + np.array([2, 1]), + slice(2, 3), + ), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (np.array([[0, 1], [2, 2]]), slice(None), np.array([[0, 1], [2, 2]])), + ), + ], +) +def test_expand_indices_moved_subspaces(A_parts, indices): + A = at.stack(A_parts) + at_indices = [as_index_constant(idx) for idx in indices] + full_indices = expand_indices(at_indices, shape_tuple(A)) + assert len(full_indices) == A.ndim + exp_res = A[indices].eval() + res = A[full_indices].eval() + assert np.array_equal(res, exp_res) + + +@pytest.mark.parametrize( + "A_parts, indices", + [ + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (slice(2, 3), np.array([0, 1, 2]), 1), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (slice(2, 3), 1, np.array([0, 1, 2])), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (1, slice(2, 3), np.array([0, 1, 2])), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (np.random.randint(2, size=(4, 3)), 1, 0), + ), + ], +) +def test_expand_indices_single_indices(A_parts, indices): + A = at.stack(A_parts) + at_indices = [as_index_constant(idx) for idx in indices] + full_indices = expand_indices(at_indices, shape_tuple(A)) + assert len(full_indices) == A.ndim + exp_res = A[indices].eval() + res = A[full_indices].eval() + assert np.array_equal(res, exp_res) + + +@pytest.mark.parametrize( + "A_parts, indices", + [ + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (None,), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (None, None, None), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (None, 1, None, 0, None), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (slice(2, 3), None, 1, None, 0, None), + ), + ( + ( + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + np.random.normal(size=(4, 3)), + ), + (slice(2, 3), None, 1, 0, None), + ), + ], +) +def test_expand_indices_newaxis(A_parts, indices): + A = at.stack(A_parts) + at_indices = [as_index_constant(idx) for idx in indices] + full_indices = expand_indices(at_indices, shape_tuple(A)) + assert len(full_indices) == A.ndim + exp_res = A[indices].eval() + res = A[full_indices].eval() + assert np.array_equal(res, exp_res) + + +def test_mixture_with_DiracDelta(): + srng = at.random.RandomStream(29833) + + X_rv = srng.normal(0, 1, name="X") + Y_rv = dirac_delta(0.0) + Y_rv.name = "Y" + + I_rv = srng.categorical([0.5, 0.5], size=4) + + i_vv = I_rv.clone() + 
i_vv.name = "i" + + M_rv = at.stack([X_rv, Y_rv])[I_rv] + M_rv.name = "M" + + m_vv = M_rv.clone() + m_vv.name = "m" + + logp_res = factorized_joint_logprob({M_rv: m_vv, I_rv: i_vv}) + + assert m_vv in logp_res + + +def test_switch_mixture(): + srng = at.random.RandomStream(29833) + + X_rv = srng.normal(-10.0, 0.1, name="X") + Y_rv = srng.normal(10.0, 0.1, name="Y") + + I_rv = srng.bernoulli(0.5, name="I") + i_vv = I_rv.clone() + i_vv.name = "i" + + Z1_rv = at.switch(I_rv, X_rv, Y_rv) + z_vv = Z1_rv.clone() + z_vv.name = "z1" + + fgraph, _, _ = construct_ir_fgraph({Z1_rv: z_vv, I_rv: i_vv}) + + assert isinstance(fgraph.outputs[0].owner.op, MixtureRV) + assert not hasattr( + fgraph.outputs[0].tag, "test_value" + ) # aesara.config.compute_test_value == "off" + assert fgraph.outputs[0].name is None + + Z1_rv.name = "Z1" + + fgraph, _, _ = construct_ir_fgraph({Z1_rv: z_vv, I_rv: i_vv}) + + assert fgraph.outputs[0].name == "Z1-mixture" + + # building the identical graph but with a stack to check that mixture computations are identical + + Z2_rv = at.stack((X_rv, Y_rv))[I_rv] + + fgraph2, _, _ = construct_ir_fgraph({Z2_rv: z_vv, I_rv: i_vv}) + + assert equal_computations(fgraph.outputs, fgraph2.outputs) + + z1_logp = joint_logprob({Z1_rv: z_vv, I_rv: i_vv}) + z2_logp = joint_logprob({Z2_rv: z_vv, I_rv: i_vv}) + + # below should follow immediately from the equal_computations assertion above + assert equal_computations([z1_logp], [z2_logp]) + + np.testing.assert_almost_equal(0.69049938, z1_logp.eval({z_vv: -10, i_vv: 0})) + np.testing.assert_almost_equal(0.69049938, z2_logp.eval({z_vv: -10, i_vv: 0})) diff --git a/pymc/tests/logprob/test_rewriting.py b/pymc/tests/logprob/test_rewriting.py new file mode 100644 index 00000000000..09066f97988 --- /dev/null +++ b/pymc/tests/logprob/test_rewriting.py @@ -0,0 +1,85 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import aesara +import aesara.tensor as at + +from aesara.graph.rewriting.basic import in2out +from aesara.graph.rewriting.utils import rewrite_graph +from aesara.tensor.elemwise import DimShuffle, Elemwise +from aesara.tensor.subtensor import Subtensor + +from pymc.logprob.rewriting import local_lift_DiracDelta +from pymc.logprob.utils import DiracDelta, dirac_delta + + +def test_local_lift_DiracDelta(): + c_at = at.vector() + dd_at = dirac_delta(c_at) + + Z_at = at.cast(dd_at, "int64") + + res = rewrite_graph(Z_at, custom_rewrite=in2out(local_lift_DiracDelta), clone=False) + assert isinstance(res.owner.op, DiracDelta) + assert isinstance(res.owner.inputs[0].owner.op, Elemwise) + + Z_at = dd_at.dimshuffle("x", 0) + + res = rewrite_graph(Z_at, custom_rewrite=in2out(local_lift_DiracDelta), clone=False) + assert isinstance(res.owner.op, DiracDelta) + assert isinstance(res.owner.inputs[0].owner.op, DimShuffle) + + Z_at = dd_at[0] + + res = rewrite_graph(Z_at, custom_rewrite=in2out(local_lift_DiracDelta), clone=False) + assert isinstance(res.owner.op, DiracDelta) + assert isinstance(res.owner.inputs[0].owner.op, Subtensor) + + # Don't lift multi-output `Op`s + c_at = at.matrix() + dd_at = dirac_delta(c_at) + Z_at = at.nlinalg.svd(dd_at)[0] + + res = rewrite_graph(Z_at, custom_rewrite=in2out(local_lift_DiracDelta), clone=False) + assert res is Z_at + + +def test_local_remove_DiracDelta(): + c_at = at.vector() + dd_at = dirac_delta(c_at) + + fn = aesara.function([c_at], dd_at) + assert not any(isinstance(node.op, DiracDelta) for node in fn.maker.fgraph.toposort()) diff --git a/pymc/tests/logprob/test_scan.py b/pymc/tests/logprob/test_scan.py new file mode 100644 index 00000000000..3802c1072c5 --- /dev/null +++ b/pymc/tests/logprob/test_scan.py @@ -0,0 +1,459 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import aesara +import aesara.tensor as at +import numpy as np +import pytest + +from aesara import Mode +from aesara.raise_op import assert_op +from aesara.scan.utils import ScanArgs + +from pymc.logprob.abstract import logprob +from pymc.logprob.joint_logprob import factorized_joint_logprob, joint_logprob +from pymc.logprob.scan import ( + construct_scan, + convert_outer_out_to_in, + get_random_outer_outputs, +) +from pymc.tests.helpers import assert_no_rvs + + +def create_inner_out_logp(value_map): + """Create a log-likelihood inner-output. + + This is intended to be used with `get_random_outer_outputs`. + + """ + res = [] + for old_inner_out_var, new_inner_in_var in value_map.items(): + logp = logprob(old_inner_out_var, new_inner_in_var) + if new_inner_in_var.name: + logp.name = f"logp({new_inner_in_var.name})" + res.append(logp) + + return res + + +def test_convert_outer_out_to_in_sit_sot(): + """Test a single replacement with `convert_outer_out_to_in`. + + This should be a single SIT-SOT replacement. + """ + + rng_state = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(1234))) + rng_tt = aesara.shared(rng_state, name="rng", borrow=True) + rng_tt.tag.is_rng = True + rng_tt.default_update = rng_tt + + # + # We create a `Scan` representing a time-series model with normally + # distributed responses that are dependent on lagged values of both the + # response `RandomVariable` and a lagged "deterministic" that also depends + # on the lagged response values. + # + def input_step_fn(mu_tm1, y_tm1, rng): + mu_tm1.name = "mu_tm1" + y_tm1.name = "y_tm1" + mu = mu_tm1 + y_tm1 + 1 + mu.name = "mu_t" + return mu, at.random.normal(mu, 1.0, rng=rng, name="Y_t") + + (mu_tt, Y_rv), _ = aesara.scan( + fn=input_step_fn, + outputs_info=[ + { + "initial": at.as_tensor_variable(0.0, dtype=aesara.config.floatX), + "taps": [-1], + }, + { + "initial": at.as_tensor_variable(0.0, dtype=aesara.config.floatX), + "taps": [-1], + }, + ], + non_sequences=[rng_tt], + n_steps=10, + ) + + mu_tt.name = "mu_tt" + mu_tt.owner.inputs[0].name = "mu_all" + Y_rv.name = "Y_rv" + Y_all = Y_rv.owner.inputs[0] + Y_all.name = "Y_all" + + input_scan_args = ScanArgs.from_node(Y_rv.owner.inputs[0].owner) + + # TODO FIXME: Everything below needs to be replaced with explicit asserts + # on the values in `input_scan_args` + + # + # Sample from the model and create another `Scan` that computes the + # log-likelihood of the model at the sampled point. 
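+ # The hand-written log-likelihood `Scan` below serves as the reference result.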
+ # + Y_obs = at.as_tensor_variable(Y_rv.eval()) + Y_obs.name = "Y_obs" + + def output_step_fn(y_t, y_tm1, mu_tm1): + mu_tm1.name = "mu_tm1" + y_tm1.name = "y_tm1" + mu = mu_tm1 + y_tm1 + 1 + mu.name = "mu_t" + logp = logprob(at.random.normal(mu, 1.0), y_t) + logp.name = "logp" + return mu, logp + + (mu_tt, Y_logp), _ = aesara.scan( + fn=output_step_fn, + sequences=[{"input": Y_obs, "taps": [0, -1]}], + outputs_info=[ + { + "initial": at.as_tensor_variable(0.0, dtype=aesara.config.floatX), + "taps": [-1], + }, + {}, + ], + ) + + Y_logp.name = "Y_logp" + mu_tt.name = "mu_tt" + + # + # Get the model output variable that corresponds to the response + # `RandomVariable` + # + oo_idx, oo_var, io_var = get_random_outer_outputs(input_scan_args)[0] + + # + # Convert the original model `Scan` into another `Scan` that's equivalent + # to the log-likelihood `Scan` given above. + # In other words, automatically construct the log-likelihood `Scan` based + # on the model `Scan`. + # + value_map = {Y_all: Y_obs} + test_scan_args = convert_outer_out_to_in( + input_scan_args, + [oo_var], + value_map, + inner_out_fn=create_inner_out_logp, + ) + + scan_out, updates = construct_scan(test_scan_args) + + # + # Evaluate the manually and automatically constructed log-likelihoods and + # compare. + # + res = scan_out[oo_idx].eval() + exp_res = Y_logp.eval() + + assert np.array_equal(res, exp_res) + + +def test_convert_outer_out_to_in_mit_sot(): + """Test a single replacement with `convert_outer_out_to_in`. + + This should be a single MIT-SOT replacement. + """ + + rng_state = np.random.default_rng(1234) + rng_tt = aesara.shared(rng_state, name="rng", borrow=True) + rng_tt.tag.is_rng = True + rng_tt.default_update = rng_tt + + # + # This is a very simple model with only one output, but multiple + # taps/lags. + # + def input_step_fn(y_tm1, y_tm2, rng): + y_tm1.name = "y_tm1" + y_tm2.name = "y_tm2" + return at.random.normal(y_tm1 + y_tm2, 1.0, rng=rng, name="Y_t") + + Y_rv, _ = aesara.scan( + fn=input_step_fn, + outputs_info=[ + {"initial": at.as_tensor_variable(np.r_[-1.0, 0.0]), "taps": [-1, -2]}, + ], + non_sequences=[rng_tt], + n_steps=10, + ) + + Y_rv.name = "Y_rv" + Y_all = Y_rv.owner.inputs[0] + Y_all.name = "Y_all" + + Y_obs = at.as_tensor_variable(Y_rv.eval()) + Y_obs.name = "Y_obs" + + input_scan_args = ScanArgs.from_node(Y_rv.owner.inputs[0].owner) + + # TODO FIXME: Everything below needs to be replaced with explicit asserts + # on the values in `input_scan_args` + + # + # The corresponding log-likelihood + # + def output_step_fn(y_t, y_tm1, y_tm2): + y_t.name = "y_t" + y_tm1.name = "y_tm1" + y_tm2.name = "y_tm2" + logp = logprob(at.random.normal(y_tm1 + y_tm2, 1.0), y_t) + logp.name = "logp(y_t)" + return logp + + Y_logp, _ = aesara.scan( + fn=output_step_fn, + sequences=[{"input": Y_obs, "taps": [0, -1, -2]}], + outputs_info=[{}], + ) + + # + # Get the model output variable that corresponds to the response + # `RandomVariable` + # + oo_idx, oo_var, io_var = get_random_outer_outputs(input_scan_args)[0] + + # + # Convert the original model `Scan` into another `Scan` that's equivalent + # to the log-likelihood `Scan` given above. + # In other words, automatically construct the log-likelihood `Scan` based + # on the model `Scan`. 
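+ # (This mirrors the SIT-SOT test above, but with two lagged taps.)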
+ + value_map = {Y_all: Y_obs} + test_scan_args = convert_outer_out_to_in( + input_scan_args, + [oo_var], + value_map, + inner_out_fn=create_inner_out_logp, + ) + + scan_out, updates = construct_scan(test_scan_args) + + # + # Evaluate the manually and automatically constructed log-likelihoods and + # compare. + # + res = scan_out[oo_idx].eval() + exp_res = Y_logp.eval() + + assert np.array_equal(res, exp_res) + + +@pytest.mark.parametrize( + "require_inner_rewrites", + [ + False, + True, + ], +) +def test_scan_joint_logprob(require_inner_rewrites): + srng = at.random.RandomStream() + + N_tt = at.iscalar("N") + N_val = 10 + N_tt.tag.test_value = N_val + + M_tt = at.iscalar("M") + M_val = 2 + M_tt.tag.test_value = M_val + + mus_tt = at.matrix("mus_t") + + mus_val = np.stack([np.arange(0.0, 10), np.arange(0.0, -10, -1)], axis=-1).astype( + aesara.config.floatX + ) + mus_tt.tag.test_value = mus_val + + sigmas_tt = at.ones((N_tt,)) + Gamma_rv = srng.dirichlet(at.ones((M_tt, M_tt)), name="Gamma") + + Gamma_vv = Gamma_rv.clone() + Gamma_vv.name = "Gamma_vv" + + Gamma_val = np.array([[0.5, 0.5], [0.5, 0.5]]) + Gamma_rv.tag.test_value = Gamma_val + + def scan_fn(mus_t, sigma_t, Gamma_t): + S_t = srng.categorical(Gamma_t[0], name="S_t") + + if require_inner_rewrites: + Y_t = srng.normal(mus_t, sigma_t, name="Y_t")[S_t] + else: + Y_t = srng.normal(mus_t[S_t], sigma_t, name="Y_t") + + return Y_t, S_t + + (Y_rv, S_rv), _ = aesara.scan( + fn=scan_fn, + sequences=[mus_tt, sigmas_tt], + non_sequences=[Gamma_rv], + outputs_info=[{}, {}], + strict=True, + name="scan_rv", + ) + Y_rv.name = "Y" + S_rv.name = "S" + + y_vv = Y_rv.clone() + y_vv.name = "y" + + s_vv = S_rv.clone() + s_vv.name = "s" + + y_logp = joint_logprob({Y_rv: y_vv, S_rv: s_vv, Gamma_rv: Gamma_vv}) + + y_val = np.arange(10) + s_val = np.array([0, 1, 0, 1, 1, 0, 0, 0, 1, 1]) + + test_point = { + y_vv: y_val, + s_vv: s_val, + M_tt: M_val, + N_tt: N_val, + mus_tt: mus_val, + Gamma_vv: Gamma_val, + } + + y_logp_fn = aesara.function(list(test_point.keys()), y_logp) + + assert_no_rvs(y_logp_fn.maker.fgraph.outputs[0]) + + # Construct the joint log-probability by hand so we can compare it with + # `y_logp` + def scan_fn(mus_t, sigma_t, Y_t_val, S_t_val, Gamma_t): + S_t = at.random.categorical(Gamma_t[0], name="S_t") + Y_t = at.random.normal(mus_t[S_t_val], sigma_t, name="Y_t") + Y_t_logp, S_t_logp = logprob(Y_t, Y_t_val), logprob(S_t, S_t_val) + Y_t_logp.name = "log(Y_t=y_t)" + S_t_logp.name = "log(S_t=s_t)" + return Y_t_logp, S_t_logp + + (Y_rv_logp, S_rv_logp), _ = aesara.scan( + fn=scan_fn, + sequences=[mus_tt, sigmas_tt, y_vv, s_vv], + non_sequences=[Gamma_vv], + outputs_info=[{}, {}], + strict=True, + name="scan_rv", + ) + Y_rv_logp.name = "logp(Y=y)" + S_rv_logp.name = "logp(S=s)" + + Gamma_logp = logprob(Gamma_rv, Gamma_vv) + + y_logp_ref = Y_rv_logp.sum() + S_rv_logp.sum() + Gamma_logp.sum() + + assert_no_rvs(y_logp_ref) + + y_logp_val = y_logp.eval(test_point) + + y_logp_ref_val = y_logp_ref.eval(test_point) + + assert np.allclose(y_logp_val, y_logp_ref_val) + + +@pytest.mark.xfail(reason="see #148") +@aesara.config.change_flags(compute_test_value="raise") +def test_initial_values(): + srng = at.random.RandomStream(seed=2320) + + p_S_0 = np.array([0.9, 0.1]) + S_0_rv = srng.categorical(p_S_0, name="S_0") + S_0_rv.tag.test_value = 0 + + Gamma_at = at.matrix("Gamma") + Gamma_at.tag.test_value = np.array([[0, 1], [1, 0]]) + + s_0_vv = S_0_rv.clone() + s_0_vv.name = "s_0" + + def step_fn(S_tm1, Gamma): + 
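# One Markov step: draw the next state from row S_tm1 of the transition matrix +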
S_t = srng.categorical(Gamma[S_tm1], name="S_t") + return S_t + + S_1T_rv, _ = aesara.scan( + fn=step_fn, + outputs_info=[{"initial": S_0_rv, "taps": [-1]}], + non_sequences=[Gamma_at], + strict=True, + n_steps=10, + name="S_0T", + ) + + S_1T_rv.name = "S_1T" + s_1T_vv = S_1T_rv.clone() + s_1T_vv.name = "s_1T" + + logp_parts = factorized_joint_logprob({S_1T_rv: s_1T_vv, S_0_rv: s_0_vv}) + + s_0_val = 0 + s_1T_val = np.array([1, 0, 1, 0, 1, 1, 0, 1, 0, 1]) + Gamma_val = np.array([[0.1, 0.9], [0.9, 0.1]]) + + exp_res = np.log(p_S_0[s_0_val]) + s_prev = s_0_val + for s in s_1T_val: + exp_res += np.log(Gamma_val[s_prev, s]) + s_prev = s + + S_0T_logp = sum(v.sum() for v in logp_parts.values()) + S_0T_logp_fn = aesara.function([s_0_vv, s_1T_vv, Gamma_at], S_0T_logp) + res = S_0T_logp_fn(s_0_val, s_1T_val, Gamma_val) + + assert res == pytest.approx(exp_res) + + +@pytest.mark.parametrize("remove_asserts", (True, False)) +def test_mode_is_kept(remove_asserts): + mode = Mode().including("local_remove_all_assert") if remove_asserts else None + x, _ = aesara.scan( + fn=lambda x: at.random.normal(assert_op(x, x > 0)), + outputs_info=[at.ones(())], + n_steps=10, + mode=mode, + ) + x.name = "x" + x_vv = x.clone() + x_logp = aesara.function([x_vv], joint_logprob({x: x_vv})) + + x_test_val = np.full((10,), -1) + if remove_asserts: + assert x_logp(x=x_test_val) + else: + with pytest.raises(AssertionError): + x_logp(x=x_test_val) diff --git a/pymc/tests/logprob/test_tensor.py b/pymc/tests/logprob/test_tensor.py new file mode 100644 index 00000000000..0c261429178 --- /dev/null +++ b/pymc/tests/logprob/test_tensor.py @@ -0,0 +1,306 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import aesara +import numpy as np +import pytest + +from aesara import tensor as at +from aesara.graph import RewriteDatabaseQuery +from aesara.graph.rewriting.basic import in2out +from aesara.graph.rewriting.utils import rewrite_graph +from aesara.tensor.extra_ops import BroadcastTo +from scipy import stats as st + +from pymc.logprob import factorized_joint_logprob, joint_logprob +from pymc.logprob.rewriting import logprob_rewrites_db +from pymc.logprob.tensor import naive_bcast_rv_lift + + +def test_naive_bcast_rv_lift(): + r"""Make sure `naive_bcast_rv_lift` can handle useless scalar `BroadcastTo`\s.""" + X_rv = at.random.normal() + Z_at = BroadcastTo()(X_rv, ()) + + # Make sure we're testing what we intend to test + assert isinstance(Z_at.owner.op, BroadcastTo) + + res = rewrite_graph(Z_at, custom_rewrite=in2out(naive_bcast_rv_lift), clone=False) + assert res is X_rv + + +def test_naive_bcast_rv_lift_valued_var(): + r"""Check that `naive_bcast_rv_lift` won't touch valued variables""" + + x_rv = at.random.normal(name="x") + broadcasted_x_rv = at.broadcast_to(x_rv, (2,)) + + y_rv = at.random.normal(broadcasted_x_rv, name="y") + + x_vv = x_rv.clone() + y_vv = y_rv.clone() + logp_map = factorized_joint_logprob({x_rv: x_vv, y_rv: y_vv}) + assert x_vv in logp_map + assert y_vv in logp_map + assert len(logp_map) == 2 + assert np.allclose(logp_map[x_vv].eval({x_vv: 0}), st.norm(0).logpdf(0)) + assert np.allclose(logp_map[y_vv].eval({x_vv: 0, y_vv: [0, 0]}), st.norm(0).logpdf([0, 0])) + + +def test_measurable_make_vector(): + base1_rv = at.random.normal(name="base1") + base2_rv = at.random.halfnormal(name="base2") + base3_rv = at.random.exponential(name="base3") + y_rv = at.stack((base1_rv, base2_rv, base3_rv)) + y_rv.name = "y" + + base1_vv = base1_rv.clone() + base2_vv = base2_rv.clone() + base3_vv = base3_rv.clone() + y_vv = y_rv.clone() + + ref_logp = joint_logprob({base1_rv: base1_vv, base2_rv: base2_vv, base3_rv: base3_vv}) + make_vector_logp = joint_logprob({y_rv: y_vv}, sum=False) + + base1_testval = base1_rv.eval() + base2_testval = base2_rv.eval() + base3_testval = base3_rv.eval() + y_testval = np.stack((base1_testval, base2_testval, base3_testval)) + + ref_logp_eval_eval = ref_logp.eval( + {base1_vv: base1_testval, base2_vv: base2_testval, base3_vv: base3_testval} + ) + make_vector_logp_eval = make_vector_logp.eval({y_vv: y_testval}) + + assert make_vector_logp_eval.shape == y_testval.shape + assert np.isclose(make_vector_logp_eval.sum(), ref_logp_eval_eval) + + +@pytest.mark.parametrize( + "size1, size2, axis, concatenate", + [ + ((5,), (3,), 0, True), + ((5,), (3,), -1, True), + ((5, 2), (3, 2), 0, True), + ((2, 5), (2, 3), 1, True), + ((2, 5), (2, 5), 0, False), + ((2, 5), (2, 5), 1, False), + ((2, 5), (2, 5), 2, False), + ], +) +def test_measurable_join_univariate(size1, size2, axis, concatenate): + base1_rv = at.random.normal(size=size1, name="base1") + base2_rv = at.random.exponential(size=size2, name="base2") + if concatenate: + y_rv = at.concatenate((base1_rv, base2_rv), axis=axis) + else: + y_rv = at.stack((base1_rv, base2_rv), axis=axis) + y_rv.name = "y" + + base1_vv = base1_rv.clone() + base2_vv = base2_rv.clone() + y_vv = y_rv.clone() + + base_logps = list(factorized_joint_logprob({base1_rv: base1_vv, base2_rv: base2_vv}).values()) + if concatenate: + base_logps = at.concatenate(base_logps, axis=axis) + else: + base_logps = at.stack(base_logps, axis=axis) + y_logp = joint_logprob({y_rv: y_vv}, sum=False) + + base1_testval = base1_rv.eval() + base2_testval = 
base2_rv.eval() + if concatenate: + y_testval = np.concatenate((base1_testval, base2_testval), axis=axis) + else: + y_testval = np.stack((base1_testval, base2_testval), axis=axis) + np.testing.assert_allclose( + base_logps.eval({base1_vv: base1_testval, base2_vv: base2_testval}), + y_logp.eval({y_vv: y_testval}), + ) + + +@pytest.mark.parametrize( + "size1, supp_size1, size2, supp_size2, axis, concatenate", + [ + (None, 2, None, 2, 0, True), + (None, 2, None, 2, -1, True), + ((5,), 2, (3,), 2, 0, True), + ((5,), 2, (3,), 2, -2, True), + ((2,), 5, (2,), 3, 1, True), + pytest.param( + (2,), + 5, + (2,), + 5, + 0, + False, + marks=pytest.mark.xfail(reason="cannot measure dimshuffled multivariate RVs"), + ), + pytest.param( + (2,), + 5, + (2,), + 5, + 1, + False, + marks=pytest.mark.xfail(reason="cannot measure dimshuffled multivariate RVs"), + ), + ], +) +def test_measurable_join_multivariate(size1, supp_size1, size2, supp_size2, axis, concatenate): + base1_rv = at.random.multivariate_normal( + np.zeros(supp_size1), np.eye(supp_size1), size=size1, name="base1" + ) + base2_rv = at.random.dirichlet(np.ones(supp_size2), size=size2, name="base2") + if concatenate: + y_rv = at.concatenate((base1_rv, base2_rv), axis=axis) + else: + y_rv = at.stack((base1_rv, base2_rv), axis=axis) + y_rv.name = "y" + + base1_vv = base1_rv.clone() + base2_vv = base2_rv.clone() + y_vv = y_rv.clone() + base_logps = [ + at.atleast_1d(logp) + for logp in factorized_joint_logprob({base1_rv: base1_vv, base2_rv: base2_vv}).values() + ] + + if concatenate: + axis_norm = np.core.numeric.normalize_axis_index(axis, base1_rv.ndim) + base_logps = at.concatenate(base_logps, axis=axis_norm - 1) + else: + axis_norm = np.core.numeric.normalize_axis_index(axis, base1_rv.ndim + 1) + base_logps = at.stack(base_logps, axis=axis_norm - 1) + y_logp = joint_logprob({y_rv: y_vv}, sum=False) + + base1_testval = base1_rv.eval() + base2_testval = base2_rv.eval() + if concatenate: + y_testval = np.concatenate((base1_testval, base2_testval), axis=axis) + else: + y_testval = np.stack((base1_testval, base2_testval), axis=axis) + np.testing.assert_allclose( + base_logps.eval({base1_vv: base1_testval, base2_vv: base2_testval}), + y_logp.eval({y_vv: y_testval}), + ) + + +def test_join_mixed_ndim_supp(): + base1_rv = at.random.normal(size=3, name="base1") + base2_rv = at.random.dirichlet(np.ones(3), name="base2") + y_rv = at.concatenate((base1_rv, base2_rv), axis=0) + + y_vv = y_rv.clone() + with pytest.raises(ValueError, match="Joined logps have different number of dimensions"): + joint_logprob({y_rv: y_vv}) + + +@aesara.config.change_flags(cxx="") +@pytest.mark.parametrize( + "ds_order", + [ + (0, 2, 1), # Swap + (2, 1, 0), # Swap + (1, 2, 0), # Swap + (0, 1, 2, "x"), # Expand + ("x", 0, 1, 2), # Expand + ( + 0, + 2, + ), # Drop + (2, 0), # Swap and drop + (2, 1, "x", 0), # Swap and expand + ("x", 0, 2), # Expand and drop + (2, "x", 0), # Swap, expand and drop + ], +) +@pytest.mark.parametrize("multivariate", (False, True)) +def test_measurable_dimshuffle(ds_order, multivariate): + if multivariate: + base_rv = at.random.dirichlet([1, 2, 3], size=(2, 1)) + else: + base_rv = at.exp(at.random.beta(1, 2, size=(2, 1, 3))) + + ds_rv = base_rv.dimshuffle(ds_order) + base_vv = base_rv.clone() + ds_vv = ds_rv.clone() + + # Remove support dimension axis from ds_order (i.e., 2, for multivariate) + if multivariate: + logp_ds_order = [o for o in ds_order if o == "x" or o < 2] + else: + logp_ds_order = ds_order + + ref_logp = joint_logprob({base_rv: base_vv}, 
sum=False).dimshuffle(logp_ds_order) + + # Disable local_dimshuffle_rv_lift to test fallback Aeppl rewrite + ir_rewriter = logprob_rewrites_db.query( + RewriteDatabaseQuery(include=["basic"]).excluding("dimshuffle_lift") + ) + ds_logp = joint_logprob({ds_rv: ds_vv}, sum=False, ir_rewriter=ir_rewriter) + assert ds_logp is not None + + ref_logp_fn = aesara.function([base_vv], ref_logp) + ds_logp_fn = aesara.function([ds_vv], ds_logp) + + base_test_value = base_rv.eval() + ds_test_value = at.constant(base_test_value).dimshuffle(ds_order).eval() + + np.testing.assert_array_equal(ref_logp_fn(base_test_value), ds_logp_fn(ds_test_value)) + + +def test_unmergeable_dimshuffles(): + # Test that graphs with DimShuffles that cannot be lifted/merged fail + + # Initial support axis is at axis=-1 + x = at.random.dirichlet( + np.ones((3,)), + size=(4, 2), + ) + # Support axis is now at axis=-2 + y = x.dimshuffle((0, 2, 1)) + # Downstream dimshuffle will not be lifted through cumsum. If it ever is, + # we will need a different measurable Op example + z = at.cumsum(y, axis=-2) + # Support axis is now at axis=-3 + w = z.dimshuffle((1, 0, 2)) + + w_vv = w.clone() + # TODO: Check that logp is correct if this type of graph is ever supported + with pytest.raises(RuntimeError, match="could not be derived"): + joint_logprob({w: w_vv}) diff --git a/pymc/tests/logprob/test_transforms.py b/pymc/tests/logprob/test_transforms.py new file mode 100644 index 00000000000..1e599900fa8 --- /dev/null +++ b/pymc/tests/logprob/test_transforms.py @@ -0,0 +1,741 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import aesara +import aesara.tensor as at +import numpy as np +import pytest +import scipy as sp +import scipy.special + +from aesara.graph.basic import equal_computations +from aesara.graph.fg import FunctionGraph +from numdifftools import Jacobian + +from pymc.logprob.joint_logprob import factorized_joint_logprob, joint_logprob +from pymc.logprob.transforms import ( + DEFAULT_TRANSFORM, + ChainedTransform, + ExpTransform, + IntervalTransform, + LocTransform, + LogOddsTransform, + LogTransform, + RVTransform, + ScaleTransform, + TransformValuesMapping, + TransformValuesRewrite, + _default_transformed_rv, + transformed_variable, +) +from pymc.tests.helpers import assert_no_rvs + + +class DirichletScipyDist: + def __init__(self, alphas): + self.alphas = alphas + + def rvs(self, size=None, random_state=None): + if size is None: + size = () + samples_shape = tuple(np.atleast_1d(size)) + self.alphas.shape + samples = np.empty(samples_shape) + alphas_bcast = np.broadcast_to(self.alphas, samples_shape) + + for index in np.ndindex(*samples_shape[:-1]): + samples[index] = random_state.dirichlet(alphas_bcast[index]) + + return samples + + def logpdf(self, value): + res = np.sum( + scipy.special.xlogy(self.alphas - 1, value) - scipy.special.gammaln(self.alphas), + axis=-1, + ) + scipy.special.gammaln(np.sum(self.alphas, axis=-1)) + return res + + +@pytest.mark.parametrize( + "at_dist, dist_params, sp_dist, size", + [ + (at.random.uniform, (0, 1), sp.stats.uniform, ()), + ( + at.random.pareto, + (1.5, 10.5), + lambda b, scale: sp.stats.pareto(b, scale=scale), + (), + ), + ( + at.random.triangular, + (1.5, 3.0, 10.5), + lambda lower, mode, upper: sp.stats.triang( + (mode - lower) / (upper - lower), loc=lower, scale=upper - lower + ), + (), + ), + ( + at.random.halfnormal, + (0, 1), + sp.stats.halfnorm, + (), + ), + pytest.param( + at.random.wald, + (1.5, 10.5), + lambda mean, scale: sp.stats.invgauss(mean / scale, scale=scale), + (), + marks=pytest.mark.xfail( + reason="We don't use Aesara's Wald operator", + raises=NotImplementedError, + ), + ), + ( + at.random.exponential, + (1.5,), + lambda mu: sp.stats.expon(scale=mu), + (), + ), + pytest.param( + at.random.lognormal, + (-1.5, 10.5), + lambda mu, sigma: sp.stats.lognorm(s=sigma, loc=0, scale=np.exp(mu)), + (), + ), + ( + at.random.lognormal, + (-1.5, 1.5), + lambda mu, sigma: sp.stats.lognorm(s=sigma, scale=np.exp(mu)), + (), + ), + ( + at.random.halfcauchy, + (1.5, 10.5), + lambda alpha, beta: sp.stats.halfcauchy(loc=alpha, scale=beta), + (), + ), + ( + at.random.gamma, + (1.5, 10.5), + lambda alpha, inv_beta: sp.stats.gamma(alpha, scale=1.0 / inv_beta), + (), + ), + ( + at.random.invgamma, + (1.5, 10.5), + lambda alpha, beta: sp.stats.invgamma(alpha, scale=beta), + (), + ), + ( + at.random.chisquare, + (1.5,), + lambda df: sp.stats.chi2(df), + (), + ), + pytest.param( + at.random.weibull, + (1.5,), + lambda c: sp.stats.weibull_min(c), + (), + marks=pytest.mark.xfail( + reason="We don't use Aesara's Weibull operator", + raises=NotImplementedError, + ), + ), + ( + at.random.beta, + (1.5, 1.5), + lambda alpha, beta: sp.stats.beta(alpha, beta), + (), + ), + ( + at.random.vonmises, + (1.5, 10.5), + lambda mu, kappa: sp.stats.vonmises(kappa, loc=mu), + (), + ), + ( + at.random.dirichlet, + (np.array([0.7, 0.3]),), + lambda alpha: sp.stats.dirichlet(alpha), + (), + ), + ( + at.random.dirichlet, + (np.array([[0.7, 0.3], [0.9, 0.1]]),), + lambda alpha: DirichletScipyDist(alpha), + (), + ), + pytest.param( + at.random.dirichlet, + (np.array([0.3, 
0.7]),), + lambda alpha: DirichletScipyDist(alpha), + (3, 2), + ), + ], +) +def test_transformed_logprob(at_dist, dist_params, sp_dist, size): + """ + This test takes a `RandomVariable` type, plus parameters, and uses it to + construct a variable ``a`` that's used in the graph ``b = + at.random.normal(a, 1.0)``. The transformed log-probability is then + computed for ``b``. We then test that the log-probability of ``a`` is + properly transformed, as well as any instances of ``a`` that are used + elsewhere in the graph (i.e. in ``b``), by comparing the graph for the + transformed log-probability with the SciPy-derived log-probability--using a + numeric approximation to the Jacobian term. + """ + + a = at_dist(*dist_params, size=size) + a.name = "a" + a_value_var = at.tensor(a.dtype, shape=(None,) * a.ndim) + a_value_var.name = "a_value" + + b = at.random.normal(a, 1.0) + b.name = "b" + b_value_var = b.clone() + b_value_var.name = "b_value" + + transform_rewrite = TransformValuesRewrite({a_value_var: DEFAULT_TRANSFORM}) + res = joint_logprob({a: a_value_var, b: b_value_var}, extra_rewrites=transform_rewrite) + + test_val_rng = np.random.RandomState(3238) + + logp_vals_fn = aesara.function([a_value_var, b_value_var], res) + + a_trans_op = _default_transformed_rv(a.owner.op, a.owner).op + transform = a_trans_op.transform + + a_forward_fn = aesara.function([a_value_var], transform.forward(a_value_var, *a.owner.inputs)) + a_backward_fn = aesara.function([a_value_var], transform.backward(a_value_var, *a.owner.inputs)) + log_jac_fn = aesara.function( + [a_value_var], + transform.log_jac_det(a_value_var, *a.owner.inputs), + on_unused_input="ignore", + ) + + for i in range(10): + a_dist = sp_dist(*dist_params) + a_val = a_dist.rvs(size=size, random_state=test_val_rng).astype(a_value_var.dtype) + b_dist = sp.stats.norm(a_val, 1.0) + b_val = b_dist.rvs(random_state=test_val_rng).astype(b_value_var.dtype) + + a_trans_value = a_forward_fn(a_val) + + if a_val.ndim > 0: + + def jacobian_estimate_novec(value): + + dim_diff = a_val.ndim - value.ndim # pylint: disable=cell-var-from-loop + if dim_diff > 0: + # Make sure the dimensions match the expected input + # dimensions for the compiled backward transform function + def a_backward_fn_(x): + x_ = np.expand_dims(x, axis=list(range(dim_diff))) + return a_backward_fn(x_).squeeze() + + else: + a_backward_fn_ = a_backward_fn + + jacobian_val = Jacobian(a_backward_fn_)(value) + + n_missing_dims = jacobian_val.shape[0] - jacobian_val.shape[1] + if n_missing_dims > 0: + missing_bases = np.eye(jacobian_val.shape[0])[..., -n_missing_dims:] + jacobian_val = np.concatenate([jacobian_val, missing_bases], axis=-1) + + return np.linalg.slogdet(jacobian_val)[-1] + + jacobian_estimate = np.vectorize(jacobian_estimate_novec, signature="(n)->()") + + exp_log_jac_val = jacobian_estimate(a_trans_value) + else: + jacobian_val = np.atleast_2d(sp.misc.derivative(a_backward_fn, a_trans_value, dx=1e-6)) + exp_log_jac_val = np.linalg.slogdet(jacobian_val)[-1] + + log_jac_val = log_jac_fn(a_trans_value) + np.testing.assert_almost_equal(exp_log_jac_val, log_jac_val, decimal=4) + + exp_logprob_val = a_dist.logpdf(a_val).sum() + exp_logprob_val += exp_log_jac_val.sum() + exp_logprob_val += b_dist.logpdf(b_val).sum() + + logprob_val = logp_vals_fn(a_trans_value, b_val) + + np.testing.assert_almost_equal(exp_logprob_val, logprob_val, decimal=4) + + +@pytest.mark.parametrize("use_jacobian", [True, False]) +def test_simple_transformed_logprob_nojac(use_jacobian): + X_rv = 
at.random.halfnormal(0, 3, name="X") + x_vv = X_rv.clone() + x_vv.name = "x" + + transform_rewrite = TransformValuesRewrite({x_vv: DEFAULT_TRANSFORM}) + tr_logp = joint_logprob( + {X_rv: x_vv}, extra_rewrites=transform_rewrite, use_jacobian=use_jacobian + ) + + assert np.isclose( + tr_logp.eval({x_vv: np.log(2.5)}), + sp.stats.halfnorm(0, 3).logpdf(2.5) + (np.log(2.5) if use_jacobian else 0.0), + ) + + +@pytest.mark.parametrize("ndim", (0, 1)) +def test_fallback_log_jac_det(ndim): + """ + Test that the fallback log_jac_det in RVTransform produces the correct graph for a + simple transformation: x**2 -> -log(2*x) + """ + + class SquareTransform(RVTransform): + name = "square" + + def forward(self, value, *inputs): + return at.power(value, 2) + + def backward(self, value, *inputs): + return at.sqrt(value) + + square_tr = SquareTransform() + + value = at.TensorType("float64", (None,) * ndim)("value") + value_tr = square_tr.forward(value) + log_jac_det = square_tr.log_jac_det(value_tr) + + test_value = np.full((2,) * ndim, 3) + expected_log_jac_det = -np.log(6) * test_value.size + assert np.isclose(log_jac_det.eval({value: test_value}), expected_log_jac_det) + + +def test_hierarchical_uniform_transform(): + """ + This model requires rv-value replacements in the backward transformation of + the value var `x`. + """ + + lower_rv = at.random.uniform(0, 1, name="lower") + upper_rv = at.random.uniform(9, 10, name="upper") + x_rv = at.random.uniform(lower_rv, upper_rv, name="x") + + lower = lower_rv.clone() + upper = upper_rv.clone() + x = x_rv.clone() + + transform_rewrite = TransformValuesRewrite( + { + lower: DEFAULT_TRANSFORM, + upper: DEFAULT_TRANSFORM, + x: DEFAULT_TRANSFORM, + } + ) + logp = joint_logprob( + {lower_rv: lower, upper_rv: upper, x_rv: x}, + extra_rewrites=transform_rewrite, + ) + + assert_no_rvs(logp) + assert not np.isinf(logp.eval({lower: -10, upper: 20, x: -20})) + + +def test_nondefault_transforms(): + loc_rv = at.random.uniform(-10, 10, name="loc") + scale_rv = at.random.uniform(-1, 1, name="scale") + x_rv = at.random.normal(loc_rv, scale_rv, name="x") + + loc = loc_rv.clone() + scale = scale_rv.clone() + x = x_rv.clone() + + transform_rewrite = TransformValuesRewrite( + { + loc: None, + scale: LogOddsTransform(), + x: LogTransform(), + } + ) + + logp = joint_logprob( + {loc_rv: loc, scale_rv: scale, x_rv: x}, + extra_rewrites=transform_rewrite, + ) + + # Check that numerical evaluation matches the expected transforms + loc_val = 0 + scale_val_tr = -1 + x_val_tr = -1 + + scale_val = sp.special.expit(scale_val_tr) + x_val = np.exp(x_val_tr) + + exp_logp = 0 + exp_logp += sp.stats.uniform(-10, 20).logpdf(loc_val) + exp_logp += sp.stats.uniform(-1, 2).logpdf(scale_val) + exp_logp += np.log(scale_val) + np.log1p(-scale_val) # logodds log_jac_det + exp_logp += sp.stats.norm(loc_val, scale_val).logpdf(x_val) + exp_logp += x_val_tr # log log_jac_det + + assert np.isclose( + logp.eval({loc: loc_val, scale: scale_val_tr, x: x_val_tr}), + exp_logp, + ) + + +def test_default_transform_multiout(): + r"""Make sure that `Op`\s with multiple outputs are handled correctly.""" + + # This SVD value is necessarily `1`, but it's generated by an `Op` with + # multiple outputs and no default output. 
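+ # (The singular values of the identity matrix are all ones, so `sd` equals 1.)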
+ sd = at.linalg.svd(at.eye(1))[1][0] + x_rv = at.random.normal(0, sd, name="x") + x = x_rv.clone() + + transform_rewrite = TransformValuesRewrite({x: DEFAULT_TRANSFORM}) + + logp = joint_logprob( + {x_rv: x}, + extra_rewrites=transform_rewrite, + ) + + assert np.isclose( + logp.eval({x: 1}), + sp.stats.norm(0, 1).logpdf(1), + ) + + +def test_nonexistent_default_transform(): + """ + Test that assigning `DEFAULT_TRANSFORM` to a variable that has no default + transform does not fail. + """ + x_rv = at.random.normal(name="x") + x = x_rv.clone() + + transform_rewrite = TransformValuesRewrite({x: DEFAULT_TRANSFORM}) + + logp = joint_logprob( + {x_rv: x}, + extra_rewrites=transform_rewrite, + ) + + assert np.isclose( + logp.eval({x: 1}), + sp.stats.norm(0, 1).logpdf(1), + ) + + +def test_TransformValuesMapping(): + x = at.vector() + fg = FunctionGraph(outputs=[x]) + + tvm = TransformValuesMapping({}) + fg.attach_feature(tvm) + + tvm2 = TransformValuesMapping({}) + fg.attach_feature(tvm2) + + assert fg._features[-1] is tvm + + +def test_original_values_output_dict(): + """ + Test that the original unconstrained value variable appears as the key of + the logprob factor. + """ + p_rv = at.random.beta(1, 1, name="p") + p_vv = p_rv.clone() + + tr = TransformValuesRewrite({p_vv: DEFAULT_TRANSFORM}) + logp_dict = factorized_joint_logprob({p_rv: p_vv}, extra_rewrites=tr) + + assert p_vv in logp_dict + + +def test_mixture_transform(): + """Make sure that non-`RandomVariable` `MeasurableVariable`s can be transformed. + + This test is specific to `MixtureRV`, which is derived from an `OpFromGraph`. + """ + + I_rv = at.random.bernoulli(0.5, name="I") + Y_1_rv = at.random.beta(100, 1, name="Y_1") + Y_2_rv = at.random.beta(1, 100, name="Y_2") + + # A `MixtureRV`, which is an `OpFromGraph` subclass, will replace this + # `at.stack` in the graph + Y_rv = at.stack([Y_1_rv, Y_2_rv])[I_rv] + Y_rv.name = "Y" + + i_vv = I_rv.clone() + i_vv.name = "i" + y_vv = Y_rv.clone() + y_vv.name = "y" + + logp_no_trans = joint_logprob( + {Y_rv: y_vv, I_rv: i_vv}, + ) + + transform_rewrite = TransformValuesRewrite({y_vv: LogTransform()}) + + with pytest.warns(None) as record: + # This shouldn't raise any warnings + logp_trans = joint_logprob( + {Y_rv: y_vv, I_rv: i_vv}, + extra_rewrites=transform_rewrite, + use_jacobian=False, + ) + + assert not record.list + + # The untransformed graph should be the same as the transformed graph after + # replacing the `Y_rv` value variable with a transformed version of itself + logp_nt_fg = FunctionGraph(outputs=[logp_no_trans], clone=False) + y_trans = transformed_variable(at.exp(y_vv), y_vv) + y_trans.name = "y_log" + logp_nt_fg.replace(y_vv, y_trans) + logp_nt = logp_nt_fg.outputs[0] + + assert equal_computations([logp_nt], [logp_trans]) + + +def test_invalid_interval_transform(): + x_rv = at.random.normal(0, 1) + x_vv = x_rv.clone() + + msg = "Both edges of IntervalTransform cannot be None" + tr = IntervalTransform(lambda *inputs: (None, None)) + with pytest.raises(ValueError, match=msg): + tr.forward(x_vv, *x_rv.owner.inputs) + + tr = IntervalTransform(lambda *inputs: (None, None)) + with pytest.raises(ValueError, match=msg): + tr.backward(x_vv, *x_rv.owner.inputs) + + tr = IntervalTransform(lambda *inputs: (None, None)) + with pytest.raises(ValueError, match=msg): + tr.log_jac_det(x_vv, *x_rv.owner.inputs) + + +def test_chained_transform(): + loc = 5 + scale = 0.1 + + ch = ChainedTransform( + transform_list=[ + ScaleTransform( + transform_args_fn=lambda *inputs: at.constant(scale), + ), + 
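# The transforms apply in list order, so the forward mapping is exp(scale * x) + loc +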
ExpTransform(), + LocTransform( + transform_args_fn=lambda *inputs: at.constant(loc), + ), + ], + base_op=at.random.multivariate_normal, + ) + + x = at.random.multivariate_normal(np.zeros(3), np.eye(3)) + x_val = x.eval() + + x_val_forward = ch.forward(x_val, *x.owner.inputs).eval() + assert np.allclose( + x_val_forward, + np.exp(x_val * scale) + loc, + ) + + x_val_backward = ch.backward(x_val_forward, *x.owner.inputs, scale, loc).eval() + assert np.allclose( + x_val_backward, + x_val, + ) + + log_jac_det = ch.log_jac_det(x_val_forward, *x.owner.inputs, scale, loc) + assert np.isclose( + log_jac_det.eval(), + -np.log(scale) - np.sum(np.log(x_val_forward - loc)), + ) + + +def test_exp_transform_rv(): + base_rv = at.random.normal(0, 1, size=2, name="base_rv") + y_rv = at.exp(base_rv) + y_rv.name = "y" + + y_vv = y_rv.clone() + logp = joint_logprob({y_rv: y_vv}, sum=False) + logp_fn = aesara.function([y_vv], logp) + + y_val = [0.1, 0.3] + np.testing.assert_allclose( + logp_fn(y_val), + sp.stats.lognorm(s=1).logpdf(y_val), + ) + + +def test_log_transform_rv(): + base_rv = at.random.lognormal(0, 1, size=2, name="base_rv") + y_rv = at.log(base_rv) + y_rv.name = "y" + + y_vv = y_rv.clone() + logp = joint_logprob({y_rv: y_vv}, sum=False) + logp_fn = aesara.function([y_vv], logp) + + y_val = [0.1, 0.3] + np.testing.assert_allclose( + logp_fn(y_val), + sp.stats.norm().logpdf(y_val), + ) + + +@pytest.mark.parametrize( + "rv_size, loc_type", + [ + (None, at.scalar), + (2, at.vector), + ((2, 1), at.col), + ], +) +def test_loc_transform_rv(rv_size, loc_type): + + loc = loc_type("loc") + y_rv = loc + at.random.normal(0, 1, size=rv_size, name="base_rv") + y_rv.name = "y" + y_vv = y_rv.clone() + + logp = joint_logprob({y_rv: y_vv}, sum=False) + assert_no_rvs(logp) + logp_fn = aesara.function([loc, y_vv], logp) + + loc_test_val = np.full(rv_size, 4.0) + y_test_val = np.full(rv_size, 1.0) + + np.testing.assert_allclose( + logp_fn(loc_test_val, y_test_val), + sp.stats.norm(loc_test_val, 1).logpdf(y_test_val), + ) + + +@pytest.mark.parametrize( + "rv_size, scale_type", + [ + (None, at.scalar), + (1, at.TensorType("floatX", (True,))), + ((2, 3), at.matrix), + ], +) +def test_scale_transform_rv(rv_size, scale_type): + + scale = scale_type("scale") + y_rv = at.random.normal(0, 1, size=rv_size, name="base_rv") * scale + y_rv.name = "y" + y_vv = y_rv.clone() + + logp = joint_logprob({y_rv: y_vv}, sum=False) + assert_no_rvs(logp) + logp_fn = aesara.function([scale, y_vv], logp) + + scale_test_val = np.full(rv_size, 4.0) + y_test_val = np.full(rv_size, 1.0) + + np.testing.assert_allclose( + logp_fn(scale_test_val, y_test_val), + sp.stats.norm(0, scale_test_val).logpdf(y_test_val), + ) + + +def test_transformed_rv_and_value(): + y_rv = at.random.halfnormal(-1, 1, name="base_rv") + 1 + y_rv.name = "y" + y_vv = y_rv.clone() + + transform_rewrite = TransformValuesRewrite({y_vv: LogTransform()}) + + logp = joint_logprob({y_rv: y_vv}, extra_rewrites=transform_rewrite) + assert_no_rvs(logp) + logp_fn = aesara.function([y_vv], logp) + + y_test_val = -5 + + assert np.isclose( + logp_fn(y_test_val), + sp.stats.halfnorm(0, 1).logpdf(np.exp(y_test_val)) + y_test_val, + ) + + +def test_loc_transform_multiple_rvs_fails1(): + x_rv1 = at.random.normal(name="x_rv1") + x_rv2 = at.random.normal(name="x_rv2") + y_rv = x_rv1 + x_rv2 + + y = y_rv.clone() + + with pytest.raises(RuntimeError, match="could not be derived"): + joint_logprob({y_rv: y}) + + +def test_nested_loc_transform_multiple_rvs_fails2(): + x_rv1 = 
at.random.normal(name="x_rv1") + x_rv2 = at.cos(at.random.normal(name="x_rv2")) + y_rv = x_rv1 + x_rv2 + + y = y_rv.clone() + + with pytest.raises(RuntimeError, match="could not be derived"): + joint_logprob({y_rv: y}) + + +def test_discrete_rv_unary_transform_fails(): + y_rv = at.exp(at.random.poisson(1)) + with pytest.raises(RuntimeError, match="could not be derived"): + joint_logprob({y_rv: y_rv.clone()}) + + +def test_discrete_rv_multinary_transform_fails(): + y_rv = 5 + at.random.poisson(1) + with pytest.raises(RuntimeError, match="could not be derived"): + joint_logprob({y_rv: y_rv.clone()}) + + +@pytest.mark.xfail(reason="Check not implemented yet, see #51") +def test_invalid_broadcasted_transform_rv_fails(): + loc = at.vector("loc") + y_rv = loc + at.random.normal(0, 1, size=2, name="base_rv") + y_rv.name = "y" + y_vv = y_rv.clone() + + logp = joint_logprob({y_rv: y_vv}) + logp.eval({y_vv: [0, 0, 0, 0], loc: [0, 0, 0, 0]}) + assert False, "Should have failed before" diff --git a/pymc/tests/logprob/test_utils.py b/pymc/tests/logprob/test_utils.py new file mode 100644 index 00000000000..afed18a4aec --- /dev/null +++ b/pymc/tests/logprob/test_utils.py @@ -0,0 +1,193 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
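+
+# Tests for the Aeppl-derived helpers in ``pymc.logprob.utils``: graph
+# traversal (``walk_model``), value-variable substitution
+# (``rvs_to_value_vars``), runtime parameter checks (``ParameterValueError``),
+# and ``dirac_delta``.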
+
+import aesara
+import aesara.tensor as at
+import numpy as np
+import pytest
+
+from aesara import function
+from aesara.compile import get_default_mode
+from aesara.tensor.random.basic import normal, uniform
+
+from pymc.logprob.abstract import MeasurableVariable, logprob
+from pymc.logprob.utils import (
+ ParameterValueError,
+ dirac_delta,
+ rvs_to_value_vars,
+ walk_model,
+)
+from pymc.tests.helpers import assert_no_rvs
+from pymc.tests.logprob.utils import create_aesara_params, scipy_logprob_tester
+
+
+def test_walk_model():
+ d = at.vector("d")
+ b = at.vector("b")
+ c = uniform(0.0, d)
+ c.name = "c"
+ e = at.log(c)
+ a = normal(e, b)
+ a.name = "a"
+
+ test_graph = at.exp(a + 1)
+ res = list(walk_model((test_graph,)))
+ assert a in res
+ assert c not in res
+
+ res = list(walk_model((test_graph,), walk_past_rvs=True))
+ assert a in res
+ assert c in res
+
+ res = list(walk_model((test_graph,), walk_past_rvs=True, stop_at_vars={e}))
+ assert a in res
+ assert c not in res
+
+
+def test_rvs_to_value_vars():
+
+ a = at.random.uniform(0.0, 1.0)
+ a.name = "a"
+ a.tag.value_var = a_value_var = a.clone()
+
+ b = at.random.uniform(0, a + 1.0)
+ b.name = "b"
+ b.tag.value_var = b_value_var = b.clone()
+
+ c = at.random.normal()
+ c.name = "c"
+ c.tag.value_var = c_value_var = c.clone()
+
+ d = at.log(c + b) + 2.0
+
+ initial_replacements = {b: b_value_var, c: c_value_var}
+ (res,), replaced = rvs_to_value_vars((d,), initial_replacements=initial_replacements)
+
+ assert res.owner.op == at.add
+ log_output = res.owner.inputs[0]
+ assert log_output.owner.op == at.log
+ log_add_output = res.owner.inputs[0].owner.inputs[0]
+ assert log_add_output.owner.op == at.add
+ c_output = log_add_output.owner.inputs[0]
+
+ # We make sure that the random variables were replaced
+ # with their value variables
+ assert c_output == c_value_var
+ b_output = log_add_output.owner.inputs[1]
+ assert b_output == b_value_var
+
+ # There shouldn't be any `RandomVariable`s in the resulting graph
+ assert_no_rvs(res)
+
+ res_ancestors = list(walk_model((res,), walk_past_rvs=True))
+
+ assert b_value_var in res_ancestors
+ assert c_value_var in res_ancestors
+ assert a_value_var not in res_ancestors
+
+
+def test_rvs_to_value_vars_intermediate_rv():
+ """Test that the function replaces variables above an intermediate RV with their values."""
+ a = at.random.uniform(0.0, 1.0)
+ a.name = "a"
+ a.tag.value_var = a_value_var = a.clone()
+
+ b = at.random.uniform(0, a + 1.0)
+ b.name = "b"
+ b.tag.value_var = b.clone()
+
+ c = at.random.normal()
+ c.name = "c"
+ c.tag.value_var = c_value_var = c.clone()
+
+ d = at.log(c + b) + 2.0
+
+ initial_replacements = {a: a_value_var, c: c_value_var}
+ (res,), replaced = rvs_to_value_vars((d,), initial_replacements=initial_replacements)
+
+ # Assert that the only RandomVariable that remains in the graph is `b`
+ res_ancestors = list(walk_model((res,), walk_past_rvs=True))
+
+ assert (
+ len(
+ list(n for n in res_ancestors if n.owner and isinstance(n.owner.op, MeasurableVariable))
+ )
+ == 1
+ )
+
+ assert c_value_var in res_ancestors
+ assert a_value_var in res_ancestors
+
+
+def test_CheckParameter():
+ mu = at.constant(0)
+ sigma = at.scalar("sigma")
+ x_rv = at.random.normal(mu, sigma, name="x")
+ x_vv = at.constant(0)
+ x_logp = logprob(x_rv, x_vv)
+
+ x_logp_fn = function([sigma], x_logp)
+ with pytest.raises(ParameterValueError, match="sigma > 0"):
+ x_logp_fn(-1)
+
+
+def test_dirac_delta():
+ fn = aesara.function(
+ [], dirac_delta(at.as_tensor(1)), mode=get_default_mode().excluding("useless")
+ ) + with pytest.warns(UserWarning, match=".*DiracDelta.*"): + assert np.array_equal(fn(), 1) + + +@pytest.mark.parametrize( + "dist_params, obs", + [ + ((np.array(0, dtype=np.float64),), np.array([0, 0.5, 1, -1], dtype=np.float64)), + ((np.array([0, 0], dtype=np.int64),), np.array(0, dtype=np.int64)), + ], +) +def test_dirac_delta_logprob(dist_params, obs): + + dist_params_at, obs_at, _ = create_aesara_params(dist_params, obs, ()) + dist_params = dict(zip(dist_params_at, dist_params)) + + x = dirac_delta(*dist_params_at) + + @np.vectorize + def scipy_logprob(obs, c): + return 0.0 if obs == c else -np.inf + + scipy_logprob_tester(x, obs, dist_params, test_fn=scipy_logprob) diff --git a/pymc/tests/logprob/utils.py b/pymc/tests/logprob/utils.py new file mode 100644 index 00000000000..6ed3348cb3c --- /dev/null +++ b/pymc/tests/logprob/utils.py @@ -0,0 +1,174 @@ +# Copyright 2022- The PyMC Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# MIT License +# +# Copyright (c) 2021-2022 aesara-devs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
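+
+# Shared helpers for the ``pymc/tests/logprob`` test modules: graph assertions
+# (``assert_no_rvs``), a Poisson-zero HMM simulator (``simulate_poiszero_hmm``),
+# and SciPy-based reference checks (``create_aesara_params`` and
+# ``scipy_logprob_tester``).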
+
+import numpy as np
+
+from aesara import tensor as at
+from aesara.graph.basic import walk
+from aesara.graph.op import HasInnerGraph
+from scipy import stats
+
+from pymc.logprob.abstract import MeasurableVariable, icdf, logcdf, logprob
+
+
+def assert_no_rvs(var):
+ """Assert that there are no `MeasurableVariable` nodes in a graph."""
+
+ def expand(r):
+ owner = r.owner
+ if owner:
+ inputs = list(reversed(owner.inputs))
+
+ if isinstance(owner.op, HasInnerGraph):
+ inputs += owner.op.inner_outputs
+
+ return inputs
+
+ for v in walk([var], expand, False):
+ if v.owner and isinstance(v.owner.op, MeasurableVariable):
+ raise AssertionError(f"Variable {v} is a MeasurableVariable")
+
+
+def simulate_poiszero_hmm(
+ N, mu=10.0, pi_0_a=np.r_[1, 1], p_0_a=np.r_[5, 1], p_1_a=np.r_[1, 1], seed=None
+):
+ rng = np.random.default_rng(seed)
+
+ p_0 = rng.dirichlet(p_0_a)
+ p_1 = rng.dirichlet(p_1_a)
+
+ Gammas = np.stack([p_0, p_1])
+ Gammas = np.broadcast_to(Gammas, (N,) + Gammas.shape)
+
+ pi_0 = rng.dirichlet(pi_0_a)
+ s_0 = rng.choice(pi_0.shape[0], p=pi_0)
+ s_tm1 = s_0
+
+ y_samples = np.empty((N,), dtype=np.int64)
+ s_samples = np.empty((N,), dtype=np.int64)
+
+ for i in range(N):
+ s_t = rng.choice(Gammas.shape[-1], p=Gammas[i, s_tm1])
+ s_samples[i] = s_t
+ s_tm1 = s_t
+
+ if s_t == 1:
+ y_samples[i] = rng.poisson(mu)
+ else:
+ y_samples[i] = 0
+
+ sample_point = {
+ "Y_t": y_samples,
+ "p_0": p_0,
+ "p_1": p_1,
+ "S_t": s_samples,
+ "P_tt": Gammas,
+ "S_0": s_0,
+ "pi_0": pi_0,
+ }
+
+ return sample_point
+
+
+def scipy_logprob(obs, p):
+ if p.ndim > 1:
+ if p.ndim > obs.ndim:
+ obs = obs[((None,) * (p.ndim - obs.ndim) + (Ellipsis,))]
+ elif p.ndim < obs.ndim:
+ p = p[((None,) * (obs.ndim - p.ndim) + (Ellipsis,))]
+
+ pattern = (p.ndim - 1,) + tuple(range(p.ndim - 1))
+ return np.log(np.take_along_axis(p.transpose(pattern), obs, 0))
+ else:
+ return np.log(p[obs])
+
+
+def create_aesara_params(dist_params, obs, size):
+ dist_params_at = []
+ for p in dist_params:
+ p_aet = at.as_tensor(p).type()
+ p_aet.tag.test_value = p
+ dist_params_at.append(p_aet)
+
+ size_at = []
+ for s in size:
+ s_aet = at.iscalar()
+ s_aet.tag.test_value = s
+ size_at.append(s_aet)
+
+ obs_at = at.as_tensor(obs).type()
+ obs_at.tag.test_value = obs
+
+ return dist_params_at, obs_at, size_at
+
+
+def scipy_logprob_tester(
+ rv_var, obs, dist_params, test_fn=None, check_broadcastable=True, test="logprob"
+):
+ """Test that the log-probability (or log-CDF/inverse-CDF) of `rv_var`
+ matches a reference SciPy implementation in values, dtype, shape, and
+ broadcast dimensions.
+ """ + if test_fn is None: + name = getattr(rv_var.owner.op, "name", None) + + if name is None: + name = rv_var.__name__ + + test_fn = getattr(stats, name) + + if test == "logprob": + aesara_res = logprob(rv_var, at.as_tensor(obs)) + elif test == "logcdf": + aesara_res = logcdf(rv_var, at.as_tensor(obs)) + elif test == "icdf": + aesara_res = icdf(rv_var, at.as_tensor(obs)) + else: + raise ValueError(f"test must be one of (logprob, logcdf, icdf), got {test}") + + aesara_res_val = aesara_res.eval(dist_params) + + numpy_res = np.asarray(test_fn(obs, *dist_params.values())) + + assert aesara_res.type.numpy_dtype.kind == numpy_res.dtype.kind + + if check_broadcastable: + numpy_shape = np.shape(numpy_res) + numpy_bcast = [s == 1 for s in numpy_shape] + np.testing.assert_array_equal(aesara_res.type.broadcastable, numpy_bcast) + + np.testing.assert_array_equal(aesara_res_val.shape, numpy_res.shape) + + np.testing.assert_array_almost_equal(aesara_res_val, numpy_res, 4) diff --git a/pymc/tests/sampling/test_forward.py b/pymc/tests/sampling/test_forward.py index 59063c6326a..1cfbd69050d 100644 --- a/pymc/tests/sampling/test_forward.py +++ b/pymc/tests/sampling/test_forward.py @@ -331,7 +331,7 @@ def test_lkj_cholesky_cov(self): def test_non_random_model_variable(self): with pm.Model() as model: # A user may register non-pure RandomVariables that can nevertheless be - # sampled, as long as a custom logprob is dispatched or Aeppl can infer + # sampled, as long as a custom logprob is dispatched or we can infer # its logprob (which is the case for `clip`) y = at.clip(pm.Normal.dist(), -1, 1) y = model.register_rv(y, name="y") diff --git a/pymc/tests/test_aesaraf.py b/pymc/tests/test_aesaraf.py index f627d932faa..6524df1810b 100644 --- a/pymc/tests/test_aesaraf.py +++ b/pymc/tests/test_aesaraf.py @@ -22,7 +22,6 @@ import pytest import scipy.sparse as sps -from aeppl.logprob import ParameterValueError from aesara.compile.builders import OpFromGraph from aesara.graph.basic import Variable, equal_computations from aesara.tensor.random.basic import normal, uniform @@ -48,6 +47,7 @@ from pymc.distributions.distribution import SymbolicRandomVariable from pymc.distributions.transforms import Interval from pymc.exceptions import NotConstantValueError +from pymc.logprob.utils import ParameterValueError from pymc.tests.helpers import assert_no_rvs from pymc.vartypes import int_types diff --git a/pymc/tests/test_model.py b/pymc/tests/test_model.py index e03cd4507b6..c020bdd90b8 100644 --- a/pymc/tests/test_model.py +++ b/pymc/tests/test_model.py @@ -29,7 +29,6 @@ import scipy.sparse as sps import scipy.stats as st -from aeppl.transforms import IntervalTransform from aesara.graph import graph_inputs from aesara.tensor import TensorVariable from aesara.tensor.random.op import RandomVariable @@ -44,6 +43,7 @@ from pymc.distributions.logprob import _joint_logp from pymc.distributions.transforms import log from pymc.exceptions import ImputationWarning, ShapeError, ShapeWarning +from pymc.logprob.transforms import IntervalTransform from pymc.model import Point, ValueGradFunction, modelcontext from pymc.tests.helpers import SeededTest from pymc.tests.models import simple_model diff --git a/requirements-dev.txt b/requirements-dev.txt index 3e2cc87cf08..8c368174dc1 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,6 @@ # This file is auto-generated by scripts/generate_pip_deps_from_conda.py, do not modify. # See that file for comments about the need/usage of each dependency. 
-aeppl==0.0.38 aesara==2.8.7 arviz>=0.13.0 cachetools>=4.2.1 @@ -13,6 +12,7 @@ ipython>=7.16 jupyter-sphinx mypy==0.990 myst-nb +numdifftools>=0.9.40 numpy>=1.15.0 numpydoc pandas>=0.24.0 diff --git a/requirements.txt b/requirements.txt index c99dea2315a..abe5b314074 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ -aeppl==0.0.38 aesara==2.8.7 arviz>=0.13.0 cachetools>=4.2.1 diff --git a/scripts/run_mypy.py b/scripts/run_mypy.py index f34ad7d7a5b..a4ae8a39202 100644 --- a/scripts/run_mypy.py +++ b/scripts/run_mypy.py @@ -45,6 +45,8 @@ pymc/gp/gp.py pymc/gp/mean.py pymc/gp/util.py +pymc/logprob/__init__.py +pymc/logprob/abstract.py pymc/math.py pymc/ode/__init__.py pymc/ode/ode.py @@ -190,10 +192,6 @@ def check_no_unexpected_results(mypy_lines: Iterator[str]): if __name__ == "__main__": - # Enforce PEP 561 for some important dependencies that - # have relevant type hints but don't tell that to mypy. - enforce_pep561("aeppl") - parser = argparse.ArgumentParser(description="Run mypy type checks on PyMC codebase.") parser.add_argument( "--verbose", action="count", default=0, help="Pass this to print mypy output."