From c47c52dc82aae412057e91b556363717d647e8bd Mon Sep 17 00:00:00 2001
From: vcantarella
Date: Tue, 17 Sep 2024 15:07:30 +0200
Subject: [PATCH] improving code clarity and cleaning up the code + improvements in CI

---
 .github/workflows/python-package-conda.yml    |  16 +-
 LICENCE                                       |   3 +-
 README.md                                     |  17 +-
 examples/.gitingnore                          |   2 +
 examples/ammer/ammer_notebook.ipynb           |  18 +-
 examples/ammer/ammer_notebook_hyvrmodel.ipynb |   2 +-
 examples/ammer/ammer_v2606.py                 |  40 +++-
 examples/ammer/fromto_units.csv               |  50 +++++
 examples/ammer/gaussian_kernel_regression.py  |  52 ++---
 examples/ammer/loc.csv                        |  13 ++
 examples/channels/test_gauss_surface.ipynb    |  39 ----
 examples/demonstration/model_progression.py   | 189 ------------------
 src/HyVR_fork.egg-info/SOURCES.txt            |   1 -
 src/hyvr/objects/channel.py                   |  20 +-
 src/hyvr/objects/sheet.py                     |  12 +-
 src/hyvr/objects/trough.py                    |  19 +-
 src/hyvr/postprocess/plotting.py              |  83 --------
 src/hyvr/tools.py                             |  43 +++-
 src/hyvr/utils.py                             | 160 +++++++++++++--
 tests/test_surface.py                         |  44 ++--
 20 files changed, 355 insertions(+), 468 deletions(-)
 create mode 100644 examples/.gitingnore
 create mode 100644 examples/ammer/fromto_units.csv
 create mode 100644 examples/ammer/loc.csv
 delete mode 100644 examples/channels/test_gauss_surface.ipynb
 delete mode 100644 examples/demonstration/model_progression.py
 delete mode 100644 src/hyvr/postprocess/plotting.py

diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
index c8dc052..4e2e24c 100644
--- a/.github/workflows/python-package-conda.yml
+++ b/.github/workflows/python-package-conda.yml
@@ -24,15 +24,13 @@ jobs:
     - name: Lint with ruff
       run: |
         pip install ruff
-        ruff check # Lint all files in the current directory (and any subdirectories).
         ruff format # Format all files in the current directory (and any subdirectories).
+        ruff check # Lint all files in the current directory (and any subdirectories).
     - name: Test with pytest
       run: |
-        conda install pytest
-        pip install pytest-html
-        pytest
-    - name: Coverage tests
-      run: |
-        pip install coverage
-        NUMBA_DISABLE_JIT=1 coverage run -m pytest
-        coverage report -m
+        pip install pytest pytest-cov
+        NUMBA_DISABLE_JIT=1 pytest --cov
+    - name: Upload results to Codecov
+      uses: codecov/codecov-action@v4
+      with:
+        token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/LICENCE b/LICENCE
index 911ea4a..88e6a34 100644
--- a/LICENCE
+++ b/LICENCE
@@ -1,6 +1,7 @@
 MIT License
 
-Copyright (c) 2017
+Copyright (c) 2023 Vitor Cantarella
+Copyright (c) for portions of the HyVR module is held by Jeremy Bennett (2017) and Samuel Schorr (2018), as part of the original hyvr repository
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index aedaacd..487dc8a 100644
--- a/README.md
+++ b/README.md
@@ -1,18 +1,9 @@
-# Status
+# HyVR - simple
 
-# Introduction
-
-**HyVR**
-
-This is a fork from the original HyVR package with minimal implementation of
-the core features the idea to keep the original ideas alive,
-but maintainable in the fast python development ecosystem.
-Thus, most of the non-essencial functionality, that is nowadays done much better in other packages,
-has been dropped. The functionality is now focus on the creation of geobodies to grids.
-
-Eveything that could be done before is still doable, and more. But the user should understand a bit more about
-operating on arrays and using python.
+[![Build Status](https://github.com/vcantarella/hyvr/actions/workflows/python-package-conda.yml/badge.svg?event=push)](https://github.com/vcantarella/hyvr/actions/workflows/python-package-conda.yml)
+[![codecov](https://codecov.io/github/vcantarella/hyvr/graph/badge.svg?token=QWGCQVEJ3G)](https://codecov.io/github/vcantarella/hyvr)
+This is a fork of the original HyVR package with a minimal implementation of its core features. The idea is to keep the original concepts alive, but maintainable within the fast-moving Python development ecosystem.
 
 The Hydrogeological Virtual Reality simulation package (HyVR) is a Python module that helps researchers and practitioners generate sedimentary subsurface models with
diff --git a/examples/.gitingnore b/examples/.gitingnore
new file mode 100644
index 0000000..08d660e
--- /dev/null
+++ b/examples/.gitingnore
@@ -0,0 +1,2 @@
+ammer/*
+channels/*
\ No newline at end of file
diff --git a/examples/ammer/ammer_notebook.ipynb b/examples/ammer/ammer_notebook.ipynb
index e97749a..1057988 100644
--- a/examples/ammer/ammer_notebook.ipynb
+++ b/examples/ammer/ammer_notebook.ipynb
@@ -22,10 +22,22 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "ename": "ImportError",
+     "evalue": "cannot import name 'trough' from 'hyvr' (c:\\Users\\vcant\\miniconda3\\envs\\hyvr\\Lib\\site-packages\\hyvr\\__init__.py)",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mImportError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[1;32mIn[1], line 4\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mhyvr\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtools\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m specsim_surface \u001b[38;5;66;03m# used for the surface creation\u001b[39;00m\n\u001b[0;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mhyvr\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m channel \u001b[38;5;66;03m# channel object creation\u001b[39;00m\n\u001b[1;32m----> 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mhyvr\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m trough \u001b[38;5;66;03m# trough creation\u001b[39;00m\n\u001b[0;32m 5\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mscipy\u001b[39;00m \u001b[38;5;66;03m# general scientific calculations\u001b[39;00m\n\u001b[0;32m 6\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mflopy\u001b[39;00m \u001b[38;5;66;03m# our modelling interface\u001b[39;00m\n",
+      "\u001b[1;31mImportError\u001b[0m: cannot import name 'trough' from 'hyvr' (c:\\Users\\vcant\\miniconda3\\envs\\hyvr\\Lib\\site-packages\\hyvr\\__init__.py)"
+     ]
+    }
+   ],
    "source": [
     "from hyvr.tools import ferguson_curve # used for the channel curve creation\n",
-    "from hyvr.tools import surface_gauss_regression # used for the surface creation\n",
+    "from hyvr.tools import specsim_surface # used for the surface creation\n",
     "from hyvr import channel # channel object creation\n",
     "from hyvr import trough # trough creation\n",
     "import scipy # general scientific calculations\n",
@@ -711,7 +723,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.0"
+   "version": "3.11.7"
   }
  },
  "nbformat": 4,
diff --git a/examples/ammer/ammer_notebook_hyvrmodel.ipynb b/examples/ammer/ammer_notebook_hyvrmodel.ipynb
index d2410f7..7d0f4c1 100644
--- a/examples/ammer/ammer_notebook_hyvrmodel.ipynb
+++ b/examples/ammer/ammer_notebook_hyvrmodel.ipynb
@@ -1744,7 +1744,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.0"
+   "version": "3.11.7"
   }
  },
  "nbformat": 4,
diff --git a/examples/ammer/ammer_v2606.py b/examples/ammer/ammer_v2606.py
index c92c586..df941ac 100644
--- a/examples/ammer/ammer_v2606.py
+++ b/examples/ammer/ammer_v2606.py
@@ -16,6 +16,7 @@
 import flopy  # our modelling interface
 import numpy as np  # general numerical functions and array manipulation
 from hyvr.utils import specsim
+from hyvr.tools import specsim_surface
 import numba
 
 # %% [markdown]
@@ -63,7 +64,7 @@
     ncol=ncol,
     delr=dely,
     delc=delx,
-    top=7.0,
+    top=H,
     botm=np.arange(H - delz, 0 - delz, -delz),
 )
 
@@ -115,21 +116,40 @@
 #
 # %%
 # Define the top and bottom of the model as the top and bottom of the sequence. Assign the unassigned cells to facies 4 (the background facies).
+
 np.random.seed(37893)
-mean_top = H - 1.93
-var_top = 6.59e-2
-corl_top = np.array([2.0e2, 6e2])
+mean_top = H - 1.86
+var_top = 0.7
+corl_top = np.array([70, 792])
 surf_top = specsim(X[0, :, :], Y[0, :, :], mean=mean_top, var=var_top, corl=corl_top)
-mean_botm = H - 5.82
-var_botm = 0.67
-corl_botm = np.array([200, 600])
+mean_botm = H - 8.3
+var_botm = 0.9
+corl_botm = np.array([300, 900])
 surf_botm = specsim(
-    X[0, :, :], Y[0, :, :], mean=mean_botm, var=var_botm, corl=corl_botm
+    X[0, :, :], Y[0, :, :], mean=mean_botm, var=var_botm, corl=corl_top
 )
-
+import matplotlib.pyplot as plt
+fig, ax = plt.subplots(figsize=(12, 6))
+ax.plot(X[0, int(nrow / 2), :], surf_top[int(nrow / 2), :], label="Top")
+ax.plot(X[0, int(nrow / 2), :], surf_botm[int(nrow / 2), :], label="Botm")
+ax.legend()
+plt.show()
 # %% [markdown]
+# %%
+surf_top2 = specsim_surface(X[0, :, :], Y[0, :, :], mean=mean_top, var=var_top, corl=corl_top)
+surf_botm2 = specsim_surface(X[0, :, :], Y[0, :, :], mean=mean_botm, var=var_botm, corl=corl_top)
+fig, ax = plt.subplots(figsize=(12, 6))
+ax.plot(X[0, int(nrow / 2), :], surf_top[int(nrow / 2), :], label="Top")
+ax.plot(X[0, int(nrow / 2), :], surf_top2[int(nrow / 2), :], label="Top2")
+ax.plot(X[0, int(nrow / 2), :], surf_botm[int(nrow / 2), :], label="Botm")
+ax.plot(X[0, int(nrow / 2), :], surf_botm2[int(nrow / 2), :], label="Botm2")
+ax.legend()
+plt.show()
+
+
+# %% [markdown]
 # ### Defining the sequence of thicknesses
 #
 # We will assume an average thickness of 0.7 m. The first layer in the system is modelled deterministically. It is $\approx$ 0.4 m thick and composed of light-colored tufa with fossils and low organic matter content. The remaining layers are modelled probabilistically.
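The hunk below draws the layer thicknesses by numerically root-solving the interval-division CDF `F(t) = 1 - ((a - t)/a)**(n - 1)` from the cited stackexchange answer. That CDF also inverts in closed form, so the sampler can be written directly; a minimal standalone sketch, with an illustrative function name and example values that are not part of the patch:

```python
import numpy as np


def sample_thicknesses(total_thickness, n_layers, min_thick=0.3, seed=None):
    """Sample sub-interval lengths of [0, a] split at n - 1 uniform random points."""
    rng = np.random.default_rng(seed)
    a, n = total_thickness, n_layers
    thicks, total = [], 0.0
    while total < a:
        p = rng.uniform()
        # closed-form inverse of F(t) = 1 - ((a - t) / a) ** (n - 1)
        t = a * (1.0 - (1.0 - p) ** (1.0 / (n - 1)))
        t = max(t, min_thick)  # enforce a minimum layer thickness, as in the script
        thicks.append(t)
        total += t
    return thicks


print(sample_thicknesses(total_thickness=7.0, n_layers=8, seed=37893))
```

Inverting analytically avoids the per-layer `scipy.optimize.root` calls used in the examples.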
@@ -140,7 +160,7 @@ # %% # according to answer in : https://math.stackexchange.com/questions/291174/probability-distribution-of-the-subinterval-lengths-from-a-random-interval-divis # The cumulative distribution function for intervals randomly sampled from the interval [0,a] is: -simulated_thickness = (mean_top - 0) - 0.4 +simulated_thickness = (mean_top) - 1 n = 8 print(f"With the number of layers: {n}") F = lambda t: 1 - ((simulated_thickness - t) / simulated_thickness) ** (n - 1) diff --git a/examples/ammer/fromto_units.csv b/examples/ammer/fromto_units.csv new file mode 100644 index 0000000..fffbcca --- /dev/null +++ b/examples/ammer/fromto_units.csv @@ -0,0 +1,50 @@ +name,from,to,units +X011,0,2.4,clay1 +X011,2.4,9.2,gravel +X012,0,2,clay1 +X012,2,10,tufa +X012,10,10.8,clay2 +X012,10.8,14,gravel +X013,0,0.8,clay1 +X013,0.8,9.6,tufa +X013,9.6,10.7,clay2 +X013,10.7,12,gravel +X014,0,2.3,clay1 +X014,2.3,4.5,tufa +X014,4.5,12,clay2 +X014,12,14,gravel +X015,0,2.3,clay1 +X015,2.3,9.95,tufa +X015,10.3,14,gravel +X017,0,2,clay1 +X017,2,8.6,tufa +X017,8.6,9.5,clay2 +X017,9.5,12,gravel +X018,0,10,clay1 +X018,10,11.75,gravel +X018,11.75,12,marge +X019,0,1.5,clay1 +X019,1.5,4.55,tufa +X019,4.55,8.1,clay2 +X019,8.1,14,gravel +X020,0,2.1,clay1 +X020,2.1,9,tufa +X020,9,10.85,clay2 +X020,10.85,13.65,gravel +X020,13.65,14,marge +X022,0,1.7,clay1 +X022,1.7,9,tufa +X022,9,10.65,clay2 +X022,10.65,14,gravel +X024,0,1.9,clay1 +X024,1.9,9,tufa +X024,9,10.65,clay2 +X024,10.65,14,gravel +X030,0,2,clay1 +X030,2,8.8,tufa +X030,8.8,10,clay2 +X030,10,16.5,gravel +X037,0,2.5,clay1 +X037,2.5,8.2,tufa +X037,8.2,9.4,clay2 +X037,8.2,12.5,gravel diff --git a/examples/ammer/gaussian_kernel_regression.py b/examples/ammer/gaussian_kernel_regression.py index cc86a3e..fd50539 100644 --- a/examples/ammer/gaussian_kernel_regression.py +++ b/examples/ammer/gaussian_kernel_regression.py @@ -1,25 +1,32 @@ import george import numpy as np +import pandas as pd from george.kernels import ExpSquaredKernel import scipy +loc = pd.read_csv("examples/ammer/loc.csv") +fromto = pd.read_csv("examples/ammer/fromto_units.csv") +fromto = fromto[fromto["units"] == "tufa"] +# merge loc and fromto +loc = loc.merge(fromto, on="name") -var = 2**2 -corr_lengths = np.array([1000, 2000]) +#top_botm = np.loadtxt("examples/ammer/top_botm.csv",delimiter=",", skiprows=1,usecols=(1,2,3,4)) +top = loc["z"].to_numpy() - loc["from"].to_numpy() +bottom = loc["z"].to_numpy() - loc["to"].to_numpy() +x_y = loc[["x", "y"]].to_numpy() -kernel = var*ExpSquaredKernel(corr_lengths, ndim=2, ) +var = 0.2**2 +corr_lengths = np.array([70, 120]) + +kernel = 10*np.var(top)*ExpSquaredKernel(corr_lengths, ndim=2, ) print(kernel.get_parameter_names()) print(kernel.get_parameter_vector()) np.exp(kernel.get_parameter_vector()) -top_botm = np.loadtxt("examples/ammer/top_botm.csv",delimiter=",", skiprows=1,usecols=(1,2,3,4)) -top = top_botm[:,2] -bottom = top_botm[:,3] -x_y = top_botm[:,0:2] gp_top = george.GP(kernel, mean=np.mean(top), fit_mean=False, - white_noise=np.log(0.5**2), fit_white_noise=False) + white_noise=0.2**2, fit_white_noise=False) gp_top.compute(x_y) print(gp_top.log_likelihood(top)) print(gp_top.grad_log_likelihood(top)) @@ -40,27 +47,28 @@ def grad_nll_top(p): # Run the optimization routine. 
p0 = gp_top.get_parameter_vector() -results = scipy.optimize.minimize(nll_top, p0, jac=grad_nll_top, method="L-BFGS-B") - +results = scipy.optimize.minimize(nll_top, p0, jac=grad_nll_top, method="L-BFGS-B", tol = 1e-5) +print(results) # Update the kernel and print the final log-likelihood. gp_top.set_parameter_vector(results.x) +gp_top.compute(x_y) print(gp_top.log_likelihood(top)) -gp_top.get_parameter_names() +print(gp_top.get_parameter_names()) params = gp_top.get_parameter_vector() -params = np.concatenate([np.array([params[0]]), np.exp(params[1:])]) -params +print(np.exp(params)) +print(np.sqrt(np.exp(params)[1:3])) - -kernelb = var*ExpSquaredKernel(corr_lengths, ndim=2, ) +corr_b = np.array([200, 1000]) +kernelb = var*ExpSquaredKernel(corr_b, ndim=2, ) kernelb.get_parameter_names() kernelb.get_parameter_vector() -gp_bottom = george.GP(kernel, mean=np.mean(bottom), fit_mean=False, - white_noise=np.log(0.5**2), fit_white_noise=False) +gp_bottom = george.GP(kernelb, mean=np.mean(bottom), fit_mean=False, + white_noise=1**2, fit_white_noise=False) gp_bottom.compute(x_y) print(gp_bottom.log_likelihood(bottom)) print(gp_bottom.grad_log_likelihood(bottom)) @@ -81,7 +89,7 @@ def grad_nll_bottom(p): # Run the optimization routine. p0 = gp_bottom.get_parameter_vector() -results = scipy.optimize.minimize(nll_bottom, p0, jac=grad_nll_bottom, method="L-BFGS-B", tol = 1e-6) +results = scipy.optimize.minimize(nll_bottom, p0, jac=grad_nll_bottom, method="L-BFGS-B", tol = 1e-12) # Update the kernel and print the final log-likelihood. gp_bottom.set_parameter_vector(results.x) @@ -90,8 +98,6 @@ def grad_nll_bottom(p): gp_bottom.get_parameter_names() paramsb = gp_bottom.get_parameter_vector() kern_pars = np.exp(paramsb) -corr_lb = np.sqrt(kern_pars) -corr_lb -paramsb = np.concatenate([np.array([paramsb[0]]), np.exp(paramsb[1:])]) -paramsb - +print(kern_pars) +print(np.sqrt(kern_pars[1:3])) +mean_top = np.mean(top) diff --git a/examples/ammer/loc.csv b/examples/ammer/loc.csv new file mode 100644 index 0000000..a176328 --- /dev/null +++ b/examples/ammer/loc.csv @@ -0,0 +1,13 @@ +name,x,y,z +X011,3500466.23,5375816.52,340.83 +X012,3499758.76,5375662.68,341.62 +X013,3499553.94,5375898.53,342.38 +X014,3499121,5376120,344.22 +X015,3499092.76,5375723.18,343.3 +X017,3498622.48,5375995.81,344.46 +X018,3498166.26,5375706.61,347.08 +X019,3498078.02,5375935.71,346.44 +X020,3499479.44,5375593.82,342.22 +X022,3499472.76,5375563.75,342.19 +X024,3499466.75,5375533.16,342.23 +X030,3498948.341,5375919.338,343.776 diff --git a/examples/channels/test_gauss_surface.ipynb b/examples/channels/test_gauss_surface.ipynb deleted file mode 100644 index 5dfd46f..0000000 --- a/examples/channels/test_gauss_surface.ipynb +++ /dev/null @@ -1,39 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flopy-env-win", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/examples/demonstration/model_progression.py b/examples/demonstration/model_progression.py deleted file mode 100644 index 
8fa8d29..0000000 --- a/examples/demonstration/model_progression.py +++ /dev/null @@ -1,189 +0,0 @@ -import flopy -import numpy as np -import scipy -from hyvr.objects.channel import channel -from hyvr.objects.trough import trough -from hyvr.tools import ferguson_curve - -# Model creation: -name = "ammer_simplified" -ws = "." -sim = flopy.mf6.MFSimulation( - sim_name=name, - exe_name="mf6", - version="mf6", - sim_ws=ws, -) -# Simulation time: -tdis = flopy.mf6.ModflowTdis( - sim, pname="tdis", time_units="DAYS", nper=1, perioddata=[(1.0, 1, 1.0)] -) -# Nam file -model_nam_file = "{}.nam".format(name) -# Groundwater flow object: -gwf = flopy.mf6.ModflowGwf( - sim, - modelname=name, - model_nam_file=model_nam_file, - save_flows=True, -) -# Grid properties: -Lx = 200 # problem lenght [m] -Ly = 100 # problem width [m] -H = 5 # aquifer height [m] -delx = 1 # block size x direction -dely = 1 # block size y direction -delz = 0.2 # block size z direction -nlay = int(H / delz) -ncol = int(Lx / delx) # number of columns -nrow = int(Ly / dely) # number of layers - -# Flopy Discretizetion Objects (DIS) -dis = flopy.mf6.ModflowGwfdis( - gwf, - xorigin=0.0, - yorigin=0.0, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=dely, - delc=delx, - top=7.0, - botm=np.arange(H - delz, 0 - delz, -delz), -) - -# Flopy initial Conditions -h0 = 18 -start = h0 * np.ones((nlay, nrow, ncol)) -ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=start) - -# Node property flow -k = 1e-5 # Model conductivity in m/s -npf = flopy.mf6.ModflowGwfnpf( - gwf, - icelltype=1, # This we define the model as convertible (water table aquifer) - k=k, -) -# boundary conditions: -grid = gwf.modelgrid - -centers = grid.xyzcellcenters - -X = centers[0] -Y = centers[1] -Z = centers[2] - -X = np.broadcast_to(X, Z.shape) -Y = np.broadcast_to(Y, Z.shape) - - -# print(channel_curve_1) -# fig,ax = plt.subplots() -# ax.plot(channel_curve_1[0],channel_curve_1[1]) -# fig.show() - -# building the layers: -# according to answer in : https://math.stackexchange.com/questions/291174/probability-distribution-of-the-subinterval-lengths-from-a-random-interval-divis -# The cumulative distribution function for intervals randomly sampled from the interval [0,a] is: -simulated_thickness = 5 -n = np.floor(simulated_thickness / 0.6) -F = lambda t: 1 - ((simulated_thickness - t) / simulated_thickness) ** (n - 1) -F_ = lambda t, p: F(t) - p -thicks = [] -sum_thick = 0.0 -while sum_thick < simulated_thickness: - p = np.random.uniform(0, 1) - sol = scipy.optimize.root(F_, 0.1, args=(p,)) - thick = sol.x[0] - if thick < 0.3: - thick = 0.3 - sum_thick += thick - thicks.append(thick) - -print(sum_thick) - -thicks = [thick for thick in thicks if (thick < simulated_thickness)] - -# We are simulating the facies: -##TUFA: -# T8+T9: PEAT -# T11: gravel -colors = { - 1: "#FCF2A8", - 2: "#cec47f", - 4: "#c9c186", - 5: "#7e7954", - 6: "#b0b468", - 7: "#c8b23d", - 8: "#323021", - 11: "#b7b7b7", -} - -facies_tufa = np.array([2, 3, 4, 5], dtype=np.int32) -facies = np.empty_like(Z, dtype=np.int32) -z_0 = 0.0 -layer = 0 -for thick in thicks: - # peat lenses: - peat = 0.0 - # peat lenses - while peat < 0.15: - x_c = np.random.uniform(0, 200) - y_c = np.random.uniform(0, 100) - z_c = z_0 + thick + np.random.uniform(-0.2, 0) - a = np.random.uniform(20, 50) - b = np.random.uniform(15, 25) - azim = np.random.uniform(60, 120) - if thick > 0.6: - peat_depth = 0.6 - else: - peat_depth = thick - c = peat_depth - - facies_trough, dip_dir_trough, dip_trough = trough( - X, - Y, - Z, - 
center_coords=np.array([x_c, y_c, z_c]), - dims=np.array([a, b, c]), - azim=azim, - facies=np.array([8]), - ) - facies[facies_trough != -1] = facies_trough[facies_trough != -1] - logic_peat = (Z >= z_0) & (Z <= z_0 + thick) - peat = np.sum(facies[logic_peat] == 8) / np.sum(logic_peat) - print(peat) - # channels - channel_curve_1 = ferguson_curve( - h=0.1, - k=np.pi / 200, - eps_factor=(np.pi / 1.5) ** 2, - flow_angle=0.0, - s_max=400, - xstart=0, - ystart=25, - ) - y_shift_1 = np.random.uniform(-25, 25) - channel_1 = np.c_[channel_curve_1[0], channel_curve_1[1] + y_shift_1] - depth = np.random.uniform(0.5, 0.6) - channel_f, channel_dip_dir, channel_dip = channel( - X, - Y, - Z, - z_top=z_0 + thick, - curve=channel_1, - parabola_pars=np.array([4, depth]), - facies=np.array([11]), - ) - facies[channel_f != -1] = channel_f[channel_f != -1] - # resetting z_0: - z_0 += thick - - vtk = flopy.export.vtk.Vtk(model=gwf, modelgrid=grid) - vtk.add_array(facies, "facies") - vtk.write(f"ammer_lay{layer}") - layer += 1 - - -# final_layer! -facies[Z > simulated_thickness] = 1 # T1 facies diff --git a/src/HyVR_fork.egg-info/SOURCES.txt b/src/HyVR_fork.egg-info/SOURCES.txt index 5c932dd..0b17f81 100644 --- a/src/HyVR_fork.egg-info/SOURCES.txt +++ b/src/HyVR_fork.egg-info/SOURCES.txt @@ -14,7 +14,6 @@ src/hyvr/objects/channel.py src/hyvr/objects/sheet.py src/hyvr/objects/trough.py src/hyvr/postprocess/__init__.py -src/hyvr/postprocess/plotting.py tests/test_angles.py tests/test_surface.py tests/test_volume.py \ No newline at end of file diff --git a/src/hyvr/objects/channel.py b/src/hyvr/objects/channel.py index cbbfabf..4c88ba8 100644 --- a/src/hyvr/objects/channel.py +++ b/src/hyvr/objects/channel.py @@ -25,7 +25,8 @@ def channel( Assigns a channel to the grid points x,y,z. The channel is defined by a curve, which represents the trajectory of the channel and a parabola, which defines the cross section. - Besides, it may have internal structure. + Besides, it may have internal structure (Not currently implemented). + params: --- f_array: ndarray(int32) of the facies values at the coordinates (x,y,z) @@ -81,12 +82,11 @@ def channel( y_curve = curve[:, 1] xy_dist[filter_zone], idx_curve[filter_zone] = min_distance(x_curve, y_curve, P) logic_xy = np.full(x.size, False) - logic_xy[filter_zone] = xy_dist[filter_zone]**2 <= ( + logic_xy[filter_zone] = xy_dist[filter_zone] ** 2 <= ( width**2 / 4 + width**2 * dz[filter_zone] / (4 * depth) ) # From the HyVR documentation. 
logic_inside = logic_xy - #facies_output = np.ones_like(logic_inside, dtype=np.int32) * (-1) if internal_layering: # distance between neighbouring points: dif = np.diff(np.ascontiguousarray(curve[:, 0:2])) ** 2 @@ -96,10 +96,11 @@ def channel( srqt_dif = np.ravel(srqt_dif) dist_curve = np.concatenate((np.zeros(1), srqt_dif)) # gradient values: - vx = curve[:, 2] - vy = curve[:, 3] + # vx = curve[:, 2] + # vy = curve[:, 3] # azimuth from inverse distance weighted velocity - azim = np.where(logic_inside, np.arctan2(vy, vx) / np.pi * 180, -1) + # dip and azimuth for the channel internal features is not currently implemented + # azim = np.where(logic_inside, np.arctan2(vy, vx) / np.pi * 180, -1) dip = np.radians(dip) # create facies array: curve_length = np.sum(dist_curve) @@ -119,19 +120,12 @@ def channel( for i in numba.prange(idx_curve.shape[0]): d_grid[i] = d[idx_curve[i]] ns_grid[i] = ns[idx_curve[i]] - # print(np.max(ns)) - # print(self.object_facies_array.shape) - facies = np.array([facies_array[n] for n in ns_grid]) f_array.ravel()[logic_inside] = facies else: f_array.ravel()[logic_inside] = np.repeat(facies[0], np.sum(logic_inside)) dip_array.ravel()[logic_inside] = np.repeat(0.0, np.sum(logic_inside)) dip_dir_array.ravel()[logic_inside] = np.repeat(0.0, np.sum(logic_inside)) - #dip_output = np.where(logic_inside, 0.0, np.nan) - #dip_direction = np.where(logic_inside, 0.0, np.nan) f_array = np.reshape(f_array, x.shape) dip_array = np.reshape(dip_array, x.shape) dip_dir_array = np.reshape(dip_dir_array, x.shape) - - # return facies_output, dip_output, dip_direction diff --git a/src/hyvr/objects/sheet.py b/src/hyvr/objects/sheet.py index 5231734..671a102 100644 --- a/src/hyvr/objects/sheet.py +++ b/src/hyvr/objects/sheet.py @@ -50,7 +50,7 @@ def sheet( dip_dir: dip direction in degrees of the internal dipping layers. Leave the default value for massive structure. 
follows the mathematical convention, anticlockwise from east layer_dist: perpendicular to dip distance between layers - + Modified arrays: --- f_array: ndarray(int32) of the facies values at the coordinates (x,y,z) @@ -59,17 +59,12 @@ def sheet( """ true_array_x = (x >= xmin) & (x <= xmax) true_array_y = (y >= ymin) & (y <= ymax) - # if len(bottom_surface.shape) != len(z.shape): - # bottom_surface = np.broadcast_to(bottom_surface, z.shape) - # if len(top_surface.shape) != len(z.shape): - # top_surface = np.broadcast_to(top_surface, z.shape) - true_array_z = (z >= np.broadcast_to(bottom_surface, z.shape)) & ( z <= np.broadcast_to(top_surface, z.shape) ) true_array = true_array_z & true_array_y & true_array_x true_array = np.ravel(true_array) - #facies_output = np.ones(x.size, dtype=np.int32) * (-1) + # facies_output = np.ones(x.size, dtype=np.int32) * (-1) if internal_layering: normal_vector = normal_plane_from_dip_dip_dir(dip, dip_dir) xcenter = xmin + (xmax - xmin) / 2 @@ -99,9 +94,6 @@ def sheet( dip_dir = coterminal_angle(dip_dir) dip_array.ravel()[true_array] = np.repeat(dip, np.sum(true_array)) dip_dir_array.ravel()[true_array] = np.repeat(dip_dir, np.sum(true_array)) - #dip = np.repeat(dip, x.size) - #dip_direction = np.repeat(dip_dir, x.size) f_array = np.reshape(f_array, x.shape) dip_array = np.reshape(dip_array, x.shape) dip_dir_array = np.reshape(dip_dir_array, x.shape) - # return facies_output, dip, dip_direction diff --git a/src/hyvr/objects/trough.py b/src/hyvr/objects/trough.py index 4e73caa..8eeca59 100644 --- a/src/hyvr/objects/trough.py +++ b/src/hyvr/objects/trough.py @@ -83,27 +83,17 @@ def half_ellipsoid( & (z >= zmin) & (z <= zmax) ) - - # copy for later - original_shape = x.shape # modifying the reference so we calculate on less points: gross_limit_logic = np.ravel(gross_limit_logic) x = x.ravel()[gross_limit_logic] y = y.ravel()[gross_limit_logic] z = z.ravel()[gross_limit_logic] - # gross_limit_logic = np.ravel(gross_limit_logic) - # # To decide whether the point is inside, we can just use the normalized - # # distance. 
Therefore, we first calculate the normalized distance vector - - # l2 = normalized_dx ** 2 + normalized_dy ** 2 + normalized_dz ** 2 - # logic = l2 < 1 logic = is_point_inside_ellipsoid(x, y, z, x_c, y_c, z_c, a, b, c, alpha) - # # To ensure the semi-ellipse shape, we cut the ellipse in half by assuming a negative distance: + # To ensure the semi-ellipse shape, we cut the ellipse in half by assuming a negative distance: dz = z - z_c logic = logic & (np.ravel(dz) <= 0) - # print(np.sum(logic)) if np.sum(logic) == 0: # return empty dataset: print("No points inside the ellipsoid") return @@ -116,14 +106,11 @@ def half_ellipsoid( dip_output, dip_dir_output, norm_distance = dip_dip_dir_bulbset( x_e, y_e, z_e, x_c, y_c, z_c, a, b, c, alpha, dip ) - #dip_output = np.where(logic, dip_output, np.nan) - #dip_dir_output = np.where(logic, dip_dir_output, np.nan) if internal_layering: n_layers = np.int32(np.ceil(np.max(norm_distance) * c / layer_dist)) ns = (np.floor((norm_distance) * c / layer_dist)).astype(np.int32) facies_array = get_alternating_facies(facies, n_layers, alternating_facies) facies_output = np.array([facies_array[n] for n in ns]) - else: facies_output = np.repeat(facies, np.sum(logic)) else: # no bulbset @@ -149,14 +136,12 @@ def half_ellipsoid( ns = np.floor(plane_dist / layer_dist) + (n_layers // 2) ns = ns.astype(np.int32) facies_output = np.array([facies_array[n] for n in ns]) - else: facies_output = np.repeat(facies, np.sum(logic)) dip = np.deg2rad(dip) dip_dir = coterminal_angle(dip_dir) dip_output = np.repeat(dip, np.sum(logic)) dip_dir_output = np.repeat(dip_dir, np.sum(logic)) - # reshaping final arrays and assigning values assignment_f = np.zeros(np.sum(gross_limit_logic), dtype=np.int32) assignment_f[logic] = facies_output @@ -170,5 +155,3 @@ def half_ellipsoid( assignment_dip_dir[logic] = dip_dir_output assignment_dip_dir[~logic] = dip_dir_array.ravel()[gross_limit_logic][~logic] dip_dir_array.ravel()[gross_limit_logic] = assignment_dip_dir - #Wdip_dir_array.reshape(original_shape) - #return facies_final, dip_final, dip_dir_final diff --git a/src/hyvr/postprocess/plotting.py b/src/hyvr/postprocess/plotting.py deleted file mode 100644 index 7e677c2..0000000 --- a/src/hyvr/postprocess/plotting.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -Some functions to plot hyvr output. This module is for testing purposes only. -""" - -import sys -import numpy as np -import matplotlib.pyplot as plt - -def cross_section_pcolor(model, field, y=None, x=None, log=False, xlim_y=None, ylim_y=None, - xlim_x=None, ylim_x=None, cmap=None): - """ - Create a pcolor plot of a cross section of the specified field. - - Parameters - ---------- - model : Model object - field : str - The field to be shown, e.g. 'ae', 'fac', 'hat', 'k_iso', etc. - y : sequence of values or None, optional (default: None) - If a sequence of values or a single value is given, cross section at this y-value will be - plotted. - If it is None, a y-cross section through the center of the model domain will be plotted. - If it is an empty list, no y-cross section will be plotted. - x : sequence of values or None, optional (default: None) - If a sequence of values or a single value is given, cross section at this x-value will be - plotted. - If it is None, a x-cross section through the center of the model domain will be plotted. - If it is an empty list, no x-cross section will be plotted. - log : bool, optional (default: False) - Whether to plot the logarithm of the field. 
- xlim_y, ylim_y : lists/tuples, optional (default: None) - Axis limits for the y-cross-sections - xlim_x, ylim_x : lists/tuples, optional (default: None) - Axis limits for the x-cross-sections - cmap : matplotlib colormap, optional (default: None) - Colormap to use for the plot, e.g. 'prism'. - """ - if y is None: - y = [model.grid.y0 + model.grid.ly/2] - if x is None: - x = [model.grid.x0 + model.grid.lx/2] - if np.isscalar(y): - y = [y] - if np.isscalar(x): - x = [x] - - for yi in y: - fig, ax = plt.subplots() - y_index = int(np.round((yi-(model.grid.y0+model.grid.dy/2)/model.grid.dy))) - if 0 <= y_index and y_index < model.grid.ny: - im = ax.pcolor(model.grid.X[:,y_index,:].T, model.grid.Z[:,y_index,:].T, - model.data[field][:,y_index,:].T, cmap=cmap) - ax.set_title(field + ', y = {:.2f}'.format(yi)) - ax.set_xlabel('x') - ax.set_ylabel('z') - if xlim_y is not None: ax.set_xlim(xlim_y) - if ylim_y is not None: ax.set_ylim(ylim_y) - ax.set_aspect('equal') - fig.colorbar(im, ax=ax) - plt.show(fig) - else: - print("Warning: y = {:.2f} not in model domain".format(yi), file=sys.stderr) - - for xi in x: - fig, ax = plt.subplots() - x_index = int(np.round((xi-(model.grid.x0+model.grid.dx/2)/model.grid.dx))) - if 0 <= x_index and x_index < model.grid.nx: - im = ax.pcolor(model.grid.Y[x_index,:,:].T, model.grid.Z[x_index,:,:].T, - model.data[field][x_index,:,:].T, cmap=cmap) - ax.set_title(field + ', x = {:.2f}'.format(xi)) - ax.set_xlabel('y') - ax.set_ylabel('z') - if xlim_x is not None: ax.set_xlim(xlim_x) - if ylim_x is not None: ax.set_ylim(ylim_x) - ax.set_aspect('equal') - fig.colorbar(im, ax=ax) - plt.show(fig) - else: - print("Warning: x = {:.2f} not in model domain".format(xi), file=sys.stderr) - - - - diff --git a/src/hyvr/tools.py b/src/hyvr/tools.py index cddfa5a..dc0f36d 100644 --- a/src/hyvr/tools.py +++ b/src/hyvr/tools.py @@ -1,9 +1,10 @@ -import numpy as np import numba +import numpy as np import scipy -from .utils import ferguson_theta_ode from scipy.spatial.distance import pdist, squareform +from .utils import ferguson_theta_ode, gaussian_kernel, specsim_syn + def ferguson_curve( h: np.float64, @@ -35,7 +36,7 @@ def ferguson_curve( xstart, ystart : float Starting coordinates of the channel centerline extra_noise : float - Added extra noise for the matrix stability of the underlying Gaussian error curve + small error added to the covariance matrix to avoid singular matrix in the underlying Gaussian error curve Returns ------- @@ -91,6 +92,40 @@ def ferguson_curve( return outputs[:, 0], outputs[:, 1], outputs[:, 2], outputs[:, 3], outputs[:, 4] +def specsim_surface( + x: np.array, + y: np.array, + mean: np.float64, + var: np.float64, + corl: np.array, + mask=None, +): + """ + Creates gaussian random surface with mean value and variance input + with the spectral method from Dietrich & Newsam (1993). 
+    Input:
+    -------------
+    x,y: 2D grid of x and y points
+    mean: mean value
+    var: variance
+    corl: correlation lengths (same unit as x and y) in x and y directions
+    mask: mask array (same dimensions as x and y)
+    Returns:
+    Z: output np.array with the same dimensions as x and y, with the Z values corresponding to the surface
+    """
+    M = np.diag(1 / corl ** 2)
+    if mask is not None:
+        # work on masked copies so the caller's coordinate arrays are not modified in place
+        x = np.where(mask, x, np.nan)
+        y = np.where(mask, y, np.nan)
+        coords = [x, y]
+        Z = specsim_syn(gaussian_kernel, coords, mean, (var, M))
+        Z[~mask] = np.nan
+    else:
+        coords = [x, y]
+        Z = specsim_syn(gaussian_kernel, coords, mean, (var, M))
+    return Z
+
+
 def contact_surface(
     x: np.array,
     y: np.array,
@@ -190,4 +225,4 @@ def distance(x, y, x_t, y_t):
     mean = cov_test.T @ L_y + mean  # Variance prediction # to be implemented
     v = scipy.linalg.solve_triangular(L, cov_test, lower=True)
-    return np.reshape(mean, shape)
\ No newline at end of file
+    return np.reshape(mean, shape)
diff --git a/src/hyvr/utils.py b/src/hyvr/utils.py
index 78a3b26..71b481c 100644
--- a/src/hyvr/utils.py
+++ b/src/hyvr/utils.py
@@ -1,9 +1,12 @@
+from typing import List
+
 import numba
 import numpy as np
 import numpy.typing as npt
 import scipy
+
 
 @numba.njit()
 def normal_plane_from_dip_dip_dir(dip: float, dip_dir: float) -> npt.ArrayLike:
     """
@@ -256,6 +259,11 @@ def sign(x: float):
     return (x >= 0) - (x < 0)
 
 
+@numba.njit(nogil=True)
+def distance(X, x, y):
+    return np.sqrt((X[0] - x) ** 2 + (X[1] - y) ** 2)
+
+
 @numba.jit(nopython=True, parallel=True)
 def min_distance(x, y, P):
     """
@@ -268,7 +276,6 @@ def min_distance(x, y, P):
 
     Returns min indexes and distances array.
     """
-    distance = lambda X, x, y: np.sqrt((X[0] - x) ** 2 + (X[1] - y) ** 2)
     # compute distance
     d_array = np.zeros((P.shape[0]))
     glob_min_idx = np.zeros((P.shape[0]))
@@ -279,7 +286,14 @@
     return d_array, glob_min_idx
 
 
-def ferguson_theta_ode(s_max: np.float64, eps_factor: np.float64, k:np.float64, h:np.float64, omega:np.float64, err:np.float64):
+def ferguson_theta_ode(
+    s_max: np.float64,
+    eps_factor: np.float64,
+    k: np.float64,
+    h: np.float64,
+    omega: np.float64,
+    err: np.float64 = 1e-8,
+):
     """
     Implementation of (Ferguson, 1976, Eq.9).
The equation is formulated as an initial value problem and integrated with scipy function for integration (solve_ivp) @@ -291,6 +305,7 @@ def ferguson_theta_ode(s_max: np.float64, eps_factor: np.float64, k:np.float64, h: Height eps_factor: Random background noise (normal variance) omega: initial angle + err: small error added to the covariance matrix to avoid singular matrix Returns: theta : angle array @@ -311,8 +326,7 @@ def ferguson_theta_ode(s_max: np.float64, eps_factor: np.float64, k:np.float64, u = np.random.multivariate_normal( np.zeros_like(s_range), np.eye((s_range).shape[0]) ) - epsilon = 1e-8 - cov = cov + epsilon * np.eye(cov.shape[0]) + cov = cov + err * np.eye(cov.shape[0]) L = scipy.linalg.cholesky(cov) e_s = L @ u @@ -360,26 +374,39 @@ def jac(t, y, k, h): return theta, s, x, y, tau -def R_1(s,s_arr,curv_arr,k_1,Cf,W,D,Omega = -1, F = 2.5): + +def R_1(s, s_arr, curv_arr, k_1, Cf, W, D, Omega=-1, F=2.5): # interpolate to find the value of tau at s tau = np.interp(s, s_arr, curv_arr) - Ro = k_1 *tau * W + Ro = k_1 * tau * W s_prime = np.where(s_arr < s, s_arr, 0) s_prime = s_prime[s_prime != 0] - curv_prime = curv_arr[:len(s_prime)] + curv_prime = curv_arr[: len(s_prime)] sau = s - s_prime - G_sau = np.exp(-2* Cf * sau/D) + G_sau = np.exp(-2 * Cf * sau / D) Ro_prime = k_1 * curv_prime * W - integration = np.trapz(Ro_prime * G_sau, sau)/np.trapz(G_sau, sau) + integration = np.trapz(Ro_prime * G_sau, sau) / np.trapz(G_sau, sau) return Omega * Ro + F * integration -def Rs(s_arr, curv_arr, k_1, W,Cf, D, Omega = -1, F = 2.5): + +def Rs(s_arr, curv_arr, k_1, W, Cf, D, Omega=-1, F=2.5): Rs_arr = np.zeros(len(s_arr)) for i in range(len(s_arr)): Rs_arr[i] = R_1(s_arr[i], s_arr, curv_arr, k_1, Cf, W, D, Omega, F) return Rs_arr -def howard_knudson_ode(s_max, eps_factor, k, h, omega, k_1, Cf, Omega = -1, F=2.5, ): + +def howard_knudson_ode( + s_max, + eps_factor, + k, + h, + omega, + k_1, + Cf, + Omega=-1, + F=2.5, +): """ Implementation of (Ferguson, 1976, Eq.9). The equation is formulated as an initial value problem and integrated with scipy function for integration (solve_ivp) @@ -414,10 +441,6 @@ def howard_knudson_ode(s_max, eps_factor, k, h, omega, k_1, Cf, Omega = -1, F=2. 
     L = scipy.linalg.cholesky(cov)
     e_s = L @ u
-
-
-
-
     def rhs(t, y, k, h):
         eps_t = np.interp(np.array([t]), s_range, e_s)
         eps_t = eps_t[0]  # from array to float
@@ -462,6 +485,97 @@ def jac(t, y, k, h):
     return theta, s, x, y
 
 
+def gaussian_kernel(r, phi, M):
+    """
+    Gaussian kernel function
+
+    Parameters
+    ----------
+    r : np.ndarray
+        Distance vector between two points
+    phi : float
+        Variance of the Gaussian kernel
+    M : np.ndarray
+        Matrix with the correlation structure of the kernel
+    """
+    r = np.atleast_2d(r)
+    # Perform the vector-matrix multiplication with einsum (r.T @ M @ r, for stacked vectors in r)
+    result = phi * np.exp(-0.5 * np.einsum('bi,ij,bj->b', r, M, r))
+
+    return result
+
+
+def matern_kernel(r, nu, M):
+    """
+    Matérn kernel function
+
+    Parameters
+    ----------
+    r : np.ndarray
+        Distance vector between two points
+    nu : float
+        Order of the Matérn kernel
+    M : np.ndarray
+        Matrix with the correlation structure of the kernel
+    """
+    r = np.atleast_2d(r)
+    r2 = np.einsum('bi,ij,bj->b', r, M, r)
+    kv_ = scipy.special.kv(nu, np.sqrt(2 * nu * r2))
+    gamma_ = (
+        2 ** (1 - nu)
+        / scipy.special.gamma(nu)
+        * (np.sqrt(2 * nu * r2)) ** nu
+    )
+    return gamma_ * kv_
+
+
+def specsim_syn(kernel, coords: List[np.ndarray], mean=0.0, args=()):
+    """
+    Generate random variables with a stationary covariance function using the spectral
+    techniques of Dietrich & Newsam (1993)
+
+    Parameters
+    ----------
+    coords: list of 1D, 2D or 3D np.ndarray with the x, y, z, ... coordinates of the grid. Dimensions must match.
+    mean: mean value of the random field
+    kernel: callable
+        Function which computes the covariance between two points.
+        The first argument of the function must be the distance vector between the points.
+    args: tuple
+        Contains the additional parameters of the covariance function
+        (e.g. variance, correlation structure, etc.). Empty by default.
+        The calling signature is ``kernel(r, *args)``
+
+    Returns
+    -------
+    Y : 1d, 2d, or 3d numpy array
+        Numpy array of the random field, with the same dimensions as the input coordinates.
+    """
+    # calculate the distance to centre
+    shape = coords[0].shape
+    for i in range(len(coords)):
+        coords[i] = coords[i].ravel() - np.nanmean(coords[i])
+    # calculate the kernel distance:
+    r = np.stack(coords)
+    r = r.T
+    ryy = kernel(r, *args)
+    ryy = ryy.reshape(shape)
+    # FFT of the kernel and calculations according to Dietrich & Newsam (1993)
+    ntot = ryy.size
+    syy = np.fft.fftn(np.fft.fftshift(ryy)) / ntot
+    syy = np.abs(syy)  # Remove imaginary artifacts
+    syy[0] = 0
+    real = np.random.randn(*syy.shape)
+    imag = np.random.randn(*syy.shape)
+    epsilon = real + 1j * imag
+    rand = epsilon * np.sqrt(syy)
+    Y = np.real(np.fft.ifftn(rand * ntot))
+    Y = Y + mean
+    return Y
+
+
 # so far this function is not jitted. I plan to adapt a gaussian random function generator
 # using linear algebra, instead of fft. The linalg module is accessible in numba.
 # still buggy!!
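Together, `gaussian_kernel` and `specsim_syn` generalize the fixed Gaussian/exponential covariances of the legacy `specsim` below to arbitrary kernels. A minimal usage sketch; the grid size, variance, and correlation lengths are illustrative only:

```python
import numpy as np
from hyvr.utils import gaussian_kernel, specsim_syn

# illustrative 100 m x 100 m grid at 0.5 m resolution
x, y = np.meshgrid(np.linspace(0, 100, 200), np.linspace(0, 100, 200))

var = 1.0  # variance of the field
M = np.diag(1 / np.array([20.0, 10.0]) ** 2)  # anisotropic correlation structure
field = specsim_syn(gaussian_kernel, [x, y], mean=0.0, args=(var, M))
assert field.shape == x.shape
```

`specsim_surface` in `tools.py` wraps exactly this call, building `M = np.diag(1 / corl**2)` from the correlation lengths.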
@@ -507,23 +621,26 @@ def specsim(
     if covmod not in ["gaussian", "exp"]:
         raise ValueError("covariance model must be 'gaussian' or 'exp'")
     if mask is None:
-        x_calc = x
-        y_calc = y
+        x_calc = x - np.mean(x)
+        y_calc = y - np.mean(y)
     else:
         x_calc = np.where(mask, x, np.nan)
+        x_calc = x_calc - np.nanmean(x_calc)
         y_calc = np.where(mask, y, np.nan)
+        y_calc = y_calc - np.nanmean(y_calc)
     two_dim = len(corl) < 3  # whether the calculations should be done in 2D or 3D
     if two_dim:
         Y = np.empty(x.shape)
-        h_square = 0.5*(x_calc / corl[0]) ** 2 + 0.5*(y_calc / corl[1]) ** 2
+        h_square = 0.5 * (x_calc / corl[0]) ** 2 + 0.5 * (y_calc / corl[1]) ** 2
     else:
         if mask is None:
-            z_calc = z
+            z_calc = z - np.mean(z)
         else:
             z_calc = np.where(mask, z, np.nan)
+            z_calc = z_calc - np.nanmean(z_calc)
         Y = np.empty(z.shape)
-        h_square = (
-            0.5 * (x_calc / corl[0]) ** 2 + 0.5* (y_calc / corl[1]) ** 2 + 0.5 * (z_calc / corl[2]) ** 2
+        h_square = 0.5 * (
+            (x_calc / corl[0]) ** 2 + (y_calc / corl[1]) ** 2 + (z_calc / corl[2]) ** 2
         )
     ntot = h_square.size
     # Covariance matrix of variables
@@ -542,5 +659,5 @@ def specsim(
     epsilon = real + 1j * imag
     rand = epsilon * np.sqrt(syy)
     Y = np.real(np.fft.ifftn(rand * ntot))
     Y = Y + mean
     return Y
diff --git a/tests/test_surface.py b/tests/test_surface.py
index 3cbdcf5..1ebc8a9 100644
--- a/tests/test_surface.py
+++ b/tests/test_surface.py
@@ -1,5 +1,5 @@
 import numpy as np
-from src.hyvr.tools import surface_gauss_regression, ferguson_curve
+from src.hyvr.tools import ferguson_curve, specsim_surface
 from src.hyvr.utils import specsim
 
 
@@ -31,41 +31,25 @@ def test_specsim():
         y,
         mean=mean,
         var=variance,
-        corl=np.array([0.01, 0.01]),
+        corl=np.array([100, 100]),
         mask=None,
         covmod="gaussian",
     )
     assert Z.shape == x.shape
     assert np.allclose(Z.mean(), mean, atol=1e-3)
-    assert np.allclose(Z.var(), variance, atol=1e-3)
-
 
-def test_gauss_surface_regression():
+def test_specsim_surface():
     x = np.linspace(0, 100, 1000)
     y = np.linspace(0, 100, 1000)
     x, y = np.meshgrid(x, y)
-    # training data:
-    h = 0.4
-    k = np.pi / 200
-    eps_factor = (np.pi / 1.5) ** 2
-    x_t, y_t, vx, vy, s = ferguson_curve(
-        h=h,
-        k=k,
-        eps_factor=eps_factor,
-        flow_angle=0.0,
-        s_max=400,
-        xstart=0,
-        ystart=25,
-        extra_noise=1e-4,
-    )
-    z_t = np.sin(s)
-    dataset = np.array([x_t, y_t, z_t]).T
-    error = np.repeat(0.3, dataset.shape[0])
-    corl = np.array([10, 10])
-    mean = 0.0
-    variance = 1.0
-    # Z = surface_gauss_regression(x, y, mean, variance, corl, dataset, error)
-    # assert Z.shape == x.shape
-    # assert Z.shape == y.shape
-    assert x_t.shape == y_t.shape
-    assert x_t.shape == vx.shape
+    mask = (x > 25) & (x < 75) & (y > 25) & (y < 75)
+    mean = 2.5
+    variance = 0.05**2
+    corl = np.array([100.0, 100.0])
+    Z = specsim_surface(x, y, mean, variance, corl, mask=None)
+    Z_masked = specsim_surface(x, y, mean, variance, corl, mask=mask)
+    assert Z.shape == x.shape
+    assert Z_masked.shape == x.shape
+    assert np.allclose(Z.mean(), mean, atol=1e-3)
+    assert np.allclose(np.nanmean(Z_masked), mean, atol=1e-3)
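For completeness, a sketch of the `ferguson_curve` call that both the deleted `model_progression.py` example and the tests exercise. The parameter values mirror `tests/test_surface.py`; the inline comments paraphrase the docstrings in `tools.py` and `utils.py`:

```python
import numpy as np
from hyvr.tools import ferguson_curve

# meandering channel centerline after Ferguson (1976, Eq. 9)
x_t, y_t, vx, vy, s = ferguson_curve(
    h=0.4,
    k=np.pi / 200,  # wave number
    eps_factor=(np.pi / 1.5) ** 2,  # variance of the random background noise
    flow_angle=0.0,  # direction of the mean flow
    s_max=400,  # total centerline length
    xstart=0,
    ystart=25,  # starting coordinates of the centerline
    extra_noise=1e-4,  # jitter added to the covariance matrix for Cholesky stability
)
assert x_t.shape == y_t.shape == vx.shape
```

The returned centerline (`x_t`, `y_t`) is what the examples stack into the `curve` argument of `channel`, with `vx`, `vy` carrying the local velocity components and `s` the arc length.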