diff --git a/benchmarks/eko/benchmark_evol_to_unity.py b/benchmarks/eko/benchmark_evol_to_unity.py index 24deb236c..f18d4584d 100644 --- a/benchmarks/eko/benchmark_evol_to_unity.py +++ b/benchmarks/eko/benchmark_evol_to_unity.py @@ -7,7 +7,7 @@ from eko.couplings import Couplings from eko.evolution_operator import Operator from eko.evolution_operator.grid import OperatorGrid -from eko.interpolation import InterpolatorDispatcher +from eko.interpolation import InterpolatorDispatcher, XGrid from eko.thresholds import ThresholdsAtlas # from eko.matching_conditions.operator_matrix_element import OperatorMatrixElement @@ -43,16 +43,20 @@ class BenchmarkBackwardForward: operators_card = { "Q2grid": [10], # here you need a very dense grid - "interpolation_xgrid": np.linspace(1e-1, 1, 30), - # "interpolation_xgrid": make_grid(30,30, x_min=1e-3), - "interpolation_polynomial_degree": 1, - "interpolation_is_log": True, - "debug_skip_singlet": False, - "debug_skip_non_singlet": False, - "ev_op_max_order": 1, - "ev_op_iterations": 1, - "backward_inversion": "exact", - "n_integration_cores": 1, + "xgrid": np.linspace(1e-1, 1, 30), + # "xgrid": make_grid(30,30, x_min=1e-3), + "configs": { + "interpolation_polynomial_degree": 1, + "interpolation_is_log": True, + "ev_op_max_order": (2, 0), + "ev_op_iterations": 1, + "backward_inversion": "exact", + "n_integration_cores": 1, + }, + "debug": { + "skip_singlet": False, + "skip_non_singlet": False, + }, } new_theory, new_operators = compatibility.update(theory_card, operators_card) g = OperatorGrid.from_dict( @@ -60,7 +64,13 @@ class BenchmarkBackwardForward: new_operators, ThresholdsAtlas.from_dict(new_theory), Couplings.from_dict(new_theory), - InterpolatorDispatcher.from_dict(new_operators), + InterpolatorDispatcher( + XGrid( + new_operators["xgrid"], + log=new_operators["configs"]["interpolation_is_log"], + ), + new_operators["configs"]["interpolation_polynomial_degree"], + ), ) def test_operator_grid( @@ -72,7 +82,13 @@ def test_operator_grid( self.new_operators, ThresholdsAtlas.from_dict(self.new_theory), Couplings.from_dict(self.new_theory), - InterpolatorDispatcher.from_dict(self.new_operators), + InterpolatorDispatcher( + XGrid( + self.new_operators["xgrid"], + log=self.new_operators["configs"]["interpolation_is_log"], + ), + self.new_operators["configs"]["interpolation_polynomial_degree"], + ), ) q20 = 30 q21 = 50 diff --git a/benchmarks/ekobox/benchmark_evol_pdf.py b/benchmarks/ekobox/benchmark_evol_pdf.py index a4233171f..035b3a38d 100644 --- a/benchmarks/ekobox/benchmark_evol_pdf.py +++ b/benchmarks/ekobox/benchmark_evol_pdf.py @@ -1,18 +1,17 @@ # -*- coding: utf-8 -*- import pathlib -import lhapdf import numpy as np import pytest from eko import basis_rotation as br -from eko import output from ekobox import evol_pdf as ev_p -from ekobox import gen_op as g_o -from ekobox import gen_theory as g_t +from ekobox import operators_card as oc +from ekobox import theory_card as tc from ekobox.genpdf import load test_pdf = pathlib.Path(__file__).parent / "fakepdf" +lhapdf = pytest.importorskip("lhapdf") @pytest.mark.isolated @@ -22,8 +21,8 @@ def benchmark_evolve_single_member(tmp_path, cd, lhapdf_path): 100.0, 10000.0, ] - op = g_o.gen_op_card(q2grid) - theory = g_t.gen_theory_card( + op = oc.generate(q2grid) + theory = tc.generate( 0, 5.0, update={ @@ -80,10 +79,10 @@ def benchmark_evolve_single_member(tmp_path, cd, lhapdf_path): @pytest.mark.isolated def benchmark_evolve_more_members(tmp_path, cd, lhapdf_path): - op = g_o.gen_op_card( + op = 
oc.generate( [10, 100], update={"interpolation_xgrid": [1e-7, 0.01, 0.1, 0.2, 0.3]} ) - theory = g_t.gen_theory_card(0, 1.0) + theory = tc.generate(0, 1.0) with lhapdf_path(test_pdf): pdfs = lhapdf.mkPDFs("myMSTW2008nlo90cl") d = tmp_path / "sub" @@ -102,28 +101,3 @@ def benchmark_evolve_more_members(tmp_path, cd, lhapdf_path): for x in [1e-7, 0.01, 0.1, 0.2, 0.3]: for pid in [21, 1, 2]: assert new_pdf_1.xfxQ2(pid, x, Q2) != new_pdf_2.xfxQ2(pid, x, Q2) - - -@pytest.mark.isolated -def benchmark_gen_and_dump_out(tmp_path): - op = g_o.gen_op_card( - [100.0], update={"interpolation_xgrid": [1e-7, 0.01, 0.1, 0.2, 0.3]} - ) - theory = g_t.gen_theory_card(0, 1.0) - - out = ev_p.gen_out(theory, op, path=tmp_path) - - ops_id = f"o{op['hash'][:6]}_t{theory['hash'][:6]}" - outpath = f"{tmp_path}/{ops_id}.tar" - loaded_out = output.Output.load_tar(outpath) - for el, load_el in zip( - out["interpolation_xgrid"], loaded_out["interpolation_xgrid"] - ): - assert el == load_el - for el, load_el in zip( - out["Q2grid"][100.0]["operators"], loaded_out["Q2grid"][100.0]["operators"] - ): - np.testing.assert_allclose( - out["Q2grid"][100.0]["operators"], - loaded_out["Q2grid"][100.0]["operators"], - ) diff --git a/benchmarks/ekobox/benchmark_gen_theory.py b/benchmarks/ekobox/benchmark_gen_theory.py deleted file mode 100644 index 07320fa5e..000000000 --- a/benchmarks/ekobox/benchmark_gen_theory.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest - -from ekobox import gen_theory as g_t - - -@pytest.mark.isolated -def benchmark_gen_theory_card(): - theory = g_t.gen_theory_card(0, 1.0) - assert theory["PTO"] == 0 - assert theory["Q0"] == 1.0 - assert theory["mt"] == 173.07 - up_err = {"Prova": "Prova"} - with pytest.raises(ValueError): - theory = g_t.gen_theory_card(0, 1.0, update=up_err) - up = {"mb": 132.3, "PTO": 2} - theory = g_t.gen_theory_card(0, 1.0, update=up) - assert theory["PTO"] == 2 - assert theory["mb"] == 132.3 - - -@pytest.mark.isolated -def benchmark_export_load_theory_card(tmp_path, cd): - with cd(tmp_path): - theory = g_t.gen_theory_card(2, 12.3, name="debug_theory") - g_t.export_theory_card("debug_theory_two", theory) - theory_loaded = g_t.import_theory_card("debug_theory.yaml") - theory_two_loaded = g_t.import_theory_card("debug_theory_two.yaml") - for key in theory.keys(): - assert theory[key] == theory_loaded[key] == theory_two_loaded[key] diff --git a/benchmarks/ekobox/benchmark_utils.py b/benchmarks/ekobox/benchmark_utils.py deleted file mode 100644 index 0b8ac5664..000000000 --- a/benchmarks/ekobox/benchmark_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import pytest - -from ekobox import evol_pdf as ev_p -from ekobox import gen_op as g_o -from ekobox import gen_theory as g_t -from ekobox import utils - - -@pytest.mark.isolated -def benchmark_ekos_product(): - # Generating two ekos - op1 = g_o.gen_op_card( - [60.0, 80.0, 100.0], update={"interpolation_xgrid": [1e-7, 0.01, 0.1, 0.2, 0.3]} - ) - theory1 = g_t.gen_theory_card(0, 5.0) - - op2 = g_o.gen_op_card( - [80.0, 100.0, 120.0], - update={"interpolation_xgrid": [1e-7, 0.01, 0.1, 0.2, 0.3]}, - ) - theory2 = g_t.gen_theory_card(0, 10.0) - theory_err = g_t.gen_theory_card(0, 5.0) - - eko_ini = ev_p.gen_out(theory1, op1) - eko_fin = ev_p.gen_out(theory2, op2) - # Test_error - eko_fin_err = ev_p.gen_out(theory_err, op2) - with pytest.raises(ValueError): - _ = utils.ekos_product(eko_ini, eko_fin_err) - # product is copied - eko_res = utils.ekos_product(eko_ini, eko_fin, 
in_place=False) - # product overwrites initial - eko_res2 = utils.ekos_product(eko_ini, eko_fin) - np.testing.assert_allclose( - eko_res["Q2grid"][80.0]["operators"], eko_res2["Q2grid"][80.0]["operators"] - ) - np.testing.assert_allclose(eko_res2["q2_ref"], eko_ini["q2_ref"]) - np.testing.assert_allclose( - list(eko_res2["Q2grid"].keys()), list(eko_fin["Q2grid"].keys()) - ) - - np.testing.assert_allclose( - eko_ini["Q2grid"][80.0]["operators"], eko_res2["Q2grid"][80.0]["operators"] - ) - np.testing.assert_allclose(eko_res["q2_ref"], eko_ini["q2_ref"]) - np.testing.assert_allclose( - list(eko_res["Q2grid"].keys()), list(eko_fin["Q2grid"].keys()) - ) diff --git a/benchmarks/ekobox/genpdf/benchmark_flavors.py b/benchmarks/ekobox/genpdf/benchmark_flavors.py index 010dc7408..9dc6913f5 100644 --- a/benchmarks/ekobox/genpdf/benchmark_flavors.py +++ b/benchmarks/ekobox/genpdf/benchmark_flavors.py @@ -13,38 +13,6 @@ lhapdf = pytest.importorskip("lhapdf") -@pytest.mark.isolated -def benchmark_is_evolution(): - assert genpdf.flavors.is_evolution_labels(["V", "T3"]) - assert not genpdf.flavors.is_evolution_labels(["21", "2"]) - - -@pytest.mark.isolated -def benchmark_is_pids(): - assert not genpdf.flavors.is_pid_labels(["V", "T3"]) - assert not genpdf.flavors.is_pid_labels(["35", "9"]) - assert not genpdf.flavors.is_pid_labels({}) - assert genpdf.flavors.is_pid_labels([21, 2]) - - -@pytest.mark.isolated -def benchmark_flavors_pid_to_flavor(): - flavs = genpdf.flavors.pid_to_flavor([1, 2, 21, -3]) - for f in flavs: - for g in flavs: - if not np.allclose(f, g): - assert f @ g == 0 - - -@pytest.mark.isolated -def benchmark_flavors_evol_to_flavor(): - flavs = genpdf.flavors.evol_to_flavor(["S", "g", "T3", "V8"]) - for f in flavs: - for g in flavs: - if not np.allclose(f, g): - assert f @ g == 0 - - @pytest.mark.isolated def benchmark_flavors_pids_ct14(tmp_path, cd): with cd(tmp_path): @@ -101,59 +69,3 @@ def benchmark_flavors_evol_ct14(tmp_path, cd): np.testing.assert_allclose( pdf.xfxQ2(21, x, Q2), gonly.xfxQ2(21, x, Q2) ) - - -@pytest.mark.isolated -def benchmark_flavors_evol_raw(): - blocks = [ - { - "Q2grid": np.array([1, 2]), - "xgrid": np.array([0.1, 1.0]), - "pids": np.array([-1, 21, 1]), - "data": np.array([[0.1, 0.2, 0.1]] * 4), - } - ] - gonly = genpdf.flavors.project(blocks, genpdf.flavors.evol_to_flavor(["g"])) - assert len(gonly) == 1 - np.testing.assert_allclose( - gonly[0]["data"], - np.array( - [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 4 - ), - ) - Sonly = genpdf.flavors.project(blocks, genpdf.flavors.evol_to_flavor(["S"])) - assert len(Sonly) == 1 - for i in [0, 1, 2, 3]: - # g and gamma are zero - np.testing.assert_allclose(Sonly[0]["data"][i][7], 0) - np.testing.assert_allclose(Sonly[0]["data"][i][0], 0) - # quark are all equal and equal to anti-quarks - for pid in [2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]: - np.testing.assert_allclose(Sonly[0]["data"][i][pid], Sonly[0]["data"][i][1]) - - -@pytest.mark.isolated -def benchmark_flavors_evol_nodata(): - # try with a block without data - blocks = [ - { - "Q2grid": np.array([1, 2]), - "xgrid": np.array([0.1, 1.0]), - "pids": np.array([-1, 21, 1]), - "data": np.array([]), - }, - { - "Q2grid": np.array([1, 2]), - "xgrid": np.array([0.1, 1.0]), - "pids": np.array([-1, 21, 1]), - "data": np.array([[0.1, 0.2, 0.1]] * 4), - }, - ] - gonly = genpdf.flavors.project(blocks, genpdf.flavors.evol_to_flavor(["g"])) - assert len(gonly) == 2 - np.testing.assert_allclose( - gonly[1]["data"], - np.array( - [[0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 4 - ), - ) diff --git a/benchmarks/ekobox/genpdf/benchmark_init.py b/benchmarks/ekobox/genpdf/benchmark_init.py index 3e23e8006..29a9b0b05 100644 --- a/benchmarks/ekobox/genpdf/benchmark_init.py +++ b/benchmarks/ekobox/genpdf/benchmark_init.py @@ -14,32 +14,6 @@ lhapdf = pytest.importorskip("lhapdf") -@pytest.mark.isolated -def benchmark_genpdf_exceptions(tmp_path, cd): - # using a wrong label and then a wrong parent pdf - with cd(tmp_path): - with pytest.raises(TypeError): - genpdf.generate_pdf( - "test_genpdf_exceptions1", - ["f"], - { - 21: lambda x, Q2: 3.0 * x * (1.0 - x), - 2: lambda x, Q2: 4.0 * x * (1.0 - x), - }, - ) - with pytest.raises(ValueError): - genpdf.generate_pdf( - "test_genpdf_exceptions2", - ["g"], - 10, - ) - with pytest.raises(FileExistsError): - genpdf.install_pdf("foo") - - with pytest.raises(TypeError): - genpdf.generate_pdf("debug", [21], info_update=(10, 15, 20)) - - @pytest.mark.isolated def benchmark_genpdf_no_parent_and_install(tmp_path, cd): with cd(tmp_path): diff --git a/benchmarks/lha_paper_bench.py b/benchmarks/lha_paper_bench.py index 65bc64fb8..f2ee934e3 100644 --- a/benchmarks/lha_paper_bench.py +++ b/benchmarks/lha_paper_bench.py @@ -224,8 +224,8 @@ def benchmark_sv(self, pto): obj = BenchmarkVFNS() # obj = BenchmarkFFNS() - # obj.benchmark_plain(0) - obj.benchmark_sv(2) + obj.benchmark_plain(0) + # obj.benchmark_sv(2) # # VFNS benchmarks with LHA settings # programs = ["LHA", "pegasus", "apfel"] diff --git a/doc/source/code/IO.rst b/doc/source/code/IO.rst index 1a3eaad48..43fe6daa0 100644 --- a/doc/source/code/IO.rst +++ b/doc/source/code/IO.rst @@ -199,7 +199,7 @@ The ``Q2grid`` values are the actual tensor for the requested :math:`Q^2`. Each - ``operators`` a :py:obj:`dict` with all evolution kernel operators where the key indicates which distribution is generated by which other one and the value represents the eko in matrix representation - this can either be the plain list representation or the binary representation (as provided by :py:meth:`numpy.ndarray.tobytes`) -- ``operator_errors`` a :py:obj:`dict` with the integration errors associated to the respective operators following the same conventions as +- ``errors`` a :py:obj:`dict` with the integration errors associated to the respective operators following the same conventions as the ``operator`` dictionary Each element (|EKO|) is a rank-4 tensor with the indices ordered in the following way: ``EKO[pid_out][x_out][pid_in][x_in]`` where ``pid_out`` and ``x_out`` diff --git a/doc/source/conf.py b/doc/source/conf.py index eee87debb..2f1becb11 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -103,6 +103,9 @@ # The name of the Pygments (syntax highlighting) style to use. pygments_style = None +# Criterion to sort class members +autodoc_member_order = "bysource" + # A string to be included at the beginning of all files shared = here / "shared" rst_prolog = "\n".join( diff --git a/doc/source/overview/tutorials/output.ipynb b/doc/source/overview/tutorials/output.ipynb index b3b9112ce..13c10b68e 100644 --- a/doc/source/overview/tutorials/output.ipynb +++ b/doc/source/overview/tutorials/output.ipynb @@ -55,7 +55,7 @@ "id": "f26bf77f-9999-42d0-8ebf-2c1607214ea7", "metadata": {}, "source": [ - "Now that we have it, we can actually use one of the available formats to dump it." 
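The notebook cells below replace the old dump/load walkthrough with the new object API. Condensed into a plain script, the updated flow looks roughly like the following sketch; it assumes `theory_card` and `operators_card` are the runcards defined at the start of the tutorial, and that the operator is still computed with `eko.run_dglap` as in the earlier (unshown) cells:

import eko

# compute the operator, as in the opening cells of this tutorial
evolution_operator = eko.run_dglap(theory_card, operators_card)

# the result is a dedicated type now, no longer a plain dict
assert not isinstance(evolution_operator, dict)

# the runcards travel with the object
print(evolution_operator.theory_card)
print(evolution_operator.operator_card)

# final scales of the computation
print(evolution_operator.Q2grid)

# operators are accessed per scale through a context manager
with evolution_operator.operator(100) as op:
    print(op.operator.shape)  # (14, 5, 14, 5)
    print(op.error.shape)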
+ "The computed operator is now it's own type (that no longer inherits from dictionary as in previous eko versions):" ] }, { @@ -63,39 +63,20 @@ "execution_count": 3, "id": "daae3811-e7a6-4a7c-9f55-eef8ea1564b8", "metadata": {}, - "outputs": [], - "source": [ - "evolution_operator.dump_tar(\"myeko.tar\")" - ] - }, - { - "cell_type": "markdown", - "id": "a0d115e4-43cd-4f83-bcb5-922dcbfc56d4", - "metadata": {}, - "source": [ - "Once dumped, we can always use the paired method to load it, at any later time." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "b62dc52e-ff36-4621-a772-502ff495456f", - "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "eko.output.Output" + "eko.output.struct.EKO" ] }, - "execution_count": 4, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "myeko = eko.output.Output.load_tar(\"myeko.tar\")\n", - "type(myeko)" + "type(evolution_operator)" ] }, { @@ -103,12 +84,12 @@ "id": "91b17b27-e423-43fe-87f2-a1eaff027c7a", "metadata": {}, "source": [ - "Now, let's inspect the content of the operator." + "Now, let's inspect the content of the operator: e.g. you can extract the theory and operator card" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "id": "e5c5b8af-d478-48ca-bb61-9a0de680252a", "metadata": {}, "outputs": [ @@ -116,113 +97,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "Q2grid\n", - "eko_version\n", - "inputgrid\n", - "inputpids\n", - "interpolation_is_log\n", - "interpolation_polynomial_degree\n", - "interpolation_xgrid\n", - "q2_ref\n", - "targetgrid\n", - "targetpids\n" + "{'CKM': '0.97428 0.22530 0.003470 0.22520 0.97345 0.041000 0.00862 0.04030 0.999152', 'Comments': 'LO baseline for small-x res', 'DAMP': 0, 'EScaleVar': 1, 'FNS': 'FFNS', 'GF': 1.1663787e-05, 'HQ': 'POLE', 'IB': 0, 'IC': 0, 'ID': 0, 'MP': 0.938, 'MW': 80.398, 'MZ': 91.1876, 'MaxNfAs': 6, 'MaxNfPdf': 6, 'ModEv': 'EXA', 'ModSV': None, 'NfFF': 3, 'PTO': 0, 'Q0': 1.0, 'QED': 0, 'Qedref': 1.777, 'Qmb': 4.5, 'Qmc': 2.0, 'Qmt': 173.07, 'Qref': 91.2, 'SIN2TW': 0.23126, 'SxOrd': 'LL', 'SxRes': 0, 'TMC': 0, 'XIF': 1.0, 'XIR': 1.0, 'alphaqed': 0.007496251999999999, 'alphas': 0.11800000000000001, 'fact_to_ren_scale_ratio': 1.0, 'global_nx': 0, 'kDISbThr': 1.0, 'kDIScThr': 1.0, 'kDIStThr': 1.0, 'kbThr': 1.0, 'kcThr': 1.0, 'ktThr': 1.0, 'mb': 4.5, 'mc': 2.0, 'mt': 173.07, 'nf0': None, 'nfref': None}\n", + "{'Q0': 1.0, 'Q2grid': [100], 'backward_inversion': 'expanded', 'configs': {'backward_inversion': 'expanded', 'ev_op_iterations': 10, 'ev_op_max_order': [10, 0], 'interpolation_is_log': True, 'interpolation_polynomial_degree': 4, 'n_integration_cores': 0}, 'debug': {'skip_non_singlet': False, 'skip_singlet': False}, 'debug_skip_non_singlet': False, 'debug_skip_singlet': False, 'ev_op_iterations': 10, 'ev_op_max_order': 10, 'inputgrid': None, 'inputpids': None, 'interpolation_is_log': True, 'interpolation_polynomial_degree': 4, 'interpolation_xgrid': [0.001, 0.01, 0.1, 0.5, 1.0], 'n_integration_cores': 0, 'rotations': {'inputgrid': None, 'inputpids': None, 'targetgrid': None, 'targetpids': None, 'xgrid': [0.001, 0.01, 0.1, 0.5, 1.0]}, 'targetgrid': None, 'targetpids': None}\n" ] - }, - { - "data": { - "text/plain": [ - "'0.0.0'" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ - "print(*myeko.keys(), sep=\"\\n\")\n", - "myeko[\"eko_version\"]" - ] - }, - { - "cell_type": "markdown", - "id": "3d610d9f-b2cb-4ddf-ab02-a47d9c9638c3", - 
"metadata": {}, - "source": [ - "In the last step, we proved that an `eko.output.Output` object essentially behaves like a dictionary. Indeed, it is a dictionary." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "fcca9db4-a266-4abb-8aea-2390fc3aaa72", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "isinstance(myeko, dict)" - ] - }, - { - "cell_type": "markdown", - "id": "7b1baedd-8a95-4b5c-a1a0-03716534ecf3", - "metadata": {}, - "source": [ - "At the moment we have only seen one attribute in action: an `Output` object records the version of `eko` that has generated.\n", - "Let's have a look at some other ones." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "593038bf-d8db-46ce-8a19-fbb2ae1908b6", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INTERNAL\n", - "log? True, degree? 4\n", - "[0.001 0.01 0.1 0.5 1. ]\n", - "\n", - "INPUT\n", - "grid: (5,) float64\n", - "pids: 14 \n", - "Q0^2: 1.0 GeV^2\n", - "TARGET\n", - "grid: (5,) float64\n", - "pids: 14 \n", - "\n", - "All grid the same? True\n" - ] - } - ], - "source": [ - "interpl = myeko[\"interpolation_is_log\"]\n", - "interpd = myeko[\"interpolation_polynomial_degree\"]\n", - "interpg = myeko[\"interpolation_xgrid\"]\n", - "print(f\"INTERNAL\\nlog? {interpl}, degree? {interpd}\\n{interpg}\\n\")\n", - "\n", - "ig = myeko[\"inputgrid\"]\n", - "ip = myeko[\"inputpids\"]\n", - "q0 = myeko[\"q2_ref\"]\n", - "print(f\"INPUT\\ngrid: {ig.shape} {ig.dtype}\\npids: {len(ip)} {type(ip[0])}\\nQ0^2: {q0} GeV^2\")\n", - "tg = myeko[\"targetgrid\"]\n", - "tp = myeko[\"targetpids\"]\n", - "print(f\"TARGET\\ngrid: {tg.shape} {tg.dtype}\\npids: {len(tp)} {type(tp[0])}\")\n", - "\n", - "print(\"\\nAll grid the same?\", (interpg == ig).all() and (interpg == tg).all())" + "# obtain theory card\n", + "print(evolution_operator.theory_card)\n", + "# or operator card\n", + "print(evolution_operator.operator_card)" ] }, { @@ -231,29 +115,28 @@ "metadata": {}, "source": [ "So an `Output` object has some internal parameters, related to the interpolation used for the calculation, and then some external attributes, related to the final operator delivered.\n", - "But actually, we have not accessed yet the actual operator." + "But actually, we have not accessed yet the actual operator - let's first find out again which final scales we computed:" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "id": "c82e6733-946f-4210-a6fa-95f995ee5be5", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "dict_keys([100.0])" + "array([100])" ] }, - "execution_count": 8, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "opgrid = myeko[\"Q2grid\"]\n", - "opgrid.keys()" + "evolution_operator.Q2grid" ] }, { @@ -261,13 +144,13 @@ "id": "075734fa-8108-44ff-9070-cb4e5baab072", "metadata": {}, "source": [ - "Even the operator grid is a dictionary, mapping $Q^2$ values to the operator evolving to that scale (from the unique starting scale $Q_0^2$).\n", - "In the present case there is a unique final scale, but in the general one there might be many." + "Remember that the unique starting scale is $Q_0^2$. In the present case there is a unique final scale, but in the general one there might be many.\n", + "Now, let's use this operator! 
The recommended way to load an operator is by using a context manager:" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "id": "15262aee-1cfa-40f3-b072-b53ec5517784", "metadata": {}, "outputs": [ @@ -275,23 +158,15 @@ "name": "stdout", "output_type": "stream", "text": [ - "dict_keys(['operators', 'operator_errors'])\n", - "\n", - "OPERATOR\n", - "(14, 5, 14, 5) float64\n", - "\n", - "ERROR\n", - "(14, 5, 14, 5) float64\n" + "operator: (14, 5, 14, 5)\n", + "error: (14, 5, 14, 5)\n" ] } ], "source": [ - "print(opgrid[100.].keys())\n", - "op = opgrid[100.][\"operators\"]\n", - "operr = opgrid[100.][\"operator_errors\"]\n", - "\n", - "print(f\"\\nOPERATOR\\n{op.shape} {op.dtype}\")\n", - "print(f\"\\nERROR\\n{operr.shape} {operr.dtype}\")" + "with evolution_operator.operator(100) as op:\n", + " print(f\"operator: {op.operator.shape}\")\n", + " print(f\"error: {op.error.shape}\")" ] }, { @@ -300,7 +175,7 @@ "metadata": {}, "source": [ "This is the final product we expected from the beginning: the evolution operator, delivered as a numerical array.\n", - "It is actually composed by 3 elements:\n", + "It is actually composed by two elements:\n", "\n", "- the **operator** itself, whose dimensions are `(flavor_out, x_out, flavor_in, x_in)`\n", "- the *error* on each operator element, propagated from the integration error on the numerical Mellin inversion (no other source is taken into account)" @@ -313,7 +188,7 @@ "source": [ "How to use this object is now completely up to the user, but a few helpers are included in another package: `ekobox`!\n", "\n", - "This package will be explored in a separate tutorial." + "This package will be explored in [a separate tutorial](./pdf.ipynb)." ] } ], @@ -333,7 +208,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/pyproject.toml b/pyproject.toml index 325610263..f4f769768 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,10 +82,13 @@ files = ["src/eko/version.py"] [tool.poetry.scripts] ekonav = "ekomark.navigator:launch_navigator" genpdf = "ekobox.genpdf.cli:cli" +eko = "ekobox.cli:command" [tool.poe.tasks] test = "pytest tests" -bench = "pytest benchmarks" +coverage = "$BROWSER htmlcov/index.html" +test-cov = ["test", "coverage"] +bench = ["bench-iso", "bench-run"] bench-iso.cmd = "pytest benchmarks -m isolated" bench-iso.env.NUMBA_DISABLE_JIT.default = "0" bench-run.cmd = "pytest benchmarks -m 'not isolated'" @@ -116,7 +119,7 @@ python_classes = ['Test*', 'Benchmark*'] python_functions = ['test_*', 'benchmark_*'] addopts = [ '--cov=eko', - # '--cov=ekobox', + '--cov=ekobox', '--cov-report=html', '--cov-report=xml', '--strict-markers', diff --git a/src/eko/compatibility.py b/src/eko/compatibility.py index a41ce4d91..86a89f9ca 100644 --- a/src/eko/compatibility.py +++ b/src/eko/compatibility.py @@ -1,52 +1,81 @@ # -*- coding: utf-8 -*- +"""Compatibility functions. + +Upgrade old input (NNPDF jargon compatible) to the native one. + +""" import copy +from typing import Optional -def update(theory, operators): - """ - Upgrade the legacy theory and observable runcards with the new settings. +def update(theory: dict, operators: Optional[dict]): + """Upgrade the legacy theory and observable runcards with the new settings. 
     Parameters
     ----------
-        theory : dict
-            theory runcard
-        observables : dict
-            observable runcard
+    theory : dict
+        theory runcard
+    operators : dict or None
+        operator runcard (pass `None` to upgrade only the theory)

     Returns
     -------
-        new_theory : dict
-            upgraded theory runcard
-        new_obs : dict
-            upgraded observable runcard
+    new_theory : dict
+        upgraded theory runcard
+    new_operators : dict
+        upgraded operator runcard
+
     """
     new_theory = copy.deepcopy(theory)
-    new_operators = copy.deepcopy(operators)
+    new_operators = copy.deepcopy(operators) if operators is not None else {}
+
     if "alphaqed" in new_theory:
         new_theory["alphaem"] = new_theory.pop("alphaqed")
     if "QED" in new_theory:
-        new_theory["order"] = (new_theory.pop("PTO") + 1, new_theory.pop("QED"))
-    if operators is not None:
-        if isinstance(new_operators["ev_op_max_order"], int):
-            new_operators["ev_op_max_order"] = (
-                new_operators["ev_op_max_order"],
+        new_theory["order"] = [new_theory.pop("PTO") + 1, new_theory.pop("QED")]
+
+    if operators is not None and "configs" not in operators:
+        new_operators["configs"] = {}
+        for k in (
+            "interpolation_polynomial_degree",
+            "interpolation_is_log",
+            "ev_op_iterations",
+            "backward_inversion",
+            "n_integration_cores",
+        ):
+            new_operators["configs"][k] = operators[k]
+        new_operators["rotations"] = {}
+        new_operators["debug"] = {}
+        for k in ("debug_skip_non_singlet", "debug_skip_singlet"):
+            new_operators["debug"][k[len("debug_") :]] = operators[k]
+
+        max_order = operators["ev_op_max_order"]
+        if isinstance(max_order, int):
+            new_operators["configs"]["ev_op_max_order"] = [
+                max_order,
                 new_theory["order"][1],
-            )
+            ]
+
+        new_operators["rotations"]["xgrid"] = operators["interpolation_xgrid"]
+        for basis in ("inputgrid", "targetgrid", "inputpids", "targetpids"):
+            new_operators["rotations"][f"{basis}"] = operators[basis]
+        new_operators["Q0"] = new_theory["Q0"]
+
     return new_theory, new_operators


-def update_theory(theory):
-    """
-    Upgrade the legacy theory runcards with the new settings.
+def update_theory(theory: dict):
+    """Upgrade the legacy theory runcards with the new settings.
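A sketch of the upgrade performed by `update` above, with minimal made-up runcards; only keys that `update` actually touches are included, and the values are purely illustrative:

from eko import compatibility

theory_card = {"PTO": 0, "QED": 0, "Q0": 1.0, "alphaqed": 0.007496}
operators_card = {
    "Q2grid": [100.0],
    "interpolation_xgrid": [1e-3, 1e-2, 0.1, 0.5, 1.0],
    "interpolation_polynomial_degree": 4,
    "interpolation_is_log": True,
    "ev_op_max_order": 10,
    "ev_op_iterations": 10,
    "backward_inversion": "exact",
    "n_integration_cores": 1,
    "debug_skip_singlet": False,
    "debug_skip_non_singlet": False,
    "inputgrid": None,
    "targetgrid": None,
    "inputpids": None,
    "targetpids": None,
}

new_theory, new_operators = compatibility.update(theory_card, operators_card)
# flat legacy keys are regrouped into nested sections:
# new_theory["order"] == [1, 0]
# new_operators["configs"]["ev_op_max_order"] == [10, 0]
# new_operators["debug"]["skip_singlet"] is False
# new_operators["rotations"]["xgrid"] == [1e-3, 1e-2, 0.1, 0.5, 1.0]
# new_operators["Q0"] == 1.0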
Parameters ---------- - theory : dict - theory runcard + theory : dict + theory runcard Returns ------- - new_theory : dict - upgraded theory runcard + new_theory : dict + upgraded theory runcard + """ return update(theory, None)[0] diff --git a/src/eko/couplings.py b/src/eko/couplings.py index b6c0d00ea..2d3ec307e 100644 --- a/src/eko/couplings.py +++ b/src/eko/couplings.py @@ -381,7 +381,7 @@ def __init__( raise NotImplementedError("a_s beyond N3LO is not implemented") if order[1] not in [0, 1, 2]: raise NotImplementedError("a_em beyond NLO is not implemented") - self.order = order + self.order = tuple(order) if method not in ["expanded", "exact"]: raise ValueError(f"Unknown method {method}") self.method = method diff --git a/src/eko/evolution_operator/__init__.py b/src/eko/evolution_operator/__init__.py index a03a27fa1..23d88b6cb 100644 --- a/src/eko/evolution_operator/__init__.py +++ b/src/eko/evolution_operator/__init__.py @@ -265,7 +265,7 @@ def __init__( self._mellin_cut = mellin_cut self.is_threshold = is_threshold self.op_members = {} - self.order = config["order"] + self.order = tuple(config["order"]) @property def n_pools(self): @@ -376,7 +376,7 @@ def quad_ker(self, label, logx, areas): nf=self.nf, L=np.log(self.fact_to_ren), ev_op_iterations=self.config["ev_op_iterations"], - ev_op_max_order=self.config["ev_op_max_order"], + ev_op_max_order=tuple(self.config["ev_op_max_order"]), sv_mode=self.sv_mode, is_threshold=self.is_threshold, ) @@ -435,7 +435,6 @@ def run_op_integration( ) temp_dict[label] = res[:2] column.append(temp_dict) - logger.info( "%s: computing operators - %u/%u took: %6f s", self.log_label, @@ -499,7 +498,7 @@ def integrate( # run integration in parallel for each grid point # or avoid opening a single pool - args = (self.run_op_integration, enumerate(np.log(self.int_disp.xgrid_raw))) + args = (self.run_op_integration, enumerate(np.log(self.int_disp.xgrid.raw))) if self.n_pools == 1: res = map(*args) else: diff --git a/src/eko/evolution_operator/grid.py b/src/eko/evolution_operator/grid.py index db235f3d1..2ab23c515 100644 --- a/src/eko/evolution_operator/grid.py +++ b/src/eko/evolution_operator/grid.py @@ -21,14 +21,33 @@ class OperatorGrid(sv.ModeMixin): - """ + """Collection of evolution operators for several scales. + The operator grid is the driver class of the evolution. It receives as input a threshold holder and a generator of a_s. From that point onwards it can compute any operator at any q2. - Parameters + Attributes ---------- + config: dict + q2_grid: np.ndarray + managers: dict + + """ + + def __init__( + self, + config, + q2_grid, + thresholds_config, + strong_coupling, + interpol_dispatcher, + ): + """Initialize `OperatorGrid`. 
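For reference, the benchmark changes at the top of this diff show the new construction pattern for this class: the interpolation dispatcher is built from an explicit `XGrid` instead of the runcard. A sketch, assuming `new_theory` and `new_operators` are new-style cards with a top-level `xgrid` entry, as in the benchmark:

from eko.couplings import Couplings
from eko.evolution_operator.grid import OperatorGrid
from eko.interpolation import InterpolatorDispatcher, XGrid
from eko.thresholds import ThresholdsAtlas

dispatcher = InterpolatorDispatcher(
    XGrid(
        new_operators["xgrid"],
        log=new_operators["configs"]["interpolation_is_log"],
    ),
    new_operators["configs"]["interpolation_polynomial_degree"],
)
grid = OperatorGrid.from_dict(
    new_theory,
    new_operators,
    ThresholdsAtlas.from_dict(new_theory),
    Couplings.from_dict(new_theory),
    dispatcher,
)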
+ + Parameters + ---------- config: dict configuration dictionary q2_grid: array @@ -44,20 +63,12 @@ class OperatorGrid(sv.ModeMixin): kernel_dispatcher: eko.kernel_generation.KernelDispatcher Instance of the :class:`~eko.kernel_generation.KernelDispatcher` with the information about the kernels - """ - def __init__( - self, - config, - q2_grid, - thresholds_config, - strong_coupling, - interpol_dispatcher, - ): + """ # check order = config["order"] method = config["method"] - if not method in [ + if method not in [ "iterate-exact", "iterate-expanded", "truncated", @@ -119,13 +130,13 @@ def from_dict( } method = mod_ev2method.get(method, method) config["method"] = method - config["backward_inversion"] = operators_card["backward_inversion"] + config["backward_inversion"] = operators_card["configs"]["backward_inversion"] config["fact_to_ren"] = (theory_card["fact_to_ren_scale_ratio"]) ** 2 - config["ev_op_max_order"] = operators_card["ev_op_max_order"] - config["ev_op_iterations"] = operators_card["ev_op_iterations"] - config["debug_skip_singlet"] = operators_card["debug_skip_singlet"] - config["debug_skip_non_singlet"] = operators_card["debug_skip_non_singlet"] - config["n_integration_cores"] = operators_card["n_integration_cores"] + config["ev_op_max_order"] = operators_card["configs"]["ev_op_max_order"] + config["ev_op_iterations"] = operators_card["configs"]["ev_op_iterations"] + config["n_integration_cores"] = operators_card["configs"]["n_integration_cores"] + config["debug_skip_singlet"] = operators_card["debug"]["skip_singlet"] + config["debug_skip_non_singlet"] = operators_card["debug"]["skip_non_singlet"] config["HQ"] = theory_card["HQ"] config["ModSV"] = theory_card["ModSV"] q2_grid = np.array(operators_card["Q2grid"], np.float_) @@ -203,7 +214,7 @@ def get_threshold_operators(self, path): return thr_ops def compute(self, q2grid=None): - """Computes all ekos for the `q2grid`. + """Compute all ekos for the `q2grid`. Parameters ---------- @@ -234,7 +245,7 @@ def compute(self, q2grid=None): return grid_return def generate(self, q2): - r"""Computes a single EKO. + r"""Compute a single EKO. Parameters ---------- @@ -297,6 +308,6 @@ def generate(self, q2): values, errors = final_op.to_flavor_basis_tensor() return { - "operators": values, - "operator_errors": errors, + "operator": values, + "error": errors, } diff --git a/src/eko/interpolation.py b/src/eko/interpolation.py index 118d8ffa0..72c3f78b5 100644 --- a/src/eko/interpolation.py +++ b/src/eko/interpolation.py @@ -8,9 +8,11 @@ """ import logging import math +from typing import Sequence, Union import numba as nb import numpy as np +import numpy.typing as npt import scipy.special as sp logger = logging.getLogger(__name__) @@ -414,6 +416,66 @@ def __call__(self, *args, **kwargs): return self.callable(*args, **kwargs) +class XGrid: + def __init__(self, xgrid: Union[Sequence, npt.NDArray], log: bool = True): + ugrid = np.array(np.unique(xgrid), np.float_) + if len(xgrid) != len(ugrid): + raise ValueError(f"xgrid is not unique: {xgrid}") + if len(xgrid) < 2: + raise ValueError(f"xgrid needs at least 2 points, received {len(xgrid)}") + + self.log = log + + # henceforth ugrid might no longer be the input! 
+ # which is ok, because for most of the code this is all we need to do + # to distinguish log and non-log + if log: + self._raw = ugrid + ugrid = np.log(ugrid) + + self.grid = ugrid + + def __len__(self) -> int: + return len(self.grid) + + def __eq__(self, other) -> bool: + """Checks equality""" + # check shape before comparing values + return len(self) == len(other) and np.allclose(self.raw, other.raw) + + @property + def raw(self) -> np.ndarray: + return self.grid if not self.log else self._raw + + @property + def size(self) -> int: + return self.grid.size + + def tolist(self) -> list: + return self.raw.tolist() + + def dump(self) -> dict: + return dict(grid=self.tolist(), log=self.log) + + @classmethod + def load(cls, doc: dict): + return cls(doc["grid"], log=doc["log"]) + + @classmethod + def fromcard(cls, value: list, log: bool): + if len(value) == 0: + raise ValueError("Empty xgrid!") + + if value[0] == "make_grid": + xgrid = make_grid(*value[1:]) + elif value[0] == "make_lambert_grid": + xgrid = make_lambert_grid(*value[1:]) + else: + xgrid = np.array(value) + + return cls(xgrid, log=log) + + class InterpolatorDispatcher: """ Setups the interpolator. @@ -426,7 +488,7 @@ class InterpolatorDispatcher: Parameters ---------- - xgrid_in : numpy.ndarray + xgrid : numpy.ndarray Grid in x-space from which the interpolators are constructed polynomial_degree : int degree of the interpolation polynomial @@ -436,41 +498,32 @@ class InterpolatorDispatcher: if true compiles the function on N, otherwise compiles x """ - def __init__(self, xgrid, polynomial_degree, log=True, mode_N=True): + def __init__( + self, xgrid: Union[XGrid, Sequence, npt.NDArray], polynomial_degree, mode_N=True + ): + if not isinstance(xgrid, XGrid): + xgrid = XGrid(xgrid) + # sanity checks - xgrid_size = len(xgrid) - ugrid = np.array(np.unique(xgrid), np.float_) - if xgrid_size != len(ugrid): - raise ValueError(f"xgrid is not unique: {xgrid}") - xgrid = ugrid - if xgrid_size < 2: - raise ValueError(f"xgrid needs at least 2 points, received {xgrid_size}") if polynomial_degree < 1: raise ValueError( f"need at least polynomial_degree 1, received {polynomial_degree}" ) - if xgrid_size <= polynomial_degree: + if len(xgrid) <= polynomial_degree: raise ValueError( f"to interpolate with degree {polynomial_degree} " " we need at least that much points + 1" ) - # keep a true copy of grid - self.xgrid_raw = xgrid - # henceforth xgrid might no longer be the input! 
- # which is ok, because for most of the code this is all we need to do - # to distinguish log and non-log - if log: - xgrid = np.log(xgrid) # Save the different variables self.xgrid = xgrid self.polynomial_degree = polynomial_degree - self.log = log + self.log = xgrid.log logger.info( "Interpolation: number of points = %d, polynomial degree = %d, logarithmic = %s", - xgrid_size, + len(xgrid), polynomial_degree, - log, + xgrid.log, ) # Create blocks @@ -482,76 +535,32 @@ def __init__(self, xgrid, polynomial_degree, log=True, mode_N=True): if polynomial_degree % 2 == 0: po2 -= 1 # iterate areas: there is 1 less then number of points - for i in range(xgrid_size - 1): + for i in range(len(xgrid) - 1): kmin = max(0, i - po2) kmax = kmin + polynomial_degree - if kmax >= xgrid_size: - kmax = xgrid_size - 1 + if kmax >= len(xgrid): + kmax = len(xgrid) - 1 kmin = kmax - polynomial_degree b = (kmin, kmax) list_of_blocks.append(b) # Generate the basis functions basis_functions = [] - for i in range(xgrid_size): + for i in range(len(xgrid)): new_basis = BasisFunction( - xgrid, i, list_of_blocks, mode_log=log, mode_N=mode_N + xgrid.grid, i, list_of_blocks, mode_log=xgrid.log, mode_N=mode_N ) basis_functions.append(new_basis) self.basis = basis_functions - @classmethod - def from_dict(cls, operators_card, mode_N=True): - """ - Create object from dictionary. - - .. list-table:: operators runcard parameters - :header-rows: 1 - - * - Name - - Type - - description - * - ``interpolation_xgrid`` - - :py:obj:`list(float)` - - the interpolation grid - * - ``interpolation_polynomial_degree`` - - :py:obj:`int` - - polynomial degree of the interpolating function - * - ``interpolation_is_log`` - - :py:obj:`bool` - - use logarithmic interpolation? - - Parameters - ---------- - operators_card : dict - input configurations - """ - # load xgrid - xgrid = operators_card["interpolation_xgrid"] - if len(xgrid) == 0: - raise ValueError("Empty xgrid!") - if xgrid[0] == "make_grid": - xgrid = make_grid(*xgrid[1:]) - elif xgrid[0] == "make_lambert_grid": - xgrid = make_lambert_grid(*xgrid[1:]) - is_log_interpolation = bool(operators_card["interpolation_is_log"]) - polynom_rank = operators_card["interpolation_polynomial_degree"] - return cls( - xgrid, - polynom_rank, - log=is_log_interpolation, - mode_N=mode_N, - ) - def __eq__(self, other): """Checks equality""" checks = [ - len(self.xgrid_raw) == len(other.xgrid_raw), self.log == other.log, self.polynomial_degree == other.polynomial_degree, + self.xgrid == other.xgrid, ] - # check elements after shape - return all(checks) and np.allclose(self.xgrid_raw, other.xgrid_raw) + return all(checks) def __iter__(self): # return iter(self.basis) @@ -561,7 +570,7 @@ def __iter__(self): def __getitem__(self, item): return self.basis[item] - def get_interpolation(self, targetgrid): + def get_interpolation(self, targetgrid: Union[npt.NDArray, Sequence]): """Computes interpolation matrix between `targetgrid` and `xgrid`. .. math:: @@ -581,10 +590,10 @@ def get_interpolation(self, targetgrid): """ # trivial? 
- if len(targetgrid) == len(self.xgrid_raw) and np.allclose( - targetgrid, self.xgrid_raw + if len(targetgrid) == len(self.xgrid) and np.allclose( + targetgrid, self.xgrid.raw ): - return np.eye(len(self.xgrid_raw)) + return np.eye(len(self.xgrid)) # compute map out = [] for x in targetgrid: @@ -607,9 +616,9 @@ def to_dict(self): full grid configuration """ ret = { - "interpolation_xgrid": self.xgrid_raw, - "interpolation_polynomial_degree": self.polynomial_degree, - "interpolation_is_log": self.log, + "xgrid": self.xgrid.dump(), + "polynomial_degree": self.polynomial_degree, + "is_log": self.log, } return ret diff --git a/src/eko/member.py b/src/eko/member.py index e9f3b4576..f97c8cca1 100644 --- a/src/eko/member.py +++ b/src/eko/member.py @@ -243,7 +243,7 @@ def operator_multiply(left, right, operation): Returns ------- - dict : + dict new operator members dictionary """ # prepare paths diff --git a/src/eko/output.py b/src/eko/output.py deleted file mode 100644 index c6a16af87..000000000 --- a/src/eko/output.py +++ /dev/null @@ -1,424 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This file contains the output management -""" -import io -import logging -import pathlib -import tarfile -import tempfile -import warnings - -import lz4.frame -import numpy as np -import yaml - -from . import basis_rotation as br -from . import interpolation, version - -logger = logging.getLogger(__name__) - - -class Output(dict): - """ - Wrapper for the output to help with application - to PDFs and dumping to file. - """ - - def xgrid_reshape(self, targetgrid=None, inputgrid=None): - """ - Changes the operators to have in the output targetgrid and/or in the input inputgrid. - - The operation is in-place. - - Parameters - ---------- - targetgrid : None or list - xgrid for the target - inputgrid : None or list - xgrid for the input - """ - # calling with no arguments is an error - if targetgrid is None and inputgrid is None: - raise ValueError("Nor inputgrid nor targetgrid was given") - # now check to the current status - if ( - targetgrid is not None - and len(targetgrid) == len(self["targetgrid"]) - and np.allclose(targetgrid, self["targetgrid"]) - ): - targetgrid = None - warnings.warn("The new targetgrid is close to the current targetgrid") - if ( - inputgrid is not None - and len(inputgrid) == len(self["inputgrid"]) - and np.allclose(inputgrid, self["inputgrid"]) - ): - inputgrid = None - warnings.warn("The new inputgrid is close to the current inputgrid") - # after the checks: if there is still nothing to do, skip - if targetgrid is None and inputgrid is None: - logger.debug("Nothing done.") - return - - # construct matrices - if targetgrid is not None: - b = interpolation.InterpolatorDispatcher( - self["targetgrid"], - self["interpolation_polynomial_degree"], - self["interpolation_is_log"], - False, - ) - target_rot = b.get_interpolation(targetgrid) - self["targetgrid"] = np.array(targetgrid) - if inputgrid is not None: - b = interpolation.InterpolatorDispatcher( - inputgrid, - self["interpolation_polynomial_degree"], - self["interpolation_is_log"], - False, - ) - input_rot = b.get_interpolation(self["inputgrid"]) - self["inputgrid"] = np.array(inputgrid) - - # build new grid - for elem in self["Q2grid"].values(): - ops = elem["operators"] - errs = elem["operator_errors"] - if targetgrid is not None and inputgrid is None: - ops = np.einsum("ij,ajbk->aibk", target_rot, ops) - errs = np.einsum("ij,ajbk->aibk", target_rot, errs) - elif inputgrid is not None and targetgrid is None: - ops = np.einsum("ajbk,kl->ajbl", 
ops, input_rot) - errs = np.einsum("ajbk,kl->ajbl", errs, input_rot) - else: - ops = np.einsum("ij,ajbk,kl->aibl", target_rot, ops, input_rot) - errs = np.einsum("ij,ajbk,kl->aibl", target_rot, errs, input_rot) - elem["operators"] = ops - elem["operator_errors"] = errs - - def flavor_reshape(self, targetbasis=None, inputbasis=None): - """ - Changes the operators to have in the output targetbasis and/or in the input inputbasis. - - The operation is in-place. - - Parameters - ---------- - targetbasis : numpy.ndarray - target rotation specified in the flavor basis - inputbasis : None or list - input rotation specified in the flavor basis - """ - # calling with no arguments is an error - if targetbasis is None and inputbasis is None: - raise ValueError("Nor inputbasis nor targetbasis was given") - # now check to the current status - if targetbasis is not None and np.allclose( - targetbasis, np.eye(len(self["targetpids"])) - ): - targetbasis = None - warnings.warn("The new targetbasis is close to current basis") - if inputbasis is not None and np.allclose( - inputbasis, np.eye(len(self["inputpids"])) - ): - inputbasis = None - warnings.warn("The new inputbasis is close to current basis") - # after the checks: if there is still nothing to do, skip - if targetbasis is None and inputbasis is None: - logger.debug("Nothing done.") - return - - # flip input around - if inputbasis is not None: - inv_inputbasis = np.linalg.inv(inputbasis) - - # build new grid - for elem in self["Q2grid"].values(): - ops = elem["operators"] - errs = elem["operator_errors"] - if targetbasis is not None and inputbasis is None: - ops = np.einsum("ca,ajbk->cjbk", targetbasis, ops) - errs = np.einsum("ca,ajbk->cjbk", targetbasis, errs) - elif inputbasis is not None and targetbasis is None: - ops = np.einsum("ajbk,bd->ajdk", ops, inv_inputbasis) - errs = np.einsum("ajbk,bd->ajdk", errs, inv_inputbasis) - else: - ops = np.einsum("ca,ajbk,bd->cjdk", targetbasis, ops, inv_inputbasis) - errs = np.einsum("ca,ajbk,bd->cjdk", targetbasis, errs, inv_inputbasis) - elem["operators"] = ops - elem["operator_errors"] = errs - # drop PIDs - keeping them int nevertheless - if inputbasis is not None: - self["inputpids"] = [0] * len(self["inputpids"]) - if targetbasis is not None: - self["targetpids"] = [0] * len(self["targetpids"]) - - def to_evol(self, source=True, target=False): - """ - Rotate the operator into evolution basis. - - This also assigns also the pids. The operation is in-place. - - Parameters - ---------- - source : bool - rotate on the input tensor - target : bool - rotate on the output tensor - """ - # rotate - inputbasis = br.rotate_flavor_to_evolution if source else None - targetbasis = br.rotate_flavor_to_evolution if target else None - self.flavor_reshape(inputbasis=inputbasis, targetbasis=targetbasis) - # assign pids - if source: - self["inputpids"] = br.evol_basis_pids - if target: - self["targetpids"] = br.evol_basis_pids - - def get_raw(self, binarize=True, skip_q2_grid=False): - """ - Serialize result as dict/YAML. - - This maps the original numpy matrices to lists. 
- - Parameters - ---------- - binarize : bool - dump in binary format (instead of list format) - - Returns - ------- - out : dict - dictionary which will be written on output - """ - # prepare output dict - out = {"Q2grid": {}, "eko_version": version.__version__} - # dump raw elements - for f in [ - "interpolation_polynomial_degree", - "interpolation_is_log", - "q2_ref", - ]: - out[f] = self[f] - - # list() work both for tuple and list - out["inputpids"] = list(self["inputpids"]) - out["targetpids"] = list(self["targetpids"]) - # make raw lists - # TODO: is interpolation_xgrid really needed in the output? - for k in ["interpolation_xgrid", "targetgrid", "inputgrid"]: - out[k] = self[k].tolist() - # make operators raw - if not skip_q2_grid: - for q2, op in self["Q2grid"].items(): - out["Q2grid"][q2] = {} - for k, v in op.items(): - if k == "alphas": - out["Q2grid"][q2][k] = float(v) - continue - if binarize: - out["Q2grid"][q2][k] = lz4.frame.compress(v.tobytes()) - else: - out["Q2grid"][q2][k] = v.tolist() - else: - out["Q2grid"] = self["Q2grid"] - return out - - def dump_yaml(self, stream=None, binarize=True, skip_q2_grid=False): - """ - Serialize result as YAML. - - Parameters - ---------- - stream : None or stream - if given, dump is written on it - binarize : bool - dump in binary format (instead of list format) - skip_q2_grid : bool - avoid dumping Q2grid (i.e. the actual operators) into the yaml - file (default: ``False``) - - Returns - ------- - dump : any - result of dump(output, stream), i.e. a string, if no stream is given or - Null, if written successfully to stream - """ - # TODO explicitly silence yaml - out = self.get_raw(binarize, skip_q2_grid=skip_q2_grid) - return yaml.dump(out, stream) - - def dump_yaml_to_file(self, filename, binarize=True, skip_q2_grid=False): - """ - Writes YAML representation to a file. - - Parameters - ---------- - filename : str - target file name - binarize : bool - dump in binary format (instead of list format) - skip_q2_grid : bool - avoid dumping Q2grid (i.e. the actual operators) into the yaml - file (default: ``False``) - - Returns - ------- - ret : any - result of dump(output, stream), i.e. 
Null if written successfully - """ - with open(filename, "w", encoding="utf-8") as f: - ret = self.dump_yaml(f, binarize, skip_q2_grid=skip_q2_grid) - return ret - - def dump_tar(self, tarname): - """ - Writes representation into a tar archive containing: - - - metadata (in YAML) - - operator (in numpy ``.npy`` format) - - Parameters - ---------- - tarname : str - target file name - """ - tarpath = pathlib.Path(tarname) - if tarpath.suffix != ".tar": - raise ValueError(f"'{tarname}' is not a valid tar filename, wrong suffix") - - with tempfile.TemporaryDirectory() as tmpdir: - tmpdir = pathlib.Path(tmpdir) - - cls = self.__class__ - metadata = cls(**{str(k): v for k, v in self.items() if k != "Q2grid"}) - metadata["Q2grid"] = list(self["Q2grid"].keys()) - - yamlname = tmpdir / "metadata.yaml" - metadata.dump_yaml_to_file(yamlname, skip_q2_grid=True) - - for kind in next(iter(self["Q2grid"].values())).keys(): - operator = np.stack([q2[kind] for q2 in self["Q2grid"].values()]) - stream = io.BytesIO() - np.save(stream, operator) - stream.seek(0) - with lz4.frame.open( - (tmpdir / kind).with_suffix(".npy.lz4"), "wb" - ) as fo: - fo.write(stream.read()) - - with tarfile.open(tarpath, "w") as tar: - tar.add(tmpdir, arcname=tarpath.stem) - - @classmethod - def load_yaml(cls, stream, skip_q2_grid=False): - """ - Load YAML representation from stream - - Parameters - ---------- - stream : any - source stream - skip_q2_grid : bool - avoid loading Q2grid (i.e. the actual operators) from the yaml - file (default: ``False``) - - Returns - ------- - obj : output - loaded object - """ - obj = yaml.safe_load(stream) - len_tpids = len(obj["targetpids"]) - len_ipids = len(obj["inputpids"]) - len_tgrid = len(obj["targetgrid"]) - len_igrid = len(obj["inputgrid"]) - # cast lists to numpy - for k in ["interpolation_xgrid", "inputgrid", "targetgrid"]: - obj[k] = np.array(obj[k]) - # make operators numpy - if not skip_q2_grid: - for op in obj["Q2grid"].values(): - for k, v in op.items(): - if k == "alphas": - v = float(v) - elif isinstance(v, list): - v = np.array(v) - elif isinstance(v, bytes): - v = np.frombuffer(lz4.frame.decompress(v)) - v = v.reshape(len_tpids, len_tgrid, len_ipids, len_igrid) - op[k] = v - return cls(obj) - - @classmethod - def load_yaml_from_file(cls, filename, skip_q2_grid=False): - """ - Load YAML representation from file - - Parameters - ---------- - filename : str - source file name - skip_q2_grid : bool - avoid loading Q2grid (i.e. the actual operators) from the yaml - file (default: ``False``) - - Returns - ------- - obj : output - loaded object - """ - obj = None - with open(filename, encoding="utf-8") as o: - obj = Output.load_yaml(o, skip_q2_grid) - return obj - - @classmethod - def load_tar(cls, tarname): - """ - Load tar representation from file (compliant with :meth:`dump_tar` - output). 
- - Parameters - ---------- - tarname : str - source tar name - - Returns - ------- - obj : output - loaded object - """ - tarpath = pathlib.Path(tarname) - - with tempfile.TemporaryDirectory() as tmpdir: - tmpdir = pathlib.Path(tmpdir) - - with tarfile.open(tarpath, "r") as tar: - tar.extractall(tmpdir) - - # metadata = cls(**{str(k): v for k, v in self.items() if k != "Q2grid"}) - # metadata["Q2grid"] = list(self["Q2grid"].keys()) - - innerdir = list(tmpdir.glob("*"))[0] - yamlname = innerdir / "metadata.yaml" - metadata = cls.load_yaml_from_file(yamlname, skip_q2_grid=True) - - grids = {} - for fp in innerdir.glob("*.npy.lz4"): - with lz4.frame.open(fp, "rb") as fd: - stream = io.BytesIO(fd.read()) - stream.seek(0) - grids[pathlib.Path(fp.stem).stem] = np.load(stream) - - fp.unlink() - - q2grid = metadata["Q2grid"] - operator_grid = {} - for q2, slices in zip(q2grid, zip(*grids.values())): - operator_grid[q2] = dict(zip(grids.keys(), slices)) - metadata["Q2grid"] = operator_grid - - return metadata diff --git a/src/eko/output/__init__.py b/src/eko/output/__init__.py new file mode 100644 index 000000000..d8bb6ebaf --- /dev/null +++ b/src/eko/output/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +""" +This subpackage orchestrate the calculation workflow. +""" + +from . import manipulate +from .struct import EKO, Operator diff --git a/src/eko/output/legacy.py b/src/eko/output/legacy.py new file mode 100644 index 000000000..b1ec9de2c --- /dev/null +++ b/src/eko/output/legacy.py @@ -0,0 +1,291 @@ +# -*- coding: utf-8 -*- +"""Support legacy storage formats.""" +import copy +import dataclasses +import io +import os +import pathlib +import tarfile +import tempfile +from typing import Optional, TextIO, Union + +import lz4.frame +import numpy as np +import yaml + +from .. import version +from . import struct + + +def get_raw(eko: struct.EKO, binarize: bool = True): + """Serialize result as dict/YAML. + + This maps the original numpy matrices to lists. + + Parameters + ---------- + binarize : bool + dump in binary format (instead of list format) + + Returns + ------- + out : dict + dictionary which will be written on output + + """ + obj = eko.raw + + # prepare output dict + out = {"Q2grid": {}, "eko_version": version.__version__} + out["Q0"] = obj["Q0"] + # dump raw elements + for sec in ["configs", "rotations"]: + for key, value in obj[sec].items(): + if key.startswith("_"): + key = key[1:] + if "grid" in key and value is not None: + value = value["grid"] + out[key] = value + + out["interpolation_xgrid"] = out["xgrid"] + del out["xgrid"] + + # make operators raw + for q2, op in eko.items(): + q2 = float(q2) + out["Q2grid"][q2] = {} + if op is not None: + for k, v in dataclasses.asdict(op).items(): + if binarize: + out["Q2grid"][q2][k] = lz4.frame.compress(v.tobytes()) + else: + out["Q2grid"][q2][k] = v.tolist() + + return out + + +def tocard(raw: dict) -> dict: + """Upgrade raw representation to new card. 
+ + Parameters + ---------- + raw: dict + legacy raw representation of Output + + Returns + ------- + dict + new format operator card + + """ + card = copy.deepcopy(raw) + + card["rotations"] = {} + card["rotations"]["xgrid"] = raw["interpolation_xgrid"] + card["rotations"]["pids"] = raw["pids"] + for basis in ("inputgrid", "targetgrid", "inputpids", "targetpids"): + card["rotations"][basis] = raw[basis] + + card["configs"] = {} + for field in dataclasses.fields(struct.Configs): + card["configs"][field.name] = raw[field.name] + del card[field.name] + + return card + + +def dump_yaml( + obj: struct.EKO, + stream: Optional[TextIO] = None, + binarize: bool = True, +): + """Serialize result as YAML. + + Parameters + ---------- + stream : None or stream + if given, dump is written on it + binarize : bool + dump in binary format (instead of list format) + + Returns + ------- + dump : any + result of dump(output, stream), i.e. a string, if no stream is given or + Null, if written successfully to stream + + """ + out = get_raw(obj, binarize) + return yaml.dump(out, stream) + + +def dump_yaml_to_file( + obj: struct.EKO, + filename: Union[str, os.PathLike], + binarize: bool = True, +): + """Write YAML representation to a file. + + Parameters + ---------- + filename : str + target file name + binarize : bool + dump in binary format (instead of list format) + + Returns + ------- + ret : any + result of dump(output, stream), i.e. Null if written successfully + + """ + with open(filename, "w", encoding="utf-8") as f: + ret = dump_yaml(obj, f, binarize) + return ret + + +def dump_tar(obj: struct.EKO, tarname: Union[str, os.PathLike]): + """Write representation into a tar archive. + + The written archive will contain: + + - metadata (in YAML) + - operator (in numpy ``.npy`` format) + + Parameters + ---------- + tarname : str + target file name + + """ + tarpath = pathlib.Path(tarname) + if tarpath.suffix != ".tar": + raise ValueError(f"'{tarname}' is not a valid tar filename, wrong suffix") + + with tempfile.TemporaryDirectory() as tmpdir: + tmpdir = pathlib.Path(tmpdir) + + metadata = {str(k): v for k, v in get_raw(obj).items() if k != "Q2grid"} + metadata["Q2grid"] = obj.Q2grid.tolist() + + yamlname = tmpdir / "metadata.yaml" + with open(yamlname, "w", encoding="utf-8") as fd: + yaml.dump(metadata, fd) + + for kind in ["operator", "error"]: + elements = [] + for q2, op in obj.items(): + el = getattr(op, kind) + elements.append(el) + operator = np.stack(elements) + stream = io.BytesIO() + np.save(stream, operator) + stream.seek(0) + with lz4.frame.open((tmpdir / kind).with_suffix(".npy.lz4"), "wb") as fo: + fo.write(stream.read()) + + with tarfile.open(tarpath, "w") as tar: + tar.add(tmpdir, arcname=tarpath.stem) + + +def load_yaml(stream: TextIO) -> struct.EKO: + """Load YAML representation from stream. 
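Taken together, the helpers in this module support a round trip along these lines. A sketch, where `myeko` stands for any `EKO` instance computed elsewhere; note that this legacy format stores no theory metadata, so `load_tar` attaches an empty theory card:

from eko.output import legacy

# write an EKO to the legacy tar layout (metadata.yaml + lz4-compressed .npy)
legacy.dump_tar(myeko, "myeko.tar")

# read it back; the reloaded object carries an empty theory card
reloaded = legacy.load_tar("myeko.tar")
for q2, op in reloaded.items():
    print(q2, op.operator.shape, op.error.shape)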
+ + Parameters + ---------- + stream : TextIO + source stream + + Returns + ------- + obj : output + loaded object + + """ + obj = tocard(yaml.safe_load(stream)) + len_tpids = len(obj["rotations"]["targetpids"]) + len_ipids = len(obj["rotations"]["inputpids"]) + len_tgrid = len(obj["rotations"]["targetgrid"]) + len_igrid = len(obj["rotations"]["inputgrid"]) + # make operators numpy + for op in obj["Q2grid"].values(): + for k, v in op.items(): + if isinstance(v, list): + v = np.array(v) + elif isinstance(v, bytes): + v = np.frombuffer(lz4.frame.decompress(v)) + v = v.reshape(len_tpids, len_tgrid, len_ipids, len_igrid) + op[k] = v + + return struct.EKO.new(theory={}, operator=obj) + + +def load_yaml_from_file(filename: Union[str, os.PathLike]) -> struct.EKO: + """Load YAML representation from file. + + Parameters + ---------- + filename : str + source file name + + Returns + ------- + obj : output + loaded object + + """ + obj = None + with open(filename, encoding="utf-8") as o: + obj = load_yaml(o) + return obj + + +def load_tar(tarname: Union[str, os.PathLike]) -> struct.EKO: + """Load tar representation from file. + + Compliant with :meth:`dump_tar` output. + + Parameters + ---------- + tarname : str + source tar name + + Returns + ------- + obj : output + loaded object + + """ + tarpath = pathlib.Path(tarname) + + operator_grid = {} + with tempfile.TemporaryDirectory() as tmpdir: + tmpdir = pathlib.Path(tmpdir) + + with tarfile.open(tarpath, "r") as tar: + tar.extractall(tmpdir) + + # load metadata + innerdir = list(tmpdir.glob("*"))[0] + yamlname = innerdir / "metadata.yaml" + with open(yamlname, encoding="utf-8") as fd: + metadata = tocard(yaml.safe_load(fd)) + + # get actual grids + grids = {} + for fp in innerdir.glob("*.npy.lz4"): + with lz4.frame.open(fp, "rb") as fd: + stream = io.BytesIO(fd.read()) + stream.seek(0) + grids[pathlib.Path(fp.stem).stem] = np.load(stream) + + q2grid = metadata["Q2grid"] + for q2, slices in zip(q2grid, zip(*grids.values())): + operator_grid[q2] = dict(zip(grids.keys(), slices)) + + # now eveything is in place + eko = struct.EKO.new(theory={}, operator=metadata) + for q2, op in operator_grid.items(): + eko[q2] = struct.Operator.from_dict(op) + + return eko diff --git a/src/eko/output/manipulate.py b/src/eko/output/manipulate.py new file mode 100644 index 000000000..5a91f114d --- /dev/null +++ b/src/eko/output/manipulate.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +"""Manipulate output generate by EKO.""" +import logging +import warnings +from typing import Optional + +import numpy as np + +from .. import basis_rotation as br +from .. import interpolation +from .struct import EKO + +logger = logging.getLogger(__name__) + + +def xgrid_reshape( + eko: EKO, + targetgrid: Optional[interpolation.XGrid] = None, + inputgrid: Optional[interpolation.XGrid] = None, +): + """Reinterpolate operators on output and/or input grids. + + The operation is in-place. 
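A sketch of typical usage of the manipulations defined in this module, assuming `myeko` is an `eko.output.struct.EKO` instance:

import numpy as np

from eko import interpolation
from eko.output import manipulate

# reinterpolate the output side of an existing EKO onto a coarser grid
new_grid = interpolation.XGrid(np.geomspace(1e-3, 1.0, 10))
manipulate.xgrid_reshape(myeko, targetgrid=new_grid)

# rotate the input side to the evolution basis (in-place as well)
manipulate.to_evol(myeko, source=True)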
+
+    Parameters
+    ----------
+    targetgrid : None or interpolation.XGrid
+        xgrid for the target (output PDF)
+    inputgrid : None or interpolation.XGrid
+        xgrid for the input (input PDF)
+
+    """
+    # calling with no arguments is an error
+    if targetgrid is None and inputgrid is None:
+        raise ValueError("Neither inputgrid nor targetgrid was given")
+    # now check against the current status
+    if (
+        targetgrid is not None
+        and len(targetgrid) == len(eko.rotations.targetgrid)
+        and np.allclose(targetgrid.raw, eko.rotations.targetgrid.raw)
+    ):
+        targetgrid = None
+        warnings.warn("The new targetgrid is close to the current targetgrid")
+    if (
+        inputgrid is not None
+        and len(inputgrid) == len(eko.rotations.inputgrid)
+        and np.allclose(inputgrid.raw, eko.rotations.inputgrid.raw)
+    ):
+        inputgrid = None
+        warnings.warn("The new inputgrid is close to the current inputgrid")
+    # after the checks: if there is still nothing to do, skip
+    if targetgrid is None and inputgrid is None:
+        logger.debug("Nothing done.")
+        return
+
+    # construct matrices
+    if targetgrid is not None:
+        b = interpolation.InterpolatorDispatcher(
+            eko.rotations.targetgrid,
+            eko.configs.interpolation_polynomial_degree,
+            False,
+        )
+        target_rot = b.get_interpolation(targetgrid.raw)
+        eko.rotations._targetgrid = targetgrid
+    if inputgrid is not None:
+        b = interpolation.InterpolatorDispatcher(
+            inputgrid,
+            eko.configs.interpolation_polynomial_degree,
+            False,
+        )
+        input_rot = b.get_interpolation(eko.rotations.inputgrid.raw)
+        eko.rotations._inputgrid = inputgrid
+
+    # build new grid
+    for q2, elem in eko.items():
+        ops = elem.operator
+        errs = elem.error
+        if targetgrid is not None and inputgrid is None:
+            ops = np.einsum("ij,ajbk->aibk", target_rot, ops)
+            errs = np.einsum("ij,ajbk->aibk", target_rot, errs)
+        elif inputgrid is not None and targetgrid is None:
+            ops = np.einsum("ajbk,kl->ajbl", ops, input_rot)
+            errs = np.einsum("ajbk,kl->ajbl", errs, input_rot)
+        else:
+            ops = np.einsum("ij,ajbk,kl->aibl", target_rot, ops, input_rot)
+            errs = np.einsum("ij,ajbk,kl->aibl", target_rot, errs, input_rot)
+        elem.operator = ops
+        elem.error = errs
+
+        eko[q2] = elem
+
+
+def flavor_reshape(
+    eko: EKO,
+    targetpids: Optional[np.ndarray] = None,
+    inputpids: Optional[np.ndarray] = None,
+):
+    """Change the operators to have targetpids in the output and/or inputpids in the input.
+
+    The operation is in-place.
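+
+    A sketch rotating the input side to the evolution basis, exactly as
+    :func:`to_evol` below does (``myeko`` is illustrative)::
+
+        flavor_reshape(myeko, inputpids=br.rotate_flavor_to_evolution)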
+
+    Parameters
+    ----------
+    targetpids : None or numpy.ndarray
+        target rotation specified in the flavor basis
+    inputpids : None or numpy.ndarray
+        input rotation specified in the flavor basis
+
+    """
+    # calling with no arguments is an error
+    if targetpids is None and inputpids is None:
+        raise ValueError("Neither inputpids nor targetpids was given")
+    # now check against the current status
+    if targetpids is not None and np.allclose(
+        targetpids, np.eye(len(eko.rotations.targetpids))
+    ):
+        targetpids = None
+        warnings.warn("The new targetpids is close to current basis")
+    if inputpids is not None and np.allclose(
+        inputpids, np.eye(len(eko.rotations.inputpids))
+    ):
+        inputpids = None
+        warnings.warn("The new inputpids is close to current basis")
+    # after the checks: if there is still nothing to do, skip
+    if targetpids is None and inputpids is None:
+        logger.debug("Nothing done.")
+        return
+
+    # flip input around
+    if inputpids is not None:
+        inv_inputpids = np.linalg.inv(inputpids)
+
+    # build new grid
+    for q2, elem in eko.items():
+        ops = elem.operator
+        errs = elem.error
+        if targetpids is not None and inputpids is None:
+            ops = np.einsum("ca,ajbk->cjbk", targetpids, ops)
+            errs = np.einsum("ca,ajbk->cjbk", targetpids, errs)
+        elif inputpids is not None and targetpids is None:
+            ops = np.einsum("ajbk,bd->ajdk", ops, inv_inputpids)
+            errs = np.einsum("ajbk,bd->ajdk", errs, inv_inputpids)
+        else:
+            ops = np.einsum("ca,ajbk,bd->cjdk", targetpids, ops, inv_inputpids)
+            errs = np.einsum("ca,ajbk,bd->cjdk", targetpids, errs, inv_inputpids)
+        elem.operator = ops
+        elem.error = errs
+
+        eko[q2] = elem
+
+    # drop PIDs - keeping them int nevertheless
+    # there is no meaningful way to set them in general, after rotation
+    if inputpids is not None:
+        eko.rotations._inputpids = np.array([0] * len(eko.rotations.inputpids))
+    if targetpids is not None:
+        eko.rotations._targetpids = np.array([0] * len(eko.rotations.targetpids))
+
+
+def to_evol(eko: EKO, source: bool = True, target: bool = False):
+    """Rotate the operator into evolution basis.
+
+    This also assigns the pids. The operation is in-place.
+
+    Parameters
+    ----------
+    source : bool
+        rotate on the input tensor
+    target : bool
+        rotate on the output tensor
+
+    """
+    # rotate
+    inputpids = br.rotate_flavor_to_evolution if source else None
+    targetpids = br.rotate_flavor_to_evolution if target else None
+    flavor_reshape(eko, inputpids=inputpids, targetpids=targetpids)
+    # assign pids
+    if source:
+        eko.rotations._inputpids = br.evol_basis_pids
+    if target:
+        eko.rotations._targetpids = br.evol_basis_pids
diff --git a/src/eko/output/struct.py b/src/eko/output/struct.py
new file mode 100644
index 000000000..239336f3e
--- /dev/null
+++ b/src/eko/output/struct.py
@@ -0,0 +1,880 @@
+# -*- coding: utf-8 -*-
+"""Define output representation structures."""
+import contextlib
+import copy
+import io
+import logging
+import os
+import pathlib
+import shutil
+import subprocess
+import tarfile
+import tempfile
+import time
+from dataclasses import dataclass, fields
+from typing import BinaryIO, Dict, Literal, Optional, Tuple
+
+import lz4.frame
+import numpy as np
+import numpy.lib.npyio as npyio
+import numpy.typing as npt
+import yaml
+
+from .. import basis_rotation as br
+from .. import interpolation
+from .. import version as vmod
+
+logger = logging.getLogger(__name__)
+
+THEORYFILE = "theory.yaml"
+OPERATORFILE = "operator.yaml"
+RECIPESDIR = "recipes"
+PARTSDIR = "parts"
+OPERATORSDIR = "operators"
+
+
+class DictLike:
+    """Dictionary compatibility base class, for dataclasses.
+
+    This class adds compatibility to import and export from Python :class:`dict`,
+    in such a way to support serialization interfaces working with them.
+
+    Some collections and scalar objects are normalized to native Python
+    structures, in order to simplify the on-disk representation.
+
+    """
+
+    def __init__(self, **kwargs):
+        """Empty initializer."""
+
+    @classmethod
+    def from_dict(cls, dictionary):
+        """Initialize dataclass object from raw dictionary.
+
+        Parameters
+        ----------
+        dictionary: dict
+            the dictionary to be converted to :class:`DictLike`
+
+        Returns
+        -------
+        DictLike
+            instance with `dictionary` content loaded as attributes
+
+        """
+        return cls(**dictionary)
+
+    @property
+    def raw(self):
+        """Convert dataclass object to raw dictionary.
+
+        Normalize:
+
+        - :class:`np.ndarray` to lists (possibly nested)
+        - scalars to the corresponding built-in type (e.g. :class:`float`)
+        - :class:`tuple` to lists
+        - :class:`interpolation.XGrid` to the intrinsic serialization format
+
+        Returns
+        -------
+        dict
+            dictionary representation
+
+        """
+        dictionary = {}
+        for field in fields(self):
+            value = getattr(self, field.name)
+
+            # replace numpy arrays with lists
+            if isinstance(value, np.ndarray):
+                value = value.tolist()
+            # replace numpy scalars with python ones
+            elif isinstance(value, float):
+                value = float(value)
+            elif isinstance(value, interpolation.XGrid):
+                value = value.dump()
+            elif isinstance(value, tuple):
+                value = list(value)
+
+            dictionary[field.name] = value
+
+        return dictionary
+
+
+@dataclass
+class Operator(DictLike):
+    """Operator representation.
+
+    To be used to hold the result of a computed 4-dim operator (from a given
+    scale to another given one).
+
+    Note
+    ----
+    IO works with streams in memory, in order to avoid intermediate write on
+    disk (keep read from and write to tar file only).
+
+    """
+
+    operator: npt.NDArray
+    """Content of the evolution operator."""
+    error: Optional[npt.NDArray] = None
+    """Errors on individual operator elements (mainly used for integration
+    error, but it can host any kind of error).
+    """
+
+    def save(self, stream: BinaryIO, compress: bool = True) -> bool:
+        """Save content of operator to bytes.
+
+        Parameters
+        ----------
+        stream: BinaryIO
+            a stream where to save the operator content (in order to be able to
+            perform the operation both on disk and in memory)
+        compress: bool
+            flag to control optional compression (default: `True`)
+
+        Returns
+        -------
+        bool
+            whether the operator was saved without the error (this also controls
+            the format: ``npz`` with errors, ``npy`` otherwise)
+
+        """
+        if self.error is None:
+            np.save(stream, self.operator)
+        else:
+            np.savez(stream, operator=self.operator, error=self.error)
+        stream.seek(0)
+
+        # compress if requested
+        if compress:
+            compressed = lz4.frame.compress(stream.read())
+            # remove previous content
+            stream.seek(0)
+            stream.truncate()
+            # write compressed data
+            stream.write(compressed)
+            stream.seek(0)
+
+        # return the type of array dumped (i.e. 'npy' or 'npz')
+        return self.error is None
+
+    @classmethod
+    def load(cls, stream: BinaryIO, compressed: bool = True):
+        """Load operator from bytes.
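+
+        A roundtrip sketch with :meth:`save` (``myop`` is an illustrative
+        :class:`Operator` instance)::
+
+            import io
+
+            buf = io.BytesIO()
+            myop.save(buf, compress=True)
+            same = Operator.load(buf, compressed=True)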
+
+        Parameters
+        ----------
+        stream: BinaryIO
+            a stream to load the operator from (to support the operation both on
+            disk and in memory)
+        compressed: bool
+            declare whether the stream is or is not compressed (default: `True`)
+
+        Returns
+        -------
+        Operator
+            the loaded instance
+
+        """
+        if compressed:
+            stream = io.BytesIO(lz4.frame.decompress(stream.read()))
+        content = np.load(stream)
+
+        if isinstance(content, np.ndarray):
+            op = content
+            err = None
+        elif isinstance(content, npyio.NpzFile):
+            op = content["operator"]
+            err = content["error"]
+        else:
+            raise ValueError(
+                "Not possible to load operator, content format not recognized"
+            )
+
+        return cls(operator=op, error=err)
+
+
+@dataclass
+class Debug(DictLike):
+    """Debug configurations."""
+
+    skip_singlet: bool = False
+    """Whether to skip QCD singlet computation."""
+    skip_non_singlet: bool = False
+    """Whether to skip QCD non-singlet computation."""
+
+
+@dataclass
+class Configs(DictLike):
+    """Solution specific configurations."""
+
+    ev_op_max_order: Tuple[int, int]
+    """Maximum order to use in U matrices expansion.
+    Used only in ``perturbative`` solutions.
+    """
+    ev_op_iterations: int
+    """Number of intervals in which to break the global path."""
+    interpolation_polynomial_degree: int
+    """Degree of elements of the interpolation polynomial basis."""
+    interpolation_is_log: bool
+    r"""Whether to use polynomials in :math:`\log(x)`.
+    If `false`, polynomials are in :math:`x`.
+    """
+    backward_inversion: Literal["exact", "expanded"]
+    """Which method to use for backward matching conditions."""
+    n_integration_cores: int = 1
+    """Number of cores used to parallelize integration."""
+
+
+@dataclass
+class Rotations(DictLike):
+    """Rotations related configurations.
+
+    Here "Rotation" is intended in a broad sense: it includes both rotations in
+    flavor space (labeled with suffix `pids`) and in :math:`x`-space (labeled
+    with suffix `grid`).
+    Rotations in :math:`x`-space correspond to reinterpolating the result on a
+    different basis of polynomials.
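+
+    The public properties fall back to the internal ``xgrid`` and ``pids``
+    while no rotation has been logged, a sketch (values are illustrative)::
+
+        rot = Rotations(xgrid=interpolation.XGrid([0.1, 0.5, 1.0]),
+                        pids=np.array(br.flavor_basis_pids))
+        assert rot.targetgrid is rot.xgrid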
+
+    """
+
+    xgrid: interpolation.XGrid
+    """Momentum fraction internal grid."""
+    pids: npt.NDArray
+    """Array of integers, corresponding to internal PIDs."""
+    _targetgrid: Optional[interpolation.XGrid] = None
+    _inputgrid: Optional[interpolation.XGrid] = None
+    _targetpids: Optional[npt.NDArray] = None
+    _inputpids: Optional[npt.NDArray] = None
+
+    def __post_init__(self):
+        """Adjust types when loaded from serialized object."""
+        for attr in ("xgrid", "_inputgrid", "_targetgrid"):
+            value = getattr(self, attr)
+            if value is None:
+                continue
+            if isinstance(value, np.ndarray):
+                setattr(self, attr, interpolation.XGrid(value))
+            elif not isinstance(value, interpolation.XGrid):
+                setattr(self, attr, interpolation.XGrid.load(value))
+
+    @property
+    def inputpids(self) -> npt.NDArray:
+        """Provide pids expected on the input PDF."""
+        if self._inputpids is None:
+            return self.pids
+        return self._inputpids
+
+    @property
+    def targetpids(self) -> npt.NDArray:
+        """Provide pids corresponding to the output PDF."""
+        if self._targetpids is None:
+            return self.pids
+        return self._targetpids
+
+    @property
+    def inputgrid(self) -> interpolation.XGrid:
+        """Provide :math:`x`-grid expected on the input PDF."""
+        if self._inputgrid is None:
+            return self.xgrid
+        return self._inputgrid
+
+    @property
+    def targetgrid(self) -> interpolation.XGrid:
+        """Provide :math:`x`-grid corresponding to the output PDF."""
+        if self._targetgrid is None:
+            return self.xgrid
+        return self._targetgrid
+
+
+@dataclass
+class EKO:
+    """Operator interface.
+
+    This class offers an interface to an abstract operator, between memory and
+    disk.
+
+    An actual operator might be arbitrarily huge, and in particular size
+    limitations in memory are far more strict than on disk.
+    Since manually managing, for each application, the burden of off-loading
+    part of the operator might be hard and occasionally not possible (without a
+    clear picture of the internals), the library itself offers this facility.
+
+    In particular, the data format on disk has a complete specification, and
+    can hold a full operator independently of the loading procedure.
+    In order to accomplish the former goal, the remaining task of partial
+    loading is done by this class (for the Python library, other
+    implementations are possible and encouraged).
+
+    For this reason, a core component of an :class:`EKO` object is a path,
+    referring to the location on disk of the corresponding operator.
+    Any :class:`EKO` has an associated path:
+
+    - for the computed object, it corresponds to the path where the actual
+      result of the computation is already saved
+    - for a new object, it is the path at which any result of final or
+      intermediate computation is stored, as soon as it is produced
+
+    The computation can be stopped at any time, without the loss of any of the
+    intermediate results.
+
+    """
+
+    # operators cache, contains the Q2 grid information
+    _operators: Dict[float, Optional[Operator]]
+    # public attributes
+    # -----------------
+    # mandatory, identifying features
+    path: pathlib.Path
+    """Path on disk, to which this object is linked (and for which it is
+    essentially an interface).
+    """
+    Q02: float
+    """Initial scale."""
+    # collections
+    configs: Configs
+    """Specific configuration to be used during the calculation of these
+    operators.
+    """
+    rotations: Rotations
+    """Manipulation information, describing the current status of the EKO (e.g.
+    `inputgrid` and `targetgrid`).
+    """
+    debug: Debug
+    """Debug configurations."""
+    # tagging information
+    version: str = vmod.__version__
+    """Library version used to create the corresponding file."""
+    data_version: str = vmod.__data_version__
+    """Specs version, to which the file adheres."""
+
+    @property
+    def xgrid(self) -> interpolation.XGrid:
+        """Momentum fraction internal grid."""
+        return self.rotations.xgrid
+
+    @xgrid.setter
+    def xgrid(self, value: interpolation.XGrid):
+        """Set `xgrid` value."""
+        self.rotations.xgrid = value
+
+    def __post_init__(self):
+        """Validate class members."""
+        if self.path.suffix != ".tar":
+            raise ValueError("Not a valid path for an EKO")
+
+    @staticmethod
+    def opname(q2: float) -> str:
+        """Operator file name from :math:`Q^2` value."""
+        return f"{OPERATORSDIR}/{q2:8.2f}"
+
+    def __getitem__(self, q2: float) -> Operator:
+        """Retrieve operator for given :math:`Q^2`.
+
+        If the operator is not already in memory, it will be automatically
+        loaded.
+
+        Parameters
+        ----------
+        q2 : float
+            :math:`Q^2` value labeling the operator to be retrieved
+
+        Returns
+        -------
+        Operator
+            the retrieved operator
+
+        """
+        if q2 in self._operators:
+            op = self._operators[q2]
+            if op is not None:
+                return op
+
+        with tarfile.open(self.path) as tar:
+            names = list(
+                filter(lambda n: n.startswith(self.opname(q2)), tar.getnames())
+            )
+
+            if len(names) == 0:
+                raise ValueError(f"Q2 value '{q2}' not available in '{self.path}'")
+
+            name = names[0]
+            compressed = name.endswith(".lz4")
+            stream = tar.extractfile(name)
+
+            op = Operator.load(stream, compressed=compressed)
+
+        self._operators[q2] = op
+        return op
+
+    def __setitem__(self, q2: float, op: Operator, compress: bool = True):
+        """Set operator for given :math:`Q^2`.
+
+        The operator is automatically dumped to disk.
+
+        Parameters
+        ----------
+        q2: float
+            :math:`Q^2` value labeling the operator to be set
+        op: Operator
+            the operator to store
+        compress: bool
+            whether to save the operator compressed or not (default: `True`)
+
+        """
+        if not isinstance(op, Operator):
+            raise ValueError("Only an Operator can be added to an EKO")
+
+        stream = io.BytesIO()
+        without_err = op.save(stream, compress)
+        stream.seek(0)
+
+        suffix = "npy" if without_err else "npz"
+        if compress:
+            suffix += ".lz4"
+
+        info = tarfile.TarInfo(name=f"{self.opname(q2)}.{suffix}")
+        info.size = len(stream.getbuffer())
+        info.mtime = int(time.time())
+        info.mode = 436
+        # info.uname = os.getlogin()
+        # info.gname = os.getlogin()
+
+        # TODO: unfortunately Python has no native support for deleting
+        # files inside tar, so the proper way is to make that function
+        # ourselves, in the inefficient way of constructing a new archive
+        # from the existing one, but for the file to be removed
+        # at the moment, an implicit dependency on `tar` command has been
+        # introduced -> dangerous for portability
+        # since it's not raising any error, it is fine to run in any case:
+        has_file = False
+        with tarfile.open(self.path, mode="r") as tar:
+            has_file = f"operators/{q2:8.2f}.{suffix}" in tar.getnames()
+
+        if has_file:
+            subprocess.run(
+                f"tar -f {self.path.absolute()} --delete".split() + [info.name]
+            )
+        with tarfile.open(self.path, "a") as tar:
+            tar.addfile(info, fileobj=stream)
+
+        self._operators[q2] = op
+
+    def __delitem__(self, q2: float):
+        """Drop operator from memory.
+
+        This method only drops the operator from memory, and it's not expected
+        to do anything else.
+
+        Autosave is done on set, and explicit saves are performed by the
+        computation functions.
+
+        If a further explicit save is required, repeat explicit assignment::
+
+            eko[q2] = eko[q2]
+
+        This is only useful if the operator has been mutated in place, which in
+        general should be avoided, since the operator should only be the result
+        of a full computation or a library manipulation.
+
+
+        Parameters
+        ----------
+        q2 : float
+            the value of :math:`Q^2` for which the corresponding operator
+            should be dropped
+
+        """
+        self._operators[q2] = None
+
+    @contextlib.contextmanager
+    def operator(self, q2: float):
+        """Retrieve operator and discard immediately.
+
+        To be used as a contextmanager: the operator is automatically loaded as
+        usual, but after the context manager it is dropped from memory.
+
+        Parameters
+        ----------
+        q2: float
+            :math:`Q^2` value labeling the operator to be retrieved
+
+        """
+        try:
+            yield self[q2]
+        finally:
+            del self[q2]
+
+    @property
+    def Q2grid(self) -> npt.NDArray:
+        """Provide the list of :math:`Q^2` as an array."""
+        return np.array(list(self._operators))
+
+    def __iter__(self):
+        """Iterate over keys (i.e. Q2 values).
+
+        Yields
+        ------
+        float
+            q2 values
+
+        """
+        for q2 in self._operators:
+            yield q2
+
+    def items(self):
+        """Iterate operators, with minimal load.
+
+        Pay attention, this iterator:
+
+        - is not a read-only operation from the point of view of the in-memory
+          object (since the final result after iteration is no operator loaded)
+        - but it is a read-only operation from the point of view of the
+          permanent object on-disk
+
+        Yields
+        ------
+        tuple
+            couples of ``(q2, operator)``, loaded immediately before, unloaded
+            immediately after
+
+        """
+        for q2 in self.Q2grid:
+            yield q2, self[q2]
+            del self[q2]
+
+    def __contains__(self, q2: float) -> bool:
+        """Check whether :math:`Q^2` operators are present.
+
+        'Present' means, in this case, they are conceptually part of the
+        :class:`EKO`. But it tells nothing about being loaded in memory or
+        not.
+
+        Returns
+        -------
+        bool
+            the result of checked condition
+
+        """
+        return q2 in self._operators
+
+    def approx(
+        self, q2: float, rtol: float = 1e-6, atol: float = 1e-10
+    ) -> Optional[float]:
+        """Look for close enough :math:`Q^2` value in the :class:`EKO`.
+
+        Parameters
+        ----------
+        q2: float
+            value of :math:`Q^2` in whose neighborhood to look
+        rtol: float
+            relative tolerance
+        atol: float
+            absolute tolerance
+
+        Returns
+        -------
+        float or None
+            retrieved value of :math:`Q^2`, if a single one is found
+
+        Raises
+        ------
+        ValueError
+            if multiple values are found in the neighborhood
+
+        """
+        q2s = self.Q2grid
+        close = q2s[np.isclose(q2, q2s, rtol=rtol, atol=atol)]
+
+        if close.size == 1:
+            return close[0]
+        if close.size == 0:
+            return None
+        raise ValueError(f"Multiple values of Q2 have been found close to {q2}")
+
+    def unload(self):
+        """Fully unload the operators in memory."""
+        for q2 in self:
+            del self[q2]
+
+    def deepcopy(self, path: os.PathLike):
+        """Create a deep copy of current instance.
+
+        The managed on-disk object is copied as well, to the new ``path``
+        location.
+        If you don't want to copy the disk, consider using directly::
+
+            copy.deepcopy(myeko)
+
+        It will perform the exact same operation, without propagating it to the
+        disk counterpart.
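+
+        A sketch (the target path and ``myeko`` are illustrative)::
+
+            twin = myeko.deepcopy("/tmp/twin.tar")  # original archive untouched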
+
+        Parameters
+        ----------
+        path: os.PathLike
+            path where to copy the disk counterpart of the operator object
+
+        Returns
+        -------
+        EKO
+            the copy created
+
+        """
+        # return the object fully on disk, in order to avoid expensive copies in
+        # memory (they would be copied on-disk in any case)
+        self.unload()
+
+        new = copy.deepcopy(self)
+        new.path = pathlib.Path(path)
+        shutil.copy2(self.path, new.path)
+
+        return new
+
+    @staticmethod
+    def bootstrap(dirpath: os.PathLike, theory: dict, operator: dict):
+        """Create directory structure.
+
+        Parameters
+        ----------
+        dirpath: os.PathLike
+            path to create the directory into
+        theory: dict
+            theory card to be dumped
+        operator: dict
+            operator card to be dumped
+
+        """
+        dirpath = pathlib.Path(dirpath)
+        (dirpath / THEORYFILE).write_text(yaml.dump(theory), encoding="utf-8")
+        (dirpath / OPERATORFILE).write_text(yaml.dump(operator), encoding="utf-8")
+        (dirpath / RECIPESDIR).mkdir()
+        (dirpath / PARTSDIR).mkdir()
+        (dirpath / OPERATORSDIR).mkdir()
+
+    @staticmethod
+    def extract(path: os.PathLike, filename: str) -> str:
+        """Extract file from disk assets.
+
+        Note
+        ----
+        At the moment, it only supports text files (since it returns the
+        content as a string)
+
+        Parameters
+        ----------
+        path: os.PathLike
+            path to the disk dump
+        filename: str
+            relative path inside the archive of the file to extract
+
+        Returns
+        -------
+        str
+            file content
+
+        """
+        path = pathlib.Path(path)
+
+        with tarfile.open(path, "r") as tar:
+            fd = tar.extractfile(filename)
+            content = fd.read().decode()
+
+        return content
+
+    @property
+    def theory(self) -> dict:
+        """Provide theory card, retrieving from the dump."""
+        # TODO: make an actual attribute, move load to `theory_card`, and the
+        # type of the attribute will be `eko.runcards.TheoryCard`
+        return yaml.safe_load(self.extract(self.path, THEORYFILE))
+
+    @property
+    def theory_card(self) -> dict:
+        """Provide theory card, retrieving from the dump."""
+        # TODO: return `eko.runcards.TheoryCard`
+        return self.theory
+
+    @property
+    def operator_card(self) -> dict:
+        """Provide operator card, retrieving from the dump."""
+        # TODO: return `eko.runcards.OperatorCard`
+        return yaml.safe_load(self.extract(self.path, OPERATORFILE))
+
+    def interpolator(
+        self, mode_N: bool, use_target: bool
+    ) -> interpolation.InterpolatorDispatcher:
+        """Return associated interpolation.
+
+        Parameters
+        ----------
+        mode_N : bool
+            interpolate in N-space?
+        use_target : bool
+            use target grid? If False, use input grid
+
+        Returns
+        -------
+        interpolation.InterpolatorDispatcher
+            interpolator
+        """
+        grid = self.rotations.targetgrid if use_target else self.rotations.inputgrid
+        return interpolation.InterpolatorDispatcher(
+            grid, self.configs.interpolation_polynomial_degree, mode_N
+        )
+
+    @classmethod
+    def detached(cls, theory: dict, operator: dict, path: pathlib.Path):
+        """Build the in-memory object alone.
+
+        Note
+        ----
+        This constructor is meant for internal use, backing the usual ones (like
+        :meth:`new` or :meth:`load`), but it should not be used directly, since
+        it has no guarantee that the underlying path is valid, breaking the
+        object semantic.
+
+        Parameters
+        ----------
+        theory : dict
+            the theory card
+        operator : dict
+            the operator card
+        path : os.PathLike
+            the underlying path (it has to be a valid object, but it is not
+            guaranteed, see the note)
+
+        Returns
+        -------
+        EKO
+            the generated structure
+
+        """
+        bases = operator["rotations"]
+        for basis in ("inputgrid", "targetgrid", "inputpids", "targetpids"):
+            bases[f"_{basis}"] = bases[basis]
+            del bases[basis]
+        bases["pids"] = np.array(br.flavor_basis_pids)
+        for k in ("xgrid", "_inputgrid", "_targetgrid"):
+            if operator["rotations"][k] is None:
+                continue
+            bases[k] = interpolation.XGrid(
+                operator["rotations"][k],
+                log=operator["configs"]["interpolation_is_log"],
+            )
+
+        return cls(
+            path=path,
+            Q02=float(operator["Q0"] ** 2),
+            _operators={q2: None for q2 in operator["Q2grid"]},
+            configs=Configs.from_dict(operator["configs"]),
+            rotations=Rotations.from_dict(bases),
+            debug=Debug.from_dict(operator.get("debug", {})),
+        )
+
+    @classmethod
+    def new(cls, theory: dict, operator: dict, path: Optional[os.PathLike] = None):
+        """Make structure from runcard-like dictionary.
+
+        This constructor is made to be used with loaded runcards, in order to
+        minimize the amount of code needed to init a new object (you just have
+        to load the runcard and call this function).
+
+        Note
+        ----
+        An object is initialized with no rotations, since the role of rotations
+        is to keep the current state of the output object after manipulations
+        happened.
+        Since a new object is here in the process of being created, no rotation
+        has to be logged.
+
+        Parameters
+        ----------
+        theory : dict
+            the theory card
+        operator : dict
+            the operator card
+        path : os.PathLike
+            the underlying path (if not provided, it is created in a temporary
+            path)
+
+        Returns
+        -------
+        EKO
+            the generated structure
+
+        """
+        givenpath = path is not None
+
+        path = pathlib.Path(path if givenpath else tempfile.mkstemp(suffix=".tar")[1])
+        if path.exists():
+            if givenpath:
+                raise FileExistsError(
+                    f"File exists at given path '{path}', cannot be used for a new operator."
+                )
+            # delete the file created in case of temporary file
+            path.unlink()
+
+        with tempfile.TemporaryDirectory() as td:
+            td = pathlib.Path(td)
+            cls.bootstrap(td, theory=theory, operator=operator)
+
+            with tarfile.open(path, mode="w") as tar:
+                for element in td.glob("*"):
+                    tar.add(element, arcname=element.name)
+
+            shutil.rmtree(td)
+
+        eko = cls.detached(theory, operator, path=path)
+        logger.info(f"New operator created at path '{path}'")
+        return eko
+
+    @classmethod
+    def load(cls, path: os.PathLike):
+        """Load dump into an :class:`EKO` object.
+
+        Parameters
+        ----------
+        path : os.PathLike
+            path to the dump to load
+
+        Returns
+        -------
+        EKO
+            the loaded instance
+
+        """
+        path = pathlib.Path(path)
+        if not tarfile.is_tarfile(path):
+            raise ValueError("EKO: the corresponding file is not a valid tar archive")
+
+        theory = yaml.safe_load(cls.extract(path, THEORYFILE))
+        operator = yaml.safe_load(cls.extract(path, OPERATORFILE))
+
+        eko = cls.detached(theory, operator, path=path)
+        logger.info(f"Operator loaded from path '{path}'")
+        return eko
+
+    @property
+    def raw(self) -> dict:
+        """Provide raw representation of the full content.
+
+        Returns
+        -------
+        dict
+            nested dictionary, storing all the values in the structure, but the
+            operators themselves
+
+        """
+        return dict(
+            path=str(self.path),
+            Q0=float(np.sqrt(self.Q02)),
+            Q2grid=self.Q2grid.tolist(),
+            configs=self.configs.raw,
+            rotations=self.rotations.raw,
+            debug=self.debug.raw,
+        )
+
+    def __del__(self):
+        """Destroy the memory structure gracefully."""
+        self.unload()
diff --git a/src/eko/runcards.py b/src/eko/runcards.py
new file mode 100644
index 000000000..0c46b51ed
--- /dev/null
+++ b/src/eko/runcards.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+"""Structures to hold runcard information.
+
+.. todo::
+    Inherit from :class:`eko.output.struct.DictLike`, to get
+    :meth:`eko.output.struct.DictLike.raw` for free and avoid duplication.
+
+"""
+from dataclasses import dataclass
+from typing import Tuple
+
+
+@dataclass
+class TheoryCard:
+    """Represent theory card content."""
+
+    order: Tuple[int, int]
+    """Perturbative order tuple, ``(QCD, QED)``."""
+
+    @classmethod
+    def load(cls, card: dict):
+        """Load from runcard raw content.
+
+        Parameters
+        ----------
+        card: dict
+            content of the theory runcard
+
+        Returns
+        -------
+        TheoryCard
+            the loaded instance
+
+        """
+        return cls(order=tuple(card["order"]))
+
+
+@dataclass
+class OperatorCard:
+    """Represent operator card content."""
+
+    xgrid: list
+    """Grid defining internal interpolation basis."""
+
+    @classmethod
+    def load(cls, card: dict):
+        """Load from runcard raw content.
+
+        Parameters
+        ----------
+        card: dict
+            content of the operator runcard
+
+        Returns
+        -------
+        OperatorCard
+            the loaded instance
+
+        """
+        return cls(xgrid=card["xgrid"])
diff --git a/src/eko/runner.py b/src/eko/runner.py
index 2270f737d..37f37a3e6 100644
--- a/src/eko/runner.py
+++ b/src/eko/runner.py
@@ -1,32 +1,30 @@
 # -*- coding: utf-8 -*-
-"""
-    This file contains the main application class of eko
-"""
+"""This file contains the main application class of eko."""
 import copy
 import logging
+from typing import Optional
 
 import numpy as np
 
-from . import basis_rotation as br
 from . import compatibility, interpolation, msbar_masses
 from .couplings import Couplings
 from .evolution_operator.grid import OperatorGrid
-from .output import Output
+from .output import EKO, Operator, manipulate
 from .thresholds import ThresholdsAtlas
 
 logger = logging.getLogger(__name__)
 
 
 class Runner:
-    """
-    Represents a single input configuration.
+    """Represents a single input configuration.
 
     For details about the configuration, see :doc:`here `
 
-    Parameters
+    Attributes
     ----------
-    setup : dict
-        input configurations
+    setup : dict
+        input configurations
+
     """
 
     banner = r"""
@@ -39,82 +37,101 @@ class Runner:
o888ooooood8 o888o     o888o `Y8bood8P'
"""
 
-    def __init__(self, theory_card, operators_card):
-        self.out = Output()
+    def __init__(self, theory_card: dict, operators_card: dict):
+        """Initialize runner.
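+
+        A minimal sketch (the two cards are illustrative runcard
+        dictionaries)::
+
+            runner = Runner(theory_card, operators_card)
+            evolved = runner.get_output()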
+ + Parameters + ---------- + theory_card: dict + theory parameters and options + operators_card: dict + operator specific options + """ new_theory, new_operators = compatibility.update(theory_card, operators_card) # Store inputs self._theory = new_theory # setup basis grid - bfd = interpolation.InterpolatorDispatcher.from_dict(new_operators) - self.out.update(bfd.to_dict()) + bfd = interpolation.InterpolatorDispatcher( + xgrid=interpolation.XGrid( + new_operators["rotations"]["xgrid"], + log=new_operators["configs"]["interpolation_is_log"], + ), + polynomial_degree=new_operators["configs"][ + "interpolation_polynomial_degree" + ], + ) + # setup the Threshold path, compute masses if necessary masses = None if new_theory["HQ"] == "MSBAR": masses = msbar_masses.compute(new_theory) tc = ThresholdsAtlas.from_dict(new_theory, masses=masses) - self.out["q2_ref"] = float(tc.q2_ref) # strong coupling sc = Couplings.from_dict(new_theory, masses=masses) # setup operator grid - self.op_grid = OperatorGrid.from_dict( - new_theory, - new_operators, - tc, - sc, - bfd, - ) - self.out["inputgrid"] = bfd.xgrid_raw - self.out["targetgrid"] = bfd.xgrid_raw - self.post_process = dict( - inputgrid=new_operators.get("inputgrid", bfd.xgrid_raw), - targetgrid=new_operators.get("targetgrid", bfd.xgrid_raw), - inputbasis=new_operators.get("inputbasis"), - targetbasis=new_operators.get("targetbasis"), - ) + self.op_grid = OperatorGrid.from_dict(new_theory, new_operators, tc, sc, bfd) - def get_output(self): - """ - Collects all data for output (to run the evolution) + # save bases manipulations for a post processing step + rot = operators_card.get("rotations", {}) + self.post_process = {} + for key in ("inputgrid", "targetgrid", "inputpids", "targetpids"): + self.post_process[key] = rot.get(key, None) + new_operators["rotations"][key] = None + + self.out = EKO.new(theory=theory_card, operator=new_operators) + + def get_output(self) -> EKO: + """Run evolution and generate output operator. + + Two steps are applied sequentially: + + 1. evolution is performed, computing the evolution operator in an + internal flavor and x-space basis + 2. 
bases manipulations specified in the runcard are applied Returns ------- - ret : eko.output.Output - output instance + EKO + output instance + """ # add all operators - Q2grid = {} - self.out["inputpids"] = br.flavor_basis_pids - self.out["targetpids"] = br.flavor_basis_pids - self.out["inputgrid"] = self.out["interpolation_xgrid"] - self.out["targetgrid"] = self.out["interpolation_xgrid"] for final_scale, op in self.op_grid.compute().items(): - Q2grid[float(final_scale)] = op - self.out["Q2grid"] = Q2grid + self.out[float(final_scale)] = Operator.from_dict(op) + + def similar_to_none(name: str) -> Optional[np.ndarray]: + grid = self.post_process[name] + + default = self.out.xgrid.grid if "grid" in name else self.out.rotations.pids + if grid is None or ( + len(grid) == default.size and np.allclose(grid, default, atol=1e-12) + ): + return None + + return np.array(grid) + # reshape xgrid - inputgrid = ( - self.post_process["inputgrid"] - if self.post_process["inputgrid"] is not self.out["interpolation_xgrid"] - else None - ) - targetgrid = ( - self.post_process["targetgrid"] - if self.post_process["targetgrid"] is not self.out["interpolation_xgrid"] - else None - ) + inputgrid = similar_to_none("inputgrid") + targetgrid = similar_to_none("targetgrid") if inputgrid is not None or targetgrid is not None: - self.out.xgrid_reshape(targetgrid=targetgrid, inputgrid=inputgrid) + if inputgrid is not None: + inputgrid = interpolation.XGrid(inputgrid) + if targetgrid is not None: + targetgrid = interpolation.XGrid(targetgrid) + manipulate.xgrid_reshape( + self.out, targetgrid=targetgrid, inputgrid=inputgrid + ) # reshape flavors - inputbasis = self.post_process["inputbasis"] - if inputbasis is not None: - inputbasis = np.array(inputbasis) - targetbasis = self.post_process["targetbasis"] - if targetbasis is not None: - targetbasis = np.array(targetbasis) - if inputbasis is not None or targetbasis is not None: - self.out.flavor_reshape(targetbasis=targetbasis, inputbasis=inputbasis) + inputpids = similar_to_none("inputpids") + targetpids = similar_to_none("targetpids") + if inputpids is not None or targetpids is not None: + manipulate.flavor_reshape( + self.out, targetpids=targetpids, inputpids=inputpids + ) + return copy.deepcopy(self.out) diff --git a/src/eko/version.py b/src/eko/version.py index f09cc807b..0aff48f78 100644 --- a/src/eko/version.py +++ b/src/eko/version.py @@ -1,2 +1,3 @@ # -*- coding: utf-8 -*- __version__ = "0.0.0" +__data_version__ = "0.0.1" diff --git a/src/ekobox/__init__.py b/src/ekobox/__init__.py index 3ce63bd8f..3a140e99a 100644 --- a/src/ekobox/__init__.py +++ b/src/ekobox/__init__.py @@ -1,4 +1,4 @@ # -*- coding: utf-8 -*- from ekomark import apply -from . import evol_pdf, gen_info, gen_op, gen_theory +from . import evol_pdf, info_file, operators_card, theory_card diff --git a/src/ekobox/cli/__init__.py b/src/ekobox/cli/__init__.py new file mode 100644 index 000000000..95fbff988 --- /dev/null +++ b/src/ekobox/cli/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +from . 
import run
+from ._base import command
diff --git a/src/ekobox/cli/_base.py b/src/ekobox/cli/_base.py
new file mode 100644
index 000000000..b53d3aff2
--- /dev/null
+++ b/src/ekobox/cli/_base.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+import click
+
+
+@click.group()
+def command():
+    pass
diff --git a/src/ekobox/cli/run.py b/src/ekobox/cli/run.py
new file mode 100644
index 000000000..0afd90bf3
--- /dev/null
+++ b/src/ekobox/cli/run.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+import click
+
+from ._base import command
+
+
+@command.command("run")
+@click.argument("q2")
+def subcommand(q2):
+    """Launch EKO computation.
+
+    Parameters
+    ----------
+    q2: sequence[float]
+        sequence of Q2 values to compute
+
+    """
+    print(f"Running EKO for {q2}")
diff --git a/src/ekobox/evol_pdf.py b/src/ekobox/evol_pdf.py
index ca019c69b..66eec9025 100644
--- a/src/ekobox/evol_pdf.py
+++ b/src/ekobox/evol_pdf.py
@@ -1,11 +1,15 @@
 # -*- coding: utf-8 -*-
+"""Tools to evolve actual PDFs."""
 import pathlib
 
+import numpy as np
+
 import eko
+import eko.output.legacy
 from eko import basis_rotation as br
 from ekomark import apply
 
-from . import gen_info, genpdf
+from . import genpdf, info_file
 
 
 def evolve_pdfs(
@@ -19,54 +23,41 @@ def evolve_pdfs(
     name="Evolved_PDF",
     info_update=None,
 ):
-    """
-    This function evolves an initial_PDF using a theory card and an operator card
-    and dump the evolved PDF in lhapdf format
+    """Evolves one or several initial_PDFs and dumps the evolved PDFs in lhapdf format.
 
     Parameters
     ----------
-    initial_PDF_list : list(lhapdf object)
-        list of PDF members to be evolved
-
-    theory_card : dict
-        theory card
-
-    operators_card : dict
-        operators card
-
-    path : str
-        path to cached eko output (if "None" it will be recomputed)
-
-    store_path : str
-        path where the eko is stored (if "None" will not be saved)
-
-    targetgrid : list(float)
-        target x-grid (if different from input x-grid)
-
-    install : bool
-        set whether to install evolved PDF to lhapdf directory
-
-    name : str
-        set name of evolved PDF
-
-    info_update : dict
-        dict of info to add or update to default info file
-
+    initial_PDF_list : list(lhapdf object)
+        list of PDF members to be evolved
+    theory_card : dict
+        theory card
+    operators_card : dict
+        operators card
+    path : str
+        path to cached eko output (if "None" it will be recomputed)
+    store_path : str
+        path where the eko is stored (if "None" will not be saved)
+    targetgrid : list(float)
+        target x-grid (if different from input x-grid)
+    install : bool
+        set whether to install evolved PDF to lhapdf directory
+    name : str
+        set name of evolved PDF
+    info_update : dict
+        dict of info to add or update to default info file
     """
     eko_output = None
     if path is not None:
         my_path = pathlib.Path(path)
         if my_path.is_dir():
-            ops_id = f"o{operators_card['hash'][:6]}_t{theory_card['hash'][:6]}.tar"
-            ops_id_path = pathlib.Path(ops_id)
-            outpath = my_path / ops_id_path.relative_to(ops_id_path.anchor)
-            eko_output = eko.output.Output.load_tar(outpath)
+            outpath = my_path / ekofileid(theory_card, operators_card)
+            eko_output = eko.output.legacy.load_tar(outpath)
         else:
-            eko_output = eko.output.Output.load_tar(my_path)
+            eko_output = eko.output.legacy.load_tar(my_path)
     else:
         eko_output = eko.run_dglap(theory_card, operators_card)
         if store_path is not None:
-            eko_output.dump_tar(store_path)
+            eko.output.legacy.dump_tar(eko_output, store_path)
 
     evolved_PDF_list = []
     for initial_PDF in initial_PDF_list:
@@ -78,7 +69,7 @@ def evolve_pdfs(
         info_update = {}
     info_update["XMin"] = targetgrid[0]
info_update["XMax"] = targetgrid[-1] - info = gen_info.create_info_file( + info = info_file.build( theory_card, operators_card, len(evolved_PDF_list), @@ -92,7 +83,7 @@ def evolve_pdfs( * evolved_PDF[Q2]["pdfs"][pid][targetgrid.index(x)], xgrid=targetgrid, Q2grid=operators_card["Q2grid"], - pids=br.flavor_basis_pids, + pids=np.array(br.flavor_basis_pids), ) # all_blocks will be useful in case there will be necessity to dump many blocks # for a single member @@ -105,28 +96,20 @@ def evolve_pdfs( genpdf.install_pdf(name) -def gen_out(theory_card, op_card, path=None): +def ekofileid(theory_card, operators_card): """ - Generates EKO output from theory and operators cards and, if requested, - dumps it in tar format + Return a common filename composed by the hashes. Parameters ---------- - theory_card : dict - theory card - op_card : dict - operators card - path : str - path of dumped output (if "None" output is not dumped) + theory_card : dict + theory card + operators_card : dict + operators card Returns ------- - : eko.output.Output - eko output + str + file name """ - eko_output = eko.run_dglap(theory_card, op_card) - if path is not None: - ops_id = f"o{op_card['hash'][:6]}_t{theory_card['hash'][:6]}" - path = f"{path}/{ops_id}.tar" - eko_output.dump_tar(path) - return eko_output + return f"o{operators_card['hash'][:6]}_t{theory_card['hash'][:6]}.tar" diff --git a/src/ekobox/genpdf/__init__.py b/src/ekobox/genpdf/__init__.py index 496ef9d0b..4df6caf96 100644 --- a/src/ekobox/genpdf/__init__.py +++ b/src/ekobox/genpdf/__init__.py @@ -11,20 +11,37 @@ from . import export, flavors, load -def take_data(parent_pdf_set=None, members=False): +def take_data(parent_pdf_set=None, members=False, xgrid=None, Q2grid=None): """ - Auxiliary function for generate_pdf. It provides the info, the heads - of the member files and the blocks to be generated to generate_pdf. + Auxiliary function for `generate_pdf`. + + It provides the info, the heads of the member files and the blocks + to be generated to `generate_pdf`. 
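+
+    A call sketch (the set name is illustrative and assumed to be installed
+    in LHAPDF)::
+
+        info, heads, blocks = take_data("myMSTW2008nlo90cl", members=False)
+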
     Parameters
     ----------
-    parent_pdf_set :
-        the PDF set to be used as parent
+    parent_pdf_set : None or str or dict
+        the PDF set to be used as parent set
     members : bool
-        if true every member of the parent are loaded
+        if true every member of the parent is loaded
+    xgrid : list(float)
+        produced x grid if given
+    Q2grid : list(float)
+        produced Q2 grid if given
+
+    Returns
+    -------
+    info : dict
+        info dictionary
+    heads : list(str)
+        heads of member files if necessary
+    blocks : list(dict)
+        data blocks
     """
-    xgrid = np.geomspace(1e-9, 1, 240)
-    Q2grid = np.geomspace(1.3, 1e5, 35)
+    if xgrid is None:
+        xgrid = np.geomspace(1e-9, 1, 240)
+    if Q2grid is None:
+        Q2grid = np.geomspace(1.3, 1e5, 35)
     # collect blocks
     all_blocks = []
     info = None
@@ -44,9 +61,9 @@ def take_data(parent_pdf_set=None, members=False):
         info = load.load_info_from_file(parent_pdf_set)
         # iterate on members
         for m in range(int(info["NumMembers"])):
-            dat = load.load_blocks_from_file(parent_pdf_set, m)
-            heads.append(dat[0])
-            all_blocks.append(dat[1])
+            head, blocks = load.load_blocks_from_file(parent_pdf_set, m)
+            heads.append(head)
+            all_blocks.append(blocks)
             if not members:
                 break
     elif isinstance(parent_pdf_set, dict):
@@ -65,37 +82,44 @@ def take_data(parent_pdf_set=None, members=False):
         )
     else:
         raise ValueError("Unknown parent pdf type")
-    return heads, info, all_blocks
+    return info, heads, all_blocks
 
 
 def generate_pdf(
-    name, labels, parent_pdf_set=None, members=False, info_update=None, install=False
+    name,
+    labels,
+    parent_pdf_set=None,
+    members=False,
+    info_update=None,
+    install=False,
+    xgrid=None,
+    Q2grid=None,
 ):
     """
     Generate a new PDF from a parent PDF with a set of flavors.
 
-    If parent_pdf_set is the name of an available PDF set,
+    If `parent_pdf_set` is the name of an available PDF set,
     it will be used as parent. In order to use the toy PDF
-    as parent, it is enough to set parent_pdf_set = "toy" or "toylh".
-    If parent_pdf_set is not specified, a debug PDF constructed as
-    x * (1-x) for every flavor will be usedas parent.
+    as parent, it is enough to set `parent_pdf_set` to "toy" or "toylh".
+    If `parent_pdf_set` is not specified, a debug PDF constructed as
+    x * (1-x) for every flavor will be used as parent.
 
     It is also possible to provide custom functions for each flavor
-    in the form of a dictionary: {pid: f(x,Q2)}.
+    in the form of a dictionary: `{pid: f(x,Q2)}`.
 
-    In labels it is possible to pass a list of PIDs or evolution basis
+    With `labels` it is possible to pass a list of PIDs or evolution basis
     combinations to keep in the generated PDF. In order to project
     on custom combinations of PIDs, it is also possible to pass a list
     containing the desired factors for each flavor.
 
    The default behaviour is to generate only one member for a PDF set
-    (the zero member) but it can be changed setting to True the member flag.
+    (the zero member) but it can be changed by setting the `members` flag to True.
 
-    The info_update argument is a dictionary and provide to the user a way
-    to change the info file of the generated PDF set. If a key of info_update
+    The `info_update` argument is a dictionary that provides the user a way
+    to change the info file of the generated PDF set. If a key of `info_update`
     matches with one key of the standard info file, the information are
     updated, otherwise they are simply added.
 
-    Turning True the value of the install flag, it is possible to autmatically
+    By setting the `install` flag to True, it is possible to automatically
     install the generated PDF to the lhapdf directory. By default install
     is False.
 
     Parameters
@@ -110,6 +134,10 @@ def generate_pdf(
         iterate on members
     install : bool
         install on LHAPDF path
+    xgrid : list(float)
+        produced x grid if given
+    Q2grid : list(float)
+        produced Q2 grid if given
 
     Examples
     --------
@@ -126,7 +154,7 @@
     through the pure-singlet contributions (starting at |NNLO|)
 
     >>> from eko import basis_rotation as br
-    >>> from ekobox.tools import genpdf
+    >>> from ekobox import genpdf
    >>> import numpy as np
     >>> anti_qed_singlet = np.zeros_like(br.flavor_basis_pids, dtype=np.float_)
     >>> anti_qed_singlet[br.flavor_basis_pids.index(1)] = -4
@@ -147,7 +175,9 @@ def generate_pdf(
     flavor_combinations = flavors.pid_to_flavor(labels)
 
     # labels = verify_labels(args.labels)
-    heads, info, all_blocks = take_data(parent_pdf_set=parent_pdf_set, members=members)
+    info, heads, all_blocks = take_data(
+        parent_pdf_set, members, xgrid=xgrid, Q2grid=Q2grid
+    )
 
     # filter the PDF
     new_all_blocks = []
@@ -156,10 +186,7 @@ def generate_pdf(
 
     # changing info file according to user choice
     if info_update is not None:
-        if isinstance(info_update, dict):
-            info.update(info_update)
-        else:
-            raise TypeError("Info to update are not in a dictionary format")
+        info.update(info_update)
 
     # write
     info["Flavors"] = [int(pid) for pid in br.flavor_basis_pids]
     info["NumFlavors"] = len(br.flavor_basis_pids)
@@ -190,8 +217,8 @@ def install_pdf(name):
     print(f"install_pdf {name}")
     target = pathlib.Path(lhapdf.paths()[0])
     src = pathlib.Path(name)
-    if not src.exists():
-        raise FileExistsError(src)
+    # shutil.move only accepts paths since 3.9 so we need to cast
+    # https://docs.python.org/3/library/shutil.html?highlight=shutil#shutil.move
     shutil.move(str(src), str(target))
 
 
@@ -212,7 +239,7 @@ def generate_block(xfxQ2, xgrid, Q2grid, pids):
 
     Returns
     -------
-    dict :
+    dict
        PDF block
     """
     block = dict(Q2grid=Q2grid, pids=pids, xgrid=xgrid)
diff --git a/src/ekobox/genpdf/export.py b/src/ekobox/genpdf/export.py
index 3a7a02de3..8f906513f 100644
--- a/src/ekobox/genpdf/export.py
+++ b/src/ekobox/genpdf/export.py
@@ -126,9 +126,7 @@ def dump_set(name, info, member_blocks, pdf_type_list=None):
     """
     dump_info(name, info)
     for mem, blocks in enumerate(member_blocks):
-        if not isinstance(pdf_type_list, list):
-            dump_blocks(name, mem, blocks)
-        elif len(pdf_type_list) == 0:
+        if not isinstance(pdf_type_list, list) or len(pdf_type_list) == 0:
             dump_blocks(name, mem, blocks)
         else:
             dump_blocks(name, mem, blocks, pdf_type=pdf_type_list[mem])
diff --git a/src/ekobox/genpdf/flavors.py b/src/ekobox/genpdf/flavors.py
index 116443419..121d28c15 100644
--- a/src/ekobox/genpdf/flavors.py
+++ b/src/ekobox/genpdf/flavors.py
@@ -129,6 +129,7 @@ def is_pid_labels(labels):
     except (ValueError, TypeError):
         return False
     for label in labels:
+        # label might still be a list (for general projection)
         if not isinstance(label, np.int_):
             return False
         if label not in br.flavor_basis_pids:
diff --git a/src/ekobox/genpdf/load.py b/src/ekobox/genpdf/load.py
index 4c1112b2d..de5322288 100644
--- a/src/ekobox/genpdf/load.py
+++ b/src/ekobox/genpdf/load.py
@@ -23,7 +23,7 @@ def load_info_from_file(pdfset_name):
 
     Returns
     -------
-    dict :
+    dict
        info dictionary
     """
     import lhapdf  # pylint: disable=import-error, import-outside-toplevel
@@ -55,13 +55,10 @@ def load_blocks_from_file(pdfset_name, member):
     """
 
import lhapdf # pylint: disable=import-error, import-outside-toplevel - pdf = lhapdf.mkPDF(pdfset_name, member) src = pathlib.Path(lhapdf.paths()[0]) / pdfset_name # read actual file cnt = [] - with open( - src / f"{pdfset_name}_{pdf.memberID:04d}.dat", "r", encoding="utf-8" - ) as o: + with open(src / f"{pdfset_name}_{member:04d}.dat", "r", encoding="utf-8") as o: cnt = o.readlines() # file head head = cnt[0] diff --git a/src/ekobox/gen_info.py b/src/ekobox/info_file.py similarity index 77% rename from src/ekobox/gen_info.py rename to src/ekobox/info_file.py index 7deab82e9..8476d0bd5 100644 --- a/src/ekobox/gen_info.py +++ b/src/ekobox/info_file.py @@ -1,28 +1,28 @@ # -*- coding: utf-8 -*- +"""LHAPDF info file utilities.""" import copy import math from .genpdf import load -def create_info_file(theory_card, operators_card, num_members, info_update): - """ - Generate a lhapdf info file from theory and operators card +def build(theory_card, operators_card, num_members, info_update): + """Generate a lhapdf info file from theory and operators card. Parameters ---------- - theory_card : dict - theory card - operators_card : dict - operators_card - num_members : int - number of pdf set members - info_update : dict - info to update + theory_card : dict + theory card + operators_card : dict + operators_card + num_members : int + number of pdf set members + info_update : dict + info to update Returns ------- - : dict + dict info file in lhapdf format """ template_info = copy.deepcopy(load.template_info) @@ -32,6 +32,7 @@ def create_info_file(theory_card, operators_card, num_members, info_update): template_info.update(info_update) template_info["NumFlavors"] = 14 template_info["Flavors"] = [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 21, 22] + # TODO actually point to input grid template_info["XMin"] = operators_card["interpolation_xgrid"][0] template_info["XMax"] = operators_card["interpolation_xgrid"][-1] template_info["NumMembers"] = num_members diff --git a/src/ekobox/gen_op.py b/src/ekobox/operators_card.py similarity index 57% rename from src/ekobox/gen_op.py rename to src/ekobox/operators_card.py index 9193d2c15..afcfe3332 100644 --- a/src/ekobox/gen_op.py +++ b/src/ekobox/operators_card.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Tools to generate operator cards.""" import copy import yaml @@ -7,24 +8,26 @@ from ekomark.data import operators -def gen_op_card(Q2grid, update=None, name=None): - """ - Generates an operator card with some mandatory user choice +def generate(Q2grid, update=None, name=None): + """Generate operators card. + + Generates an operators card with some mandatory user choice (in this case only the Q2 grid) and some default values which - can be changed by the update input dict + can be changed by the update input dict. Parameters ---------- - Q2grid : list(float) - grid for Q2 - update : dict - dictionary of info to update in op. card - name : str - name of exported op.card (if name not None) + Q2grid : list(float) + grid for Q2 + update : dict + dictionary of info to update in op. 
card + name : str + name of exported op.card (if name not None) + Returns ------- - : dict - operator card + dict + operators card """ # Constructing the dictionary with some default value def_op = copy.deepcopy(operators.default_card) @@ -34,44 +37,42 @@ def gen_op_card(Q2grid, update=None, name=None): for k in update.keys(): if k not in def_op.keys(): raise ValueError("Provided key not in operators card") - def_op.update(update) + for key, value in update.items(): + def_op[key] = value serialized = sql.serialize(def_op) def_op["hash"] = (sql.add_hash(serialized))[-1] if name is not None: - export_op_card(name, def_op) + dump(name, def_op) return def_op -def export_op_card(name, op): - """ - Export the operators card in the current directory +def dump(name, op): + """Export the operators card. Parameters ---------- name : str - name of the op. card to export - + name of the operators card to export op : dict - op card + operators card """ target = f"{name}.yaml" with open(target, "w", encoding="utf-8") as out: yaml.safe_dump(op, out) -def import_op_card(path): - """ - Import the operators card specified by path +def load(path): + """Import the operators card. Parameters ---------- - path : str - path to op. card in yaml format + path : str + path to operators card in yaml format Returns ------- - : dict - op card + dict + operators card """ with open(path, "r", encoding="utf-8") as o: op = yaml.safe_load(o) diff --git a/src/ekobox/gen_theory.py b/src/ekobox/theory_card.py similarity index 79% rename from src/ekobox/gen_theory.py rename to src/ekobox/theory_card.py index 69fba7cdd..76118a3c9 100644 --- a/src/ekobox/gen_theory.py +++ b/src/ekobox/theory_card.py @@ -5,33 +5,29 @@ from banana.data import sql, theories -def gen_theory_card(pto, initial_scale, update=None, name=None): +def generate(pto, initial_scale, update=None, name=None): """ Generates a theory card with some mandatory user choice and some default values which can be changed by the update input dict Parameters ---------- - pto : int perturbation theory order initial_scale: float - initial scale of evolution + initial scale of evolution [GeV] update : dict info to update to default theory card name : str - name of exported theory card (if name not None ) + name of exported theory card (if name is not None ) Returns ------- - - : dict - theory card + dict + theory card """ # Constructing the dictionary with some default values theory = copy.deepcopy(theories.default_card) - # delete unuseful member - del theory["FNS"] # Adding the mandatory inputs theory["PTO"] = pto theory["Q0"] = initial_scale @@ -42,13 +38,13 @@ def gen_theory_card(pto, initial_scale, update=None, name=None): raise ValueError("Provided key not in theory card") theory.update(update) serialized = sql.serialize(theory) - theory["hash"] = (sql.add_hash(serialized))[-1] + theory["hash"] = sql.add_hash(serialized)[-1] if name is not None: - export_theory_card(name, theory) + dump(name, theory) return theory -def export_theory_card(name, theory): +def dump(name, theory): """ Export the theory card in the current directory @@ -65,7 +61,7 @@ def export_theory_card(name, theory): yaml.safe_dump(theory, out) -def import_theory_card(path): +def load(path): """ Import the theory card specified by path @@ -76,7 +72,7 @@ def import_theory_card(path): Returns ------- - : dict + dict theory card """ with open(path, "r", encoding="utf-8") as o: diff --git a/src/ekobox/utils.py b/src/ekobox/utils.py index e228d7cc4..13622046c 100644 --- a/src/ekobox/utils.py +++ 
b/src/ekobox/utils.py @@ -3,10 +3,14 @@ import numpy as np +from eko.output import EKO, Operator + # TODO: add a control on the theory (but before we need to implement another # kind of output which includes the theory and operator runcards) -def ekos_product(eko_ini, eko_fin, in_place=True): +def ekos_product( + eko_ini: EKO, eko_fin: EKO, rtol: float = 1e-6, atol: float = 1e-10, in_place=True +) -> EKO: """Returns the product of two ekos Parameters @@ -15,6 +19,10 @@ def ekos_product(eko_ini, eko_fin, in_place=True): initial eko operator eko_fin : eko.output.Output final eko operator + rtol : float + relative tolerance on Q2, used to check compatibility + atol : float + absolute tolerance on Q2, used to check compatibility in_place : bool do operation in place, modifying input arrays @@ -24,19 +32,20 @@ def ekos_product(eko_ini, eko_fin, in_place=True): eko operator """ - if eko_fin["q2_ref"] not in eko_ini["Q2grid"].keys(): + q2match = eko_ini.approx(eko_fin.Q02, rtol=rtol, atol=atol) + if q2match is None: raise ValueError( "Initial Q2 of final eko operator does not match any final Q2 in" " the initial eko operator" ) - ope1 = eko_ini["Q2grid"][eko_fin["q2_ref"]]["operators"] - ope1_error = eko_ini["Q2grid"][eko_fin["q2_ref"]]["operator_errors"] + ope1 = eko_ini[q2match].operator.copy() + ope1_error = eko_ini[q2match].error.copy() ope2_dict = {} ope2_error_dict = {} - for q2, op in eko_fin["Q2grid"].items(): - ope2_dict[q2] = op["operators"] - ope2_error_dict[q2] = op["operator_errors"] + for q2, op in eko_fin.items(): + ope2_dict[q2] = op.operator + ope2_error_dict[q2] = op.error final_op_dict = {} final_op_error_dict = {} @@ -49,14 +58,25 @@ def ekos_product(eko_ini, eko_fin, in_place=True): ) + np.einsum("ajbk,bkcl -> ajcl", ope1_error, op2) final_dict[q2] = { - "operators": final_op_dict[q2], - "operator_errors": final_op_error_dict[q2], + "operator": final_op_dict[q2], + "error": final_op_error_dict[q2], } - final_eko = None if in_place is False: final_eko = copy.deepcopy(eko_ini) else: final_eko = eko_ini - final_eko["Q2grid"] = final_dict + + for q2, op2 in eko_fin.items(): + if q2 in eko_ini: + continue + + op = np.einsum("ajbk,bkcl -> ajcl", ope1, op2.operator) + + error = np.einsum("ajbk,bkcl -> ajcl", ope1, op2.error) + np.einsum( + "ajbk,bkcl -> ajcl", ope1_error, op2.operator + ) + + final_eko[q2] = Operator(operator=op, error=error) + return final_eko diff --git a/src/ekomark/apply.py b/src/ekomark/apply.py index fc216ba62..e738acdaf 100644 --- a/src/ekomark/apply.py +++ b/src/ekomark/apply.py @@ -5,13 +5,13 @@ from eko import interpolation -def apply_pdf(output, lhapdf_like, targetgrid=None, rotate_to_evolution_basis=False): +def apply_pdf(eko, lhapdf_like, targetgrid=None, rotate_to_evolution_basis=False): """ Apply all available operators to the input PDFs. 
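+
+    A call sketch (``myeko`` and the set name are illustrative)::
+
+        import lhapdf
+
+        results = apply_pdf(myeko, lhapdf.mkPDF("myMSTW2008nlo90cl", 0))
+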
Parameters ---------- - output : eko.output.Output + output : eko.output.EKO eko output object containing all informations lhapdf_like : object object that provides an xfxQ2 callable (as `lhapdf `_ @@ -28,18 +28,18 @@ def apply_pdf(output, lhapdf_like, targetgrid=None, rotate_to_evolution_basis=Fa """ if rotate_to_evolution_basis: return apply_pdf_flavor( - output, lhapdf_like, targetgrid, br.rotate_flavor_to_evolution + eko, lhapdf_like, targetgrid, br.rotate_flavor_to_evolution ) - return apply_pdf_flavor(output, lhapdf_like, targetgrid) + return apply_pdf_flavor(eko, lhapdf_like, targetgrid) -def apply_pdf_flavor(output, lhapdf_like, targetgrid=None, flavor_rotation=None): +def apply_pdf_flavor(eko, lhapdf_like, targetgrid=None, flavor_rotation=None): """ Apply all available operators to the input PDFs. Parameters ---------- - output : eko.output.Output + output : eko.output.EKO eko output object containing all informations lhapdf_like : object object that provides an xfxQ2 callable (as `lhapdf `_ @@ -55,25 +55,25 @@ def apply_pdf_flavor(output, lhapdf_like, targetgrid=None, flavor_rotation=None) output PDFs and their associated errors for the computed Q2grid """ # create pdfs - pdfs = np.zeros((len(output["inputpids"]), len(output["inputgrid"]))) - for j, pid in enumerate(output["inputpids"]): + pdfs = np.zeros((len(eko.rotations.inputpids), len(eko.rotations.inputgrid))) + for j, pid in enumerate(eko.rotations.inputpids): if not lhapdf_like.hasFlavor(pid): continue pdfs[j] = np.array( [ - lhapdf_like.xfxQ2(pid, x, output["q2_ref"]) / x - for x in output["inputgrid"] + lhapdf_like.xfxQ2(pid, x, eko.Q02) / x + for x in eko.rotations.inputgrid.raw ] ) # build output out_grid = {} - for q2, elem in output["Q2grid"].items(): - pdf_final = np.einsum("ajbk,bk", elem["operators"], pdfs) - error_final = np.einsum("ajbk,bk", elem["operator_errors"], pdfs) + for q2, elem in eko.items(): + pdf_final = np.einsum("ajbk,bk", elem.operator, pdfs) + error_final = np.einsum("ajbk,bk", elem.error, pdfs) out_grid[q2] = { - "pdfs": dict(zip(output["targetpids"], pdf_final)), - "errors": dict(zip(output["targetpids"], error_final)), + "pdfs": dict(zip(eko.rotations.targetpids, pdf_final)), + "errors": dict(zip(eko.rotations.targetpids, error_final)), } # rotate to evolution basis @@ -90,7 +90,7 @@ def apply_pdf_flavor(output, lhapdf_like, targetgrid=None, flavor_rotation=None) # rotate/interpolate to target grid if targetgrid is not None: - b = interpolation.InterpolatorDispatcher.from_dict(output, False) + b = eko.interpolator(False, True) rot = b.get_interpolation(targetgrid) for evpdf in out_grid.values(): for pdf_label in evpdf["pdfs"]: diff --git a/src/ekomark/benchmark/external/apfel_utils.py b/src/ekomark/benchmark/external/apfel_utils.py index dbc94a9de..3bb1016c8 100644 --- a/src/ekomark/benchmark/external/apfel_utils.py +++ b/src/ekomark/benchmark/external/apfel_utils.py @@ -36,7 +36,7 @@ def compute_apfel_data( output containing: target_xgrid, values """ - target_xgrid = operators["interpolation_xgrid"] + target_xgrid = operators["xgrid"] pdf_name = pdf.set().name # Load apfel diff --git a/src/ekomark/benchmark/external/lhapdf_utils.py b/src/ekomark/benchmark/external/lhapdf_utils.py index c3533cda6..c623070d2 100644 --- a/src/ekomark/benchmark/external/lhapdf_utils.py +++ b/src/ekomark/benchmark/external/lhapdf_utils.py @@ -28,7 +28,7 @@ def compute_LHAPDF_data(operators, pdf, skip_pdfs, rotate_to_evolution_basis=Fal output containing: target_xgrid, values """ - target_xgrid = 
operators["interpolation_xgrid"] + target_xgrid = operators["xgrid"] out_tabs = {} for q2 in operators["Q2grid"]: diff --git a/src/ekomark/benchmark/external/pegasus_utils.py b/src/ekomark/benchmark/external/pegasus_utils.py index 07f145848..87fb2f807 100644 --- a/src/ekomark/benchmark/external/pegasus_utils.py +++ b/src/ekomark/benchmark/external/pegasus_utils.py @@ -29,7 +29,7 @@ def compute_pegasus_data(theory, operators, skip_pdfs, rotate_to_evolution_basis """ import pegasus # pylint:disable=import-error,import-outside-toplevel - target_xgrid = operators["interpolation_xgrid"] + target_xgrid = operators["xgrid"] # init pegasus nf = theory["NfFF"] diff --git a/src/ekomark/data/db.py b/src/ekomark/data/db.py index 67328c522..98d754751 100644 --- a/src/ekomark/data/db.py +++ b/src/ekomark/data/db.py @@ -16,7 +16,7 @@ class Operator(Base): # pylint: disable=too-few-public-methods interpolation_is_log = Column(Text) interpolation_polynomial_degree = Column(Integer) - interpolation_xgrid = Column(Text) + xgrid = Column(Text) debug_skip_non_singlet = Column(Boolean) debug_skip_singlet = Column(Boolean) ev_op_max_order = Column(Integer) diff --git a/src/ekomark/data/operators.py b/src/ekomark/data/operators.py index 1a532e5e5..faab3960c 100644 --- a/src/ekomark/data/operators.py +++ b/src/ekomark/data/operators.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -""" -Operator card configurations. -""" +"""Operator card configurations.""" from banana.data import cartesian_product, sql from eko import interpolation @@ -9,19 +7,26 @@ from . import db default_card = dict( - interpolation_xgrid=interpolation.make_grid(30, 20).tolist(), - interpolation_polynomial_degree=4, - interpolation_is_log=True, - debug_skip_non_singlet=False, - debug_skip_singlet=False, - ev_op_max_order=10, - ev_op_iterations=10, - backward_inversion="expanded", - n_integration_cores=0, - Q2grid=[100], + sorted( + dict( + interpolation_xgrid=interpolation.make_grid(30, 20).tolist(), + interpolation_polynomial_degree=4, + interpolation_is_log=True, + ev_op_max_order=10, + ev_op_iterations=10, + backward_inversion="expanded", + n_integration_cores=0, + debug_skip_non_singlet=False, + debug_skip_singlet=False, + Q2grid=[100], + inputgrid=None, + targetgrid=None, + inputpids=None, + targetpids=None, + ).items() + ) ) -default_card = dict(sorted(default_card.items())) lhapdf_config = { # "ev_op_max_order": [10], @@ -50,7 +55,7 @@ def build(update=None): """ - Generate all operator card updates + Generate all operator card updates. Parameters ---------- diff --git a/src/ekomark/navigator/navigator.py b/src/ekomark/navigator/navigator.py index 286e43f86..fca3b746a 100644 --- a/src/ekomark/navigator/navigator.py +++ b/src/ekomark/navigator/navigator.py @@ -71,7 +71,7 @@ def fill_operators(self, op, obj): obj : dict to be updated pandas record """ - xgrid = op["interpolation_xgrid"] + xgrid = op["xgrid"] obj["xgrid"] = ( f"{len(xgrid)}pts: " + f"{'log' if op['interpolation_is_log'] else 'x'}" diff --git a/src/ekomark/plots.py b/src/ekomark/plots.py index d7612d014..92f821af9 100644 --- a/src/ekomark/plots.py +++ b/src/ekomark/plots.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -""" -Plotting tools to show the evolved PDF and the computed operators -""" +"""Plotting tools to show the evolved PDF and the computed operators.""" import io import pprint @@ -15,22 +13,22 @@ def input_figure(theory, ops, pdf_name=None): - """ - Pretty-prints the setup to a figure + """Pretty-prints the setup to a figure. 
Parameters ---------- - theory : dict - theory card - ops : dict - operator card - pdf_name : str - PDF name + theory : dict + theory card + ops : dict + operator card + pdf_name : str + PDF name Returns ------- - firstPage : matplotlib.pyplot.Figure - figure + firstPage : matplotlib.pyplot.Figure + figure + """ firstPage = plt.figure(figsize=(25, 20)) # theory @@ -56,8 +54,7 @@ def input_figure(theory, ops, pdf_name=None): def plot_dist(x, y, yerr, yref, title=None, oMx_min=1e-2, oMx_max=0.5): - """ - Compare two distributions. + """Compare two distributions. Generates a plot with 3 areas: @@ -68,28 +65,29 @@ def plot_dist(x, y, yerr, yref, title=None, oMx_min=1e-2, oMx_max=0.5): Parameters ---------- - x : numpy.ndarray - list of abscisses - y : numpy.ndarray - computed list of ordinates - yerr : numpy.ndarray - list of ordinate errors - yref : numpy.ndarray - reference list of ordinates + x : numpy.ndarray + list of abscisses + y : numpy.ndarray + computed list of ordinates + yerr : numpy.ndarray + list of ordinate errors + yref : numpy.ndarray + reference list of ordinates Other Parameters ---------------- - title : str, optional - additional overall title - oMx_min : float - maximum value for the large x region, i.e. 1-x > 1 - `oMx_min` - oMx_max : float - minimum value for the large x region, i.e. 1 - `oMx_max` > 1-x + title : str, optional + additional overall title + oMx_min : float + maximum value for the large x region, i.e. 1-x > 1 - `oMx_min` + oMx_max : float + minimum value for the large x region, i.e. 1 - `oMx_max` > 1-x Returns ------- - fig : matplotlib.pyplot.figure - created figure + fig : matplotlib.pyplot.figure + created figure + """ np.seterr(divide="ignore", invalid="ignore") fig = plt.figure(figsize=(15, 5)) @@ -164,7 +162,7 @@ def plot_operator(var_name, op, op_err, log_operator=True, abs_operator=True): """ # get # op = ret["operators"][var_name] - # op_err = ret["operator_errors"][var_name] + # op_err = ret["errors"][var_name] # empty? 
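As a standalone sketch of the renamed per-Q2 payload behind these keys, mirroring the save/load round-trip exercised in the new test_output_struct.py:

import io

import numpy as np

from eko.output import struct

# each Q2 slice is now an Operator carrying `operator` and `error` members
op = struct.Operator(operator=np.random.rand(2, 3, 2, 3))
stream = io.BytesIO()
op.save(stream, True)  # compressed dump
stream.seek(0)
assert struct.Operator.load(stream, True).error is None
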
thre = 1e-8 @@ -244,7 +242,7 @@ def save_operators_to_pdf(path, theory, ops, me, skip_pdfs, change_lab=False): # it's necessary to reshuffle the eko output for q2 in me["Q2grid"]: results = me["Q2grid"][q2]["operators"] - errors = me["Q2grid"][q2]["operator_errors"] + errors = me["Q2grid"][q2]["errors"] # loop on pids for label_out, res, res_err in zip(ops_names, results, errors): @@ -253,7 +251,7 @@ def save_operators_to_pdf(path, theory, ops, me, skip_pdfs, change_lab=False): new_op = {} new_op_err = {} # loop on xgrid point - for j in range(len(me["interpolation_xgrid"])): + for j in range(len(me["xgrid"])): # loop on pid in for label_in, val, val_err in zip(ops_names, res[j], res_err[j]): if label_in in skip_pdfs: diff --git a/tests/conftest.py b/tests/conftest.py index 62d23882d..2e72bad70 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,7 +1,31 @@ # -*- coding: utf-8 -*- +import os +import pathlib +import shutil +import sys +from contextlib import contextmanager + import numpy as np import pytest +from eko import output +from eko.output import legacy + + +@pytest.fixture +def cd(): + # thanks https://stackoverflow.com/a/24176022/8653979 + @contextmanager + def wrapped(newdir): + prevdir = os.getcwd() + os.chdir(os.path.expanduser(newdir)) + try: + yield + finally: + os.chdir(prevdir) + + return wrapped + class FakePDF: def hasFlavor(self, pid): @@ -27,33 +51,68 @@ def mk_g(self, q2s, lpids, lx): Q2grid = {} for q2 in q2s: Q2grid[q2] = { - "operators": np.random.rand(lpids, lx, lpids, lx), - "operator_errors": np.random.rand(lpids, lx, lpids, lx), - "alphas": np.random.rand(), + "operator": np.random.rand(lpids, lx, lpids, lx), + "error": np.random.rand(lpids, lx, lpids, lx), } return Q2grid - def fake_output(self): - # build data - interpolation_xgrid = np.array([0.5, 1.0]) + def mk_dump(self) -> dict: + xgrid = np.array([0.5, 1.0]) interpolation_polynomial_degree = 1 interpolation_is_log = False pids = [0, 1] q2_ref = 1 q2_out = 2 - Q2grid = self.mk_g([q2_out], len(pids), len(interpolation_xgrid)) - d = dict( - interpolation_xgrid=interpolation_xgrid, - targetgrid=interpolation_xgrid, - inputgrid=interpolation_xgrid, - interpolation_polynomial_degree=interpolation_polynomial_degree, - interpolation_is_log=interpolation_is_log, - q2_ref=q2_ref, - inputpids=pids, - targetpids=pids, + Q2grid = self.mk_g([q2_out], len(pids), len(xgrid)) + return dict( + rotations=dict( + xgrid=xgrid, + pids=pids, + targetgrid=xgrid, + inputgrid=xgrid, + inputpids=pids, + targetpids=pids, + ), + Q0=np.sqrt(q2_ref), + couplings=dict(), + configs=dict( + ev_op_max_order=1, + ev_op_iterations=1, + interpolation_polynomial_degree=interpolation_polynomial_degree, + interpolation_is_log=interpolation_is_log, + backward_inversion="exact", + ), Q2grid=Q2grid, ) - return d + + def fake_output(self): + d = self.mk_dump() + # build data + obj = output.EKO.new(theory={}, operator=d) + for q2, op in d["Q2grid"].items(): + obj[q2] = output.struct.Operator.from_dict(op) + return obj, d + + def fake_legacy(self): + d = self.mk_dump() + bases = d["rotations"].copy() + + # build data + obj = output.EKO.new(theory={}, operator=d) + for q2, op in d["Q2grid"].items(): + obj[q2] = output.struct.Operator.from_dict(op) + + d["inputgrid"] = bases["inputgrid"] + d["targetgrid"] = bases["targetgrid"] + d["inputpids"] = bases["inputpids"] + d["targetpids"] = bases["targetpids"] + + d["interpolation_xgrid"] = bases["xgrid"] + d["pids"] = bases["pids"] + + del d["rotations"] + + return obj, d @pytest.fixture @@ -64,3 
+123,47 @@ def fake_factory(): @pytest.fixture def fake_output(): return FakeOutput().fake_output() + + +@pytest.fixture +def fake_legacy(): + return FakeOutput().fake_legacy() + + +@pytest.fixture +def fake_lhapdf(tmp_path): + def lhapdf_paths(): + return [tmp_path] + + # Thanks https://stackoverflow.com/questions/43162722/mocking-a-module-import-in-pytest + module = type(sys)("lhapdf") + module.paths = lhapdf_paths + sys.modules["lhapdf"] = module + + yield tmp_path + + +fakepdf = pathlib.Path(__file__).parents[1] / "benchmarks" / "ekobox" / "fakepdf" + + +def copy_lhapdf(fake_lhapdf, n): + src = fakepdf / n + dst = fake_lhapdf / n + shutil.copytree(src, dst) + yield n + shutil.rmtree(dst) + + +@pytest.fixture +def fake_ct14(fake_lhapdf): + yield from copy_lhapdf(fake_lhapdf, "myCT14llo_NF3") + + +@pytest.fixture +def fake_nn31(fake_lhapdf): + yield from copy_lhapdf(fake_lhapdf, "myNNPDF31_nlo_as_0118") + + +@pytest.fixture +def fake_mstw(fake_lhapdf): + yield from copy_lhapdf(fake_lhapdf, "myMSTW2008nlo90cl") diff --git a/tests/eko/test_compatibility.py b/tests/eko/test_compatibility.py index 5e09d6673..1a9de4d0b 100644 --- a/tests/eko/test_compatibility.py +++ b/tests/eko/test_compatibility.py @@ -1,22 +1,39 @@ # -*- coding: utf-8 -*- -import pytest - from eko import compatibility -theory1 = { - "alphas": 0.1180, - "alphaqed": 0.007496, - "PTO": 2, - "QED": 0, -} +theory1 = {"alphas": 0.1180, "alphaqed": 0.007496, "PTO": 2, "QED": 0, "Q0": 1.0} def test_compatibility(): new_theory = compatibility.update_theory(theory1) + assert new_theory["order"][0] == theory1["PTO"] + 1 + -operator_dict = {"ev_op_max_order": 2} +def operator_dict(xgrid, pids): + return dict( + ev_op_max_order=2, + ev_op_iterations=1, + interpolation_xgrid=xgrid, + interpolation_polynomial_degree=4, + interpolation_is_log=True, + backward_inversion=None, + n_integration_cores=1, + debug_skip_singlet=False, + debug_skip_non_singlet=False, + inputgrid=xgrid, + targetgrid=xgrid, + pids=pids, + inputpids=pids, + targetpids=pids, + ) def test_compatibility_operators(): - new_theory, new_operator = compatibility.update(theory1, operator_dict) + xgrid = [1e-3, 1e-2, 1e-1, 1.0] + pids = [21, -1, 1] + + _, new_operator = compatibility.update(theory1, operator_dict(xgrid, pids)) + + assert new_operator is not None + assert not isinstance(new_operator["configs"]["ev_op_max_order"], int) diff --git a/tests/eko/test_ev_op_grid.py b/tests/eko/test_ev_op_grid.py index 0b6fdbbc0..d2ae1519f 100644 --- a/tests/eko/test_ev_op_grid.py +++ b/tests/eko/test_ev_op_grid.py @@ -41,15 +41,19 @@ def _get_setup(self, use_FFNS): } operators_card = { "Q2grid": [1, 100**2], - "interpolation_xgrid": [0.1, 1.0], - "interpolation_polynomial_degree": 1, - "interpolation_is_log": True, - "debug_skip_singlet": True, - "debug_skip_non_singlet": False, - "ev_op_max_order": (2, 0), - "ev_op_iterations": 1, - "backward_inversion": "exact", - "n_integration_cores": 1, + "xgrid": [0.1, 1.0], + "configs": { + "interpolation_polynomial_degree": 1, + "interpolation_is_log": True, + "ev_op_max_order": (2, 0), + "ev_op_iterations": 1, + "backward_inversion": "exact", + "n_integration_cores": 1, + }, + "debug": { + "skip_singlet": True, + "skip_non_singlet": False, + }, } if use_FFNS: theory_card["FNS"] = "FFNS" @@ -69,8 +73,12 @@ def _get_operator_grid(self, use_FFNS=True, theory_update=None): if theory_update is not None: theory_card.update(theory_update) # create objects - basis_function_dispatcher = interpolation.InterpolatorDispatcher.from_dict( - 
operators_card + basis_function_dispatcher = interpolation.InterpolatorDispatcher( + interpolation.XGrid( + operators_card["xgrid"], + log=operators_card["configs"]["interpolation_is_log"], + ), + operators_card["configs"]["interpolation_polynomial_degree"], ) threshold_holder = ThresholdsAtlas.from_dict(theory_card) a_s = Couplings.from_dict(theory_card) @@ -87,8 +95,12 @@ def test_sanity(self): # errors with pytest.raises(ValueError): theory_card, operators_card = self._get_setup(True) - basis_function_dispatcher = interpolation.InterpolatorDispatcher.from_dict( - operators_card + basis_function_dispatcher = interpolation.InterpolatorDispatcher( + interpolation.XGrid( + operators_card["xgrid"], + log=operators_card["configs"]["interpolation_is_log"], + ), + operators_card["configs"]["interpolation_polynomial_degree"], ) threshold_holder = ThresholdsAtlas.from_dict(theory_card) a_s = Couplings.from_dict(theory_card) @@ -108,14 +120,10 @@ def test_compute_q2grid(self): # we can also pass a single number opg = opgrid.compute() assert len(opg) == 2 - assert all( - k in op for k in ["operators", "operator_errors"] for op in opg.values() - ) + assert all(k in op for k in ["operator", "error"] for op in opg.values()) opg = opgrid.compute(3) assert len(opg) == 1 - assert all( - k in op for k in ["operators", "operator_errors"] for op in opg.values() - ) + assert all(k in op for k in ["operator", "error"] for op in opg.values()) def test_grid_computation_VFNS(self): """Checks that the grid can be computed""" @@ -140,5 +148,5 @@ def test_mod_expanded(self): ) sv_opg = sv_opgrid.compute(3) np.testing.assert_allclose( - opg[3]["operators"], sv_opg[3]["operators"], atol=0.7 * epsilon + opg[3]["operator"], sv_opg[3]["operator"], atol=0.7 * epsilon ) diff --git a/tests/eko/test_ev_operator.py b/tests/eko/test_ev_operator.py index 795712c6c..1e10b4058 100644 --- a/tests/eko/test_ev_operator.py +++ b/tests/eko/test_ev_operator.py @@ -11,7 +11,7 @@ from eko.couplings import Couplings from eko.evolution_operator import Operator, quad_ker from eko.evolution_operator.grid import OperatorGrid -from eko.interpolation import InterpolatorDispatcher +from eko.interpolation import InterpolatorDispatcher, XGrid from eko.kernels import non_singlet as ns from eko.kernels import singlet as s from eko.thresholds import ThresholdsAtlas @@ -157,15 +157,16 @@ def test_quad_ker(monkeypatch): } operators_card = { "Q2grid": [1, 10], - "interpolation_xgrid": [0.1, 1.0], - "interpolation_polynomial_degree": 1, - "interpolation_is_log": True, - "debug_skip_singlet": False, - "debug_skip_non_singlet": False, - "ev_op_max_order": 1, - "ev_op_iterations": 1, - "backward_inversion": "exact", - "n_integration_cores": 1, + "xgrid": [0.1, 1.0], + "configs": { + "interpolation_polynomial_degree": 1, + "interpolation_is_log": True, + "ev_op_max_order": [1, 1], + "ev_op_iterations": 1, + "backward_inversion": "exact", + "n_integration_cores": 1, + }, + "debug": {"skip_singlet": False, "skip_non_singlet": False}, } @@ -227,7 +228,13 @@ def test_exponentiated(self): ocard, ThresholdsAtlas.from_dict(tcard), Couplings.from_dict(tcard), - InterpolatorDispatcher.from_dict(ocard), + InterpolatorDispatcher( + XGrid( + operators_card["xgrid"], + log=operators_card["configs"]["interpolation_is_log"], + ), + operators_card["configs"]["interpolation_polynomial_degree"], + ), ) # setup objs o = Operator(g.config, g.managers, 3, 2.0, 10.0) @@ -244,7 +251,13 @@ def test_compute_parallel(self, monkeypatch): ocard, ThresholdsAtlas.from_dict(tcard), 
Couplings.from_dict(tcard), - InterpolatorDispatcher.from_dict(ocard), + InterpolatorDispatcher( + XGrid( + operators_card["xgrid"], + log=operators_card["configs"]["interpolation_is_log"], + ), + operators_card["configs"]["interpolation_polynomial_degree"], + ), ) # setup objs o = Operator(g.config, g.managers, 3, 2.0, 10.0) @@ -275,7 +288,13 @@ def test_compute(self, monkeypatch): ocard, ThresholdsAtlas.from_dict(tcard), Couplings.from_dict(tcard), - InterpolatorDispatcher.from_dict(ocard), + InterpolatorDispatcher( + XGrid( + operators_card["xgrid"], + log=operators_card["configs"]["interpolation_is_log"], + ), + operators_card["configs"]["interpolation_polynomial_degree"], + ), ) # setup objs o = Operator(g.config, g.managers, 3, 2.0, 10.0) @@ -337,7 +356,7 @@ def quad_ker_pegasus( mode0 = br.non_singlet_pids_map["ns+"] mode1 = 0 method = "" - logxs = np.log(int_disp.xgrid_raw) + logxs = np.log(int_disp.xgrid.raw) a1 = 1 a0 = 2 nf = 3 diff --git a/tests/eko/test_interpolation.py b/tests/eko/test_interpolation.py index c4e06b7b6..014ab2329 100644 --- a/tests/eko/test_interpolation.py +++ b/tests/eko/test_interpolation.py @@ -24,7 +24,7 @@ def check_is_interpolator(interpolator): assert_almost_equal(one, 1.0) # polynoms need to be "orthogonal" at grid points - for j, (basis_j, xj) in enumerate(zip(interpolator, interpolator.xgrid_raw)): + for j, (basis_j, xj) in enumerate(zip(interpolator, interpolator.xgrid.raw)): one = basis_j(xj) assert_almost_equal( one, @@ -100,56 +100,29 @@ def test_init(self): with pytest.raises(ValueError): interpolation.InterpolatorDispatcher([], 1) - def test_from_dict(self): - d = { - "interpolation_xgrid": ["make_grid", 3, 3], - "interpolation_is_log": False, - "interpolation_polynomial_degree": 1, - } - a = interpolation.InterpolatorDispatcher.from_dict(d) - np.testing.assert_array_almost_equal( - a.xgrid, np.array([1e-7, 1e-4, 1e-1, 0.55, 1.0]) - ) - assert a.polynomial_degree == 1 - dd = { - "interpolation_xgrid": ["make_lambert_grid", 20], - "interpolation_is_log": False, - "interpolation_polynomial_degree": 1, - } - aa = interpolation.InterpolatorDispatcher.from_dict(dd) - assert len(aa.xgrid) == 20 - with pytest.raises(ValueError): - d = { - "interpolation_xgrid": [], - "interpolation_is_log": False, - "interpolation_polynomial_degree": 1, - } - interpolation.InterpolatorDispatcher.from_dict(d) - def test_eq(self): - a = interpolation.InterpolatorDispatcher( - np.linspace(0.1, 1, 10), 4, log=False, mode_N=False - ) - b = interpolation.InterpolatorDispatcher( - np.linspace(0.1, 1, 9), 4, log=False, mode_N=False - ) + # define grids + x9 = interpolation.XGrid(np.linspace(0.1, 1, 9), log=False) + x10 = interpolation.XGrid(np.linspace(0.1, 1, 10), log=False) + # test various options + a = interpolation.InterpolatorDispatcher(x10, 4, mode_N=False) + b = interpolation.InterpolatorDispatcher(x9, 4, mode_N=False) assert a != b - c = interpolation.InterpolatorDispatcher( - np.linspace(0.1, 1, 10), 3, log=False, mode_N=False - ) + c = interpolation.InterpolatorDispatcher(x10, 3, mode_N=False) assert a != c d = interpolation.InterpolatorDispatcher( - np.linspace(0.1, 1, 10), 4, log=True, mode_N=False + np.linspace(0.1, 1, 10), 4, mode_N=False ) assert a != d - e = interpolation.InterpolatorDispatcher( - np.linspace(0.1, 1, 10), 4, log=False, mode_N=False - ) + e = interpolation.InterpolatorDispatcher(x10, 4, mode_N=False) assert a == e # via dict dd = a.to_dict() assert isinstance(dd, dict) - assert a == interpolation.InterpolatorDispatcher.from_dict(dd) + assert 
a == interpolation.InterpolatorDispatcher( + interpolation.XGrid.load(dd["xgrid"]), + polynomial_degree=dd["polynomial_degree"], + ) def test_iter(self): xgrid = np.linspace(0.1, 1, 10) @@ -159,23 +132,23 @@ def test_iter(self): assert bf == inter_x[k] def test_get_interpolation(self): - xg = [0.5, 1.0] - inter_x = interpolation.InterpolatorDispatcher(xg, 1, False, False) - i = inter_x.get_interpolation(xg) + xg = interpolation.XGrid([0.5, 1.0], log=False) + inter_x = interpolation.InterpolatorDispatcher(xg, 1, False) + i = inter_x.get_interpolation(xg.raw) np.testing.assert_array_almost_equal(i, np.eye(len(xg))) # .75 is exactly inbetween i = inter_x.get_interpolation([0.75]) np.testing.assert_array_almost_equal(i, [[0.5, 0.5]]) def test_evaluate_x(self): - xgrid = np.linspace(0.1, 1, 10) poly_degree = 4 for log in [True, False]: + xgrid = interpolation.XGrid(np.linspace(0.1, 1, 10), log=log) inter_x = interpolation.InterpolatorDispatcher( - xgrid, poly_degree, log=log, mode_N=False + xgrid, poly_degree, mode_N=False ) inter_N = interpolation.InterpolatorDispatcher( - xgrid, poly_degree, log=log, mode_N=True + xgrid, poly_degree, mode_N=True ) for x in [0.2, 0.5]: for bx, bN in zip(inter_x, inter_N): @@ -183,16 +156,14 @@ def test_evaluate_x(self): def test_math(self): """Test math properties of interpolator""" - xgrid = np.linspace(0.09, 1, 10) poly_deg = 4 for log in [True, False]: + xgrid = interpolation.XGrid(np.linspace(0.09, 1, 10), log=log) inter_x = interpolation.InterpolatorDispatcher( - xgrid, poly_deg, log=log, mode_N=False + xgrid, poly_deg, mode_N=False ) check_is_interpolator(inter_x) - inter_N = interpolation.InterpolatorDispatcher( - xgrid, poly_deg, log=log, mode_N=True - ) + inter_N = interpolation.InterpolatorDispatcher(xgrid, poly_deg, mode_N=True) check_correspondence_interpolators(inter_x, inter_N) @@ -203,22 +174,28 @@ def test_init(self): interpolation.BasisFunction([0.1, 1.0], 0, []) def test_eval_N(self): - xg = [0.0, 1.0] - inter_N = interpolation.InterpolatorDispatcher(xg, 1, log=False) + xg = interpolation.XGrid([0.0, 1.0], log=False) + inter_N = interpolation.InterpolatorDispatcher(xg, 1) # p_0(x) = 1-x -> \tilde p_0(N) = 1/N - 1/(N+1) p0N = inter_N[0] assert len(p0N.areas) == 1 p0_cs_ref = [1, -1] for act_c, res_c in zip(p0N.areas[0], p0_cs_ref): assert_almost_equal(act_c, res_c) - p0Nref = lambda N, lnx: (1 / N - 1 / (N + 1)) * np.exp(-N * lnx) + + def p0Nref(N, lnx): + return (1 / N - 1 / (N + 1)) * np.exp(-N * lnx) + # p_1(x) = x -> \tilde p_1(N) = 1/(N+1) p1N = inter_N[1] assert len(p1N.areas) == 1 p1_cs_ref = [0, 1] for act_c, res_c in zip(p1N.areas[0], p1_cs_ref): assert_almost_equal(act_c, res_c) - p1Nref = lambda N, lnx: (1 / (N + 1)) * np.exp(-N * lnx) + + def p1Nref(N, lnx): + return (1 / (N + 1)) * np.exp(-N * lnx) + # iterate configurations for N in [1.0, 2.0, complex(1.0, 1.0)]: # check skip @@ -231,29 +208,41 @@ def test_eval_N(self): def test_log_eval_N(self): xg = [np.exp(-1), 1.0] - inter_N = interpolation.InterpolatorDispatcher(xg, 1, log=True) + inter_N = interpolation.InterpolatorDispatcher(xg, 1) # p_0(x) = -ln(x) p0N = inter_N[0] assert len(p0N.areas) == 1 p0_cs_ref = [0, -1] for act_c, res_c in zip(p0N.areas[0], p0_cs_ref): assert_almost_equal(act_c, res_c) - # Full -> \tilde p_0(N) = exp(-N)(exp(N)-1-N)/N^2 - # MMa: Integrate[x^(n-1) (-Log[x]),{x,1/E,1}] - p0Nref_full = lambda N, lnx: ((np.exp(N) - 1 - N) / N**2) * np.exp( - -N * (lnx + 1) - ) - # partial = lower bound is neglected; - p0Nref_partial = lambda N, lnx: (1 / N**2) * 
np.exp(-N * lnx) + + def p0Nref_full(N, lnx): + r""" + Full -> \tilde p_0(N) = exp(-N)(exp(N)-1-N)/N^2 + MMa: Integrate[x^(n-1) (-Log[x]),{x,1/E,1}] + """ + return ((np.exp(N) - 1 - N) / N**2) * np.exp(-N * (lnx + 1)) + + def p0Nref_partial(N, lnx): + "partial = lower bound is neglected" + return (1 / N**2) * np.exp(-N * lnx) + p1N = inter_N[1] assert len(p1N.areas) == 1 p1_cs_ref = [1, 1] for act_c, res_c in zip(p1N.areas[0], p1_cs_ref): assert_almost_equal(act_c, res_c) - # p_1(x) = 1+\ln(x) -> \tilde p_1(N) = (exp(-N)-1+N)/N^2 - # MMa: Integrate[x^(n-1) (1+Log[x]),{x,1/E,1}] - p1Nref_full = lambda N, lnx: ((np.exp(-N) - 1 + N) / N**2) * np.exp(-N * lnx) - p1Nref_partial = lambda N, lnx: (1 / N - 1 / N**2) * np.exp(-N * lnx) + + def p1Nref_full(N, lnx): + r""" + p_1(x) = 1+\ln(x) -> \tilde p_1(N) = (exp(-N)-1+N)/N^2 + MMa: Integrate[x^(n-1) (1+Log[x]),{x,1/E,1}] + """ + return ((np.exp(-N) - 1 + N) / N**2) * np.exp(-N * lnx) + + def p1Nref_partial(N, lnx): + return (1 / N - 1 / N**2) * np.exp(-N * lnx) + # iterate configurations for N in [1.0, 2.0, complex(1.0, 1.0)]: # check skip @@ -291,9 +280,8 @@ def test_is_below_x(self): "res": [True, True, False, False, False], }, ]: - inter_x = interpolation.InterpolatorDispatcher( - cfg["xg"], cfg["pd"], log=log - ) + xg = interpolation.XGrid(cfg["xg"], log=log) + inter_x = interpolation.InterpolatorDispatcher(xg, cfg["pd"]) actual = [bf.is_below_x(cfg["x"]) for bf in inter_x] assert actual == cfg["res"] @@ -335,6 +323,27 @@ def test_reference_indices(self): a = interpolation.Area(0, 3, (0, 2), xgrid) +class TestXGrid: + def test_fromcard(self): + aa = [0.1, 1.0] + a = interpolation.XGrid.fromcard(aa, False) + np.testing.assert_array_almost_equal(a.raw, aa) + assert a.size == len(aa) + + bargs = (3, 3) + bb = interpolation.make_grid(*bargs) + b = interpolation.XGrid.fromcard(["make_grid", *bargs], False) + np.testing.assert_array_almost_equal(b.raw, bb) + + cargs = (10,) + cc = interpolation.make_lambert_grid(*cargs) + c = interpolation.XGrid.fromcard(["make_lambert_grid", *cargs], False) + np.testing.assert_array_almost_equal(c.raw, cc) + + with pytest.raises(ValueError): + interpolation.XGrid.fromcard([], False) + + def test_make_grid(): xg = interpolation.make_grid(3, 3) np.testing.assert_array_almost_equal(xg, np.array([1e-7, 1e-4, 1e-1, 0.55, 1.0])) diff --git a/tests/eko/test_ome.py b/tests/eko/test_ome.py index 7f3a31b65..80add61d7 100644 --- a/tests/eko/test_ome.py +++ b/tests/eko/test_ome.py @@ -9,7 +9,7 @@ from eko.couplings import Couplings from eko.evolution_operator.grid import OperatorGrid from eko.harmonics import compute_cache -from eko.interpolation import InterpolatorDispatcher +from eko.interpolation import InterpolatorDispatcher, XGrid from eko.matching_conditions.operator_matrix_element import ( A_non_singlet, A_singlet, @@ -314,15 +314,16 @@ class TestOperatorMatrixElement: } operators_card = { "Q2grid": [20], - "interpolation_xgrid": [0.1, 1.0], - "interpolation_polynomial_degree": 1, - "interpolation_is_log": True, - "debug_skip_singlet": True, - "debug_skip_non_singlet": False, - "ev_op_max_order": 1, - "ev_op_iterations": 1, - "backward_inversion": "exact", - "n_integration_cores": 1, + "xgrid": [0.1, 1.0], + "configs": { + "interpolation_polynomial_degree": 1, + "interpolation_is_log": True, + "ev_op_max_order": 1, + "ev_op_iterations": 1, + "backward_inversion": "exact", + "n_integration_cores": 1, + }, + "debug": {"skip_singlet": True, "skip_non_singlet": False}, } def test_labels(self): @@ -330,22 +331,32 @@ def 
test_labels(self): for skip_ns in [True, False]: operators_card = { "Q2grid": [1, 10], - "interpolation_xgrid": [0.1, 1.0], - "interpolation_polynomial_degree": 1, - "interpolation_is_log": True, - "debug_skip_singlet": skip_singlet, - "debug_skip_non_singlet": skip_ns, - "ev_op_max_order": (2, 0), - "ev_op_iterations": 1, - "backward_inversion": "exact", - "n_integration_cores": 1, + "xgrid": [0.1, 1.0], + "configs": { + "interpolation_polynomial_degree": 1, + "interpolation_is_log": True, + "ev_op_max_order": (2, 0), + "ev_op_iterations": 1, + "backward_inversion": "exact", + "n_integration_cores": 1, + }, + "debug": { + "skip_singlet": skip_singlet, + "skip_non_singlet": skip_ns, + }, } g = OperatorGrid.from_dict( self.theory_card, operators_card, ThresholdsAtlas.from_dict(self.theory_card), Couplings.from_dict(self.theory_card), - InterpolatorDispatcher.from_dict(operators_card), + InterpolatorDispatcher( + XGrid( + operators_card["xgrid"], + log=operators_card["configs"]["interpolation_is_log"], + ), + operators_card["configs"]["interpolation_polynomial_degree"], + ), ) o = OperatorMatrixElement( g.config, @@ -389,7 +400,13 @@ def test_compute_n3lo(self): self.operators_card, ThresholdsAtlas.from_dict(self.theory_card), Couplings.from_dict(self.theory_card), - InterpolatorDispatcher.from_dict(self.operators_card), + InterpolatorDispatcher( + XGrid( + self.operators_card["xgrid"], + log=self.operators_card["configs"]["interpolation_is_log"], + ), + self.operators_card["configs"]["interpolation_polynomial_degree"], + ), ) o = OperatorMatrixElement( g.config, @@ -416,13 +433,21 @@ def test_compute_n3lo(self): def test_compute_lo(self): self.theory_card.update({"order": (1, 0)}) - self.operators_card.update({"debug_skip_singlet": False}) + self.operators_card.update( + dict(debug={"skip_singlet": False, "skip_non_singlet": False}) + ) g = OperatorGrid.from_dict( self.theory_card, self.operators_card, ThresholdsAtlas.from_dict(self.theory_card), Couplings.from_dict(self.theory_card), - InterpolatorDispatcher.from_dict(self.operators_card), + InterpolatorDispatcher( + XGrid( + self.operators_card["xgrid"], + log=self.operators_card["configs"]["interpolation_is_log"], + ), + self.operators_card["configs"]["interpolation_polynomial_degree"], + ), ) o = OperatorMatrixElement( g.config, @@ -463,15 +488,19 @@ def test_compute_lo(self): def test_compute_nlo(self): operators_card = { "Q2grid": [20], - "interpolation_xgrid": [0.001, 0.01, 0.1, 1.0], - "interpolation_polynomial_degree": 1, - "interpolation_is_log": True, - "debug_skip_singlet": False, - "debug_skip_non_singlet": False, - "ev_op_max_order": (1, 0), - "ev_op_iterations": 1, - "backward_inversion": "exact", - "n_integration_cores": 1, + "xgrid": [0.001, 0.01, 0.1, 1.0], + "configs": { + "interpolation_polynomial_degree": 1, + "interpolation_is_log": True, + "ev_op_max_order": (2, 0), + "ev_op_iterations": 1, + "backward_inversion": "exact", + "n_integration_cores": 1, + }, + "debug": { + "skip_singlet": False, + "skip_non_singlet": False, + }, } t = copy.deepcopy(self.theory_card) t["order"] = (2, 0) @@ -480,7 +509,13 @@ def test_compute_nlo(self): operators_card, ThresholdsAtlas.from_dict(t), Couplings.from_dict(t), - InterpolatorDispatcher.from_dict(operators_card), + InterpolatorDispatcher( + XGrid( + operators_card["xgrid"], + log=operators_card["configs"]["interpolation_is_log"], + ), + operators_card["configs"]["interpolation_polynomial_degree"], + ), ) o = OperatorMatrixElement( g.config, @@ -493,7 +528,7 @@ def 
test_compute_nlo(self): ) o.compute() - dim = len(operators_card["interpolation_xgrid"]) + dim = len(operators_card["xgrid"]) shape = (dim, dim) for indices in [(100, br.matching_hplus_pid), (200, br.matching_hminus_pid)]: assert o.op_members[(indices[0], indices[0])].value.shape == shape diff --git a/tests/eko/test_output.py b/tests/eko/test_output.py index 6e7fc2617..12ce82fa8 100644 --- a/tests/eko/test_output.py +++ b/tests/eko/test_output.py @@ -4,13 +4,13 @@ import pathlib import shutil import tempfile -from unittest import mock import numpy as np import pytest from eko import basis_rotation as br -from eko import output +from eko import interpolation, output +from eko.output import legacy, manipulate def eko_identity(shape): @@ -23,264 +23,243 @@ def eko_identity(shape): def chk_keys(a, b): """Check all keys are preserved""" assert sorted(a.keys()) == sorted(b.keys()) - for q2, op in a["Q2grid"].items(): - assert q2 in b["Q2grid"] - opb = b["Q2grid"][q2] - assert sorted(op.keys()) == sorted(opb.keys()) - assert op["alphas"] == opb["alphas"] + for key, value in a.items(): + if isinstance(value, dict): + assert sorted(value.keys()) == sorted(b[key].keys()) -class TestOutput: - def test_io(self, fake_output): +class TestLegacy: + def test_io(self, fake_legacy, tmp_path): # create object - o1 = output.Output(fake_output) + o1, fake_card = fake_legacy + for q2, op in fake_card["Q2grid"].items(): + o1[q2] = output.Operator.from_dict(op) + # test streams stream = io.StringIO() - o1.dump_yaml(stream) + legacy.dump_yaml(o1, stream) # rewind and read again stream.seek(0) - o2 = output.Output.load_yaml(stream) - np.testing.assert_almost_equal( - o1["interpolation_xgrid"], fake_output["interpolation_xgrid"] - ) - np.testing.assert_almost_equal( - o2["interpolation_xgrid"], fake_output["interpolation_xgrid"] - ) + o2 = legacy.load_yaml(stream) + np.testing.assert_almost_equal(o1.xgrid.raw, fake_card["interpolation_xgrid"]) + np.testing.assert_almost_equal(o2.xgrid.raw, fake_card["interpolation_xgrid"]) # fake output files - m_out = mock.mock_open(read_data="") - with mock.patch("builtins.open", m_out) as mock_file: - fn = "test.yaml" - o1.dump_yaml_to_file(fn) - mock_file.assert_called_with(fn, "w", encoding="utf-8") + fpyaml = tmp_path / "test.yaml" + legacy.dump_yaml_to_file(o1, fpyaml) # fake input file - stream.seek(0) - m_in = mock.mock_open(read_data=stream.getvalue()) - with mock.patch("builtins.open", m_in) as mock_file: - fn = "test.yaml" - o3 = output.Output.load_yaml_from_file(fn) - mock_file.assert_called_with(fn, encoding="utf-8") - np.testing.assert_almost_equal( - o3["interpolation_xgrid"], fake_output["interpolation_xgrid"] - ) + o3 = legacy.load_yaml_from_file(fpyaml) + np.testing.assert_almost_equal(o3.xgrid.raw, fake_card["interpolation_xgrid"]) # repeat for tar - fn = "test.tar" - with tempfile.TemporaryDirectory() as folder: - fp = pathlib.Path(folder) / fn - o1.dump_tar(fp) - o4 = output.Output.load_tar(fp) - np.testing.assert_almost_equal( - o4["interpolation_xgrid"], fake_output["interpolation_xgrid"] - ) + fptar = tmp_path / "test.tar" + legacy.dump_tar(o1, fptar) + o4 = legacy.load_tar(fptar) + np.testing.assert_almost_equal(o4.xgrid.raw, fake_card["interpolation_xgrid"]) fn = "test" with pytest.raises(ValueError, match="wrong suffix"): - o1.dump_tar(fn) + legacy.dump_tar(o1, fn) - def test_rename_issue81(self, fake_output): + def test_rename_issue81(self, fake_legacy): # https://github.com/N3PDF/eko/issues/81 # create object - o1 = output.Output(fake_output) + o1, 
fake_card = fake_legacy + for q2, op in fake_card["Q2grid"].items(): + o1[q2] = output.Operator.from_dict(op) with tempfile.TemporaryDirectory() as folder: # dump p = pathlib.Path(folder) fp1 = p / "test1.tar" fp2 = p / "test2.tar" - o1.dump_tar(fp1) + legacy.dump_tar(o1, fp1) # rename shutil.move(fp1, fp2) # reload - o4 = output.Output.load_tar(fp2) + o4 = legacy.load_tar(fp2) np.testing.assert_almost_equal( - o4["interpolation_xgrid"], fake_output["interpolation_xgrid"] + o4.xgrid.raw, fake_card["interpolation_xgrid"] ) - def test_io_bin(self, fake_output): + def test_io_bin(self, fake_legacy): # create object - o1 = output.Output(fake_output) + o1, fake_card = fake_legacy + for q2, op in fake_card["Q2grid"].items(): + o1[q2] = output.Operator.from_dict(op) # test streams stream = io.StringIO() - o1.dump_yaml(stream, False) + legacy.dump_yaml(o1, stream, False) # rewind and read again stream.seek(0) - o2 = output.Output.load_yaml(stream) - np.testing.assert_almost_equal( - o1["interpolation_xgrid"], fake_output["interpolation_xgrid"] - ) - np.testing.assert_almost_equal( - o2["interpolation_xgrid"], fake_output["interpolation_xgrid"] - ) + o2 = legacy.load_yaml(stream) + np.testing.assert_almost_equal(o1.xgrid.raw, fake_card["interpolation_xgrid"]) + np.testing.assert_almost_equal(o2.xgrid.raw, fake_card["interpolation_xgrid"]) + +class TestManipulate: def test_xgrid_reshape(self, fake_output): # create object - xg = np.geomspace(1e-5, 1.0, 21) - o1 = output.Output(fake_output) - o1["interpolation_xgrid"] = xg - o1["targetgrid"] = xg - o1["inputgrid"] = xg - o1["Q2grid"] = { - 10: dict( - operators=eko_identity([1, 2, len(xg), 2, len(xg)])[0], - operator_errors=np.zeros((2, len(xg), 2, len(xg))), - alphas=np.random.rand(), + xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) + o1, _fake_card = fake_output + o1.xgrid = xg + o1.rotations._targetgrid = xg + o1.rotations._inputgrid = xg + o1._operators = { + 10: output.Operator.from_dict( + dict( + operator=eko_identity([1, 2, len(xg), 2, len(xg)])[0], + error=np.zeros((2, len(xg), 2, len(xg))), + ) ) } - xgp = np.geomspace(1e-5, 1.0, 11) + xgp = interpolation.XGrid(np.geomspace(1e-5, 1.0, 11)) # only target ot = copy.deepcopy(o1) - ot.xgrid_reshape(xgp) - chk_keys(o1, ot) - assert ot["Q2grid"][10]["operators"].shape == (2, len(xgp), 2, len(xg)) + manipulate.xgrid_reshape(ot, xgp) + chk_keys(o1.raw, ot.raw) + assert ot[10].operator.shape == (2, len(xgp), 2, len(xg)) ott = copy.deepcopy(o1) with pytest.warns(Warning): - ott.xgrid_reshape(xg) - chk_keys(o1, ott) - np.testing.assert_allclose( - ott["Q2grid"][10]["operators"], o1["Q2grid"][10]["operators"] - ) + manipulate.xgrid_reshape(ott, xg) + chk_keys(o1.raw, ott.raw) + np.testing.assert_allclose(ott[10].operator, o1[10].operator) # only input oi = copy.deepcopy(o1) - oi.xgrid_reshape(inputgrid=xgp) - assert oi["Q2grid"][10]["operators"].shape == (2, len(xg), 2, len(xgp)) - chk_keys(o1, oi) + manipulate.xgrid_reshape(oi, inputgrid=xgp) + assert oi[10].operator.shape == (2, len(xg), 2, len(xgp)) + chk_keys(o1.raw, oi.raw) oii = copy.deepcopy(o1) with pytest.warns(Warning): - oii.xgrid_reshape(inputgrid=xg) - chk_keys(o1, oii) - np.testing.assert_allclose( - oii["Q2grid"][10]["operators"], o1["Q2grid"][10]["operators"] - ) + manipulate.xgrid_reshape(oii, inputgrid=xg) + chk_keys(o1.raw, oii.raw) + np.testing.assert_allclose(oii[10].operator, o1[10].operator) # both oit = copy.deepcopy(o1) - oit.xgrid_reshape(xgp, xgp) - chk_keys(o1, oit) + manipulate.xgrid_reshape(oit, xgp, xgp) + 
chk_keys(o1.raw, oit.raw) op = eko_identity([1, 2, len(xgp), 2, len(xgp)]) - np.testing.assert_allclose(oit["Q2grid"][10]["operators"], op[0], atol=1e-10) + np.testing.assert_allclose(oit[10].operator, op[0], atol=1e-10) # error with pytest.raises(ValueError): - copy.deepcopy(o1).xgrid_reshape() + manipulate.xgrid_reshape(copy.deepcopy(o1)) def test_reshape_io(self, fake_output): # create object - o1 = output.Output(fake_output) + o1, fake_card = fake_output + for q2, op in fake_card["Q2grid"].items(): + o1[q2] = output.Operator.from_dict(op) o2 = copy.deepcopy(o1) - o2.xgrid_reshape([0.1, 1.0], [0.1, 1.0]) - o2.flavor_reshape(inputbasis=np.array([[1, -1], [1, 1]])) + manipulate.xgrid_reshape( + o2, interpolation.XGrid([0.1, 1.0]), interpolation.XGrid([0.1, 1.0]) + ) + manipulate.flavor_reshape(o2, inputpids=np.array([[1, -1], [1, 1]])) # dump stream = io.StringIO() - o2.dump_yaml(stream) + legacy.dump_yaml(o2, stream) # reload stream.seek(0) - o3 = output.Output.load_yaml(stream) - # eko_version is only added in get_raw - del o3["eko_version"] - chk_keys(o1, o3) + o3 = legacy.load_yaml(stream) + chk_keys(o1.raw, o3.raw) - def test_flavor_reshape(self, fake_output): + def test_flavor_reshape(self, fake_output, tmp_path): # create object xg = np.geomspace(1e-5, 1.0, 21) - o1 = output.Output(fake_output) - o1["interpolation_xgrid"] = xg - o1["targetgrid"] = xg - o1["inputgrid"] = xg - o1["Q2grid"] = { - 10: dict( - operators=eko_identity([1, 2, len(xg), 2, len(xg)])[0], - operator_errors=np.zeros((2, len(xg), 2, len(xg))), - alphas=np.random.rand(), + o1, _fake_card = fake_output + o1.xgrid = xg + o1.rotations._targetgrid = xg + o1.rotations._inputgrid = xg + o1[10.0] = output.Operator.from_dict( + dict( + operator=eko_identity([1, 2, len(xg), 2, len(xg)])[0], + error=np.zeros((2, len(xg), 2, len(xg))), ) - } + ) # only target target_r = np.array([[1, -1], [1, 1]]) - ot = copy.deepcopy(o1) - ot.flavor_reshape(target_r) - chk_keys(o1, ot) - assert ot["Q2grid"][10]["operators"].shape == (2, len(xg), 2, len(xg)) - ott = copy.deepcopy(ot) - ott.flavor_reshape(np.linalg.inv(target_r)) - np.testing.assert_allclose( - ott["Q2grid"][10]["operators"], o1["Q2grid"][10]["operators"] - ) + ot = o1.deepcopy(tmp_path / "ot.tar") + manipulate.flavor_reshape(ot, target_r) + chk_keys(o1.raw, ot.raw) + assert ot[10].operator.shape == (2, len(xg), 2, len(xg)) + ott = ot.deepcopy(tmp_path / "ott.tar") + manipulate.flavor_reshape(ott, np.linalg.inv(target_r)) + np.testing.assert_allclose(ott[10].operator, o1[10].operator) with pytest.warns(Warning): - ott.flavor_reshape(np.eye(2)) - chk_keys(o1, ott) - np.testing.assert_allclose( - ott["Q2grid"][10]["operators"], o1["Q2grid"][10]["operators"] - ) + manipulate.flavor_reshape(ott, np.eye(2)) + chk_keys(o1.raw, ott.raw) + np.testing.assert_allclose(ott[10].operator, o1[10].operator) # only input input_r = np.array([[1, -1], [1, 1]]) - oi = copy.deepcopy(o1) - oi.flavor_reshape(inputbasis=input_r) - chk_keys(o1, oi) - assert oi["Q2grid"][10]["operators"].shape == (2, len(xg), 2, len(xg)) + oi = o1.deepcopy(tmp_path / "oi.tar") + manipulate.flavor_reshape(oi, inputpids=input_r) + chk_keys(o1.raw, oi.raw) + assert oi[10].operator.shape == (2, len(xg), 2, len(xg)) oii = copy.deepcopy(oi) - oii.flavor_reshape(inputbasis=np.linalg.inv(input_r)) - np.testing.assert_allclose( - oii["Q2grid"][10]["operators"], o1["Q2grid"][10]["operators"] - ) + manipulate.flavor_reshape(oii, inputpids=np.linalg.inv(input_r)) + np.testing.assert_allclose(oii[10].operator, o1[10].operator) 
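In brief, the reshaping routines are now free functions acting in place on an EKO rather than methods on it; a minimal sketch under the calling conventions used in these tests (the wrapper name is made up for illustration):

import numpy as np

from eko import interpolation
from eko.output import EKO, manipulate

def coarsen_target(eko: EKO, n: int = 11) -> None:
    """Reshape `eko` in place onto a coarser target x-grid (sketch)."""
    xgp = interpolation.XGrid(np.geomspace(1e-5, 1.0, n))
    # first positional argument is the target grid; the input side and
    # flavor rotations go via keywords (`inputgrid`, `inputpids`, ...)
    manipulate.xgrid_reshape(eko, xgp)
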
with pytest.warns(Warning): - oii.flavor_reshape(inputbasis=np.eye(2)) - chk_keys(o1, oii) - np.testing.assert_allclose( - oii["Q2grid"][10]["operators"], o1["Q2grid"][10]["operators"] - ) + manipulate.flavor_reshape(oii, inputpids=np.eye(2)) + chk_keys(o1.raw, oii.raw) + np.testing.assert_allclose(oii[10].operator, o1[10].operator) # both - oit = copy.deepcopy(o1) - oit.flavor_reshape(np.array([[1, -1], [1, 1]]), np.array([[1, -1], [1, 1]])) - chk_keys(o1, oit) + oit = o1.deepcopy(tmp_path / "oit.tar") + manipulate.flavor_reshape( + oit, np.array([[1, -1], [1, 1]]), np.array([[1, -1], [1, 1]]) + ) + chk_keys(o1.raw, oit.raw) op = eko_identity([1, 2, len(xg), 2, len(xg)]).copy() - np.testing.assert_allclose(oit["Q2grid"][10]["operators"], op[0], atol=1e-10) + np.testing.assert_allclose(oit[10].operator, op[0], atol=1e-10) # error with pytest.raises(ValueError): - copy.deepcopy(o1).flavor_reshape() + manipulate.flavor_reshape(o1.deepcopy(tmp_path / "fail.tar")) - def test_to_evol(self, fake_factory): - interpolation_xgrid = np.array([0.5, 1.0]) + def test_to_evol(self, fake_factory, tmp_path): + xgrid = np.array([0.5, 1.0]) interpolation_polynomial_degree = 1 interpolation_is_log = False q2_ref = 1 q2_out = 2 - Q2grid = fake_factory.mk_g( - [q2_out], len(br.flavor_basis_pids), len(interpolation_xgrid) - ) + Q2grid = fake_factory.mk_g([q2_out], len(br.flavor_basis_pids), len(xgrid)) d = dict( - interpolation_xgrid=interpolation_xgrid, - targetgrid=interpolation_xgrid, - inputgrid=interpolation_xgrid, - interpolation_polynomial_degree=interpolation_polynomial_degree, - interpolation_is_log=interpolation_is_log, - q2_ref=q2_ref, - inputpids=br.flavor_basis_pids, - targetpids=br.flavor_basis_pids, + Q0=np.sqrt(q2_ref), Q2grid=Q2grid, + rotations=dict( + xgrid=xgrid, + targetgrid=xgrid, + inputgrid=xgrid, + inputpids=br.flavor_basis_pids, + targetpids=br.flavor_basis_pids, + ), + configs=dict( + interpolation_polynomial_degree=interpolation_polynomial_degree, + interpolation_is_log=interpolation_is_log, + ev_op_max_order=1, + ev_op_iterations=1, + backward_inversion="exact", + ), ) - o00 = output.Output(d) - o01 = copy.copy(o00) - o01.to_evol() - o10 = copy.copy(o00) - o10.to_evol(False, True) - o11 = copy.copy(o00) - o11.to_evol(True, True) - chk_keys(o00, o11) + o00 = output.EKO.new(theory={}, operator=d) + o00[q2_out] = output.Operator(**Q2grid[q2_out]) + o01 = o00.deepcopy(tmp_path / "o01.tar") + manipulate.to_evol(o01) + o10 = o00.deepcopy(tmp_path / "o10.tar") + manipulate.to_evol(o10, False, True) + o11 = o00.deepcopy(tmp_path / "o11.tar") + manipulate.to_evol(o11, True, True) + chk_keys(o00.raw, o11.raw) # check the input rotated one - np.testing.assert_allclose(o01["inputpids"], br.evol_basis_pids) - np.testing.assert_allclose(o01["targetpids"], br.flavor_basis_pids) + np.testing.assert_allclose(o01.rotations.inputpids, br.evol_basis_pids) + np.testing.assert_allclose(o01.rotations.targetpids, br.flavor_basis_pids) # rotate also target - o01.to_evol(False, True) - np.testing.assert_allclose( - o01["Q2grid"][q2_out]["operators"], o11["Q2grid"][q2_out]["operators"] - ) - chk_keys(o00, o01) + manipulate.to_evol(o01, False, True) + np.testing.assert_allclose(o01[q2_out].operator, o11[q2_out].operator) + chk_keys(o00.raw, o01.raw) # check the target rotated one - np.testing.assert_allclose(o10["inputpids"], br.flavor_basis_pids) - np.testing.assert_allclose(o10["targetpids"], br.evol_basis_pids) + np.testing.assert_allclose(o10.rotations.inputpids, br.flavor_basis_pids) + 
np.testing.assert_allclose(o10.rotations.targetpids, br.evol_basis_pids) # rotate also input - o10.to_evol() - np.testing.assert_allclose( - o10["Q2grid"][q2_out]["operators"], o11["Q2grid"][q2_out]["operators"] - ) - chk_keys(o00, o10) + manipulate.to_evol(o10) + np.testing.assert_allclose(o10[q2_out].operator, o11[q2_out].operator) + chk_keys(o00.raw, o10.raw) diff --git a/tests/eko/test_output_struct.py b/tests/eko/test_output_struct.py new file mode 100644 index 000000000..ea8324c2d --- /dev/null +++ b/tests/eko/test_output_struct.py @@ -0,0 +1,291 @@ +# -*- coding: utf-8 -*- +import io +from dataclasses import dataclass + +import numpy as np +import numpy.typing as npt +import pytest +import yaml + +from eko import compatibility, interpolation, output +from eko.output import struct +from ekobox import operators_card as oc +from ekobox import theory_card as tc + + +@dataclass +class MyDictLike(struct.DictLike): + l: npt.NDArray + f: float + x: interpolation.XGrid + t: tuple + s: str + + +def test_DictLike(): + d = MyDictLike.from_dict( + dict( + l=np.arange(5.0), + f=np.arange(5.0)[-1], + x=interpolation.XGrid([0.1, 1.0]), + t=(1.0, 2.0), + s="s", + ) + ) + assert d.f == 4.0 + dd = MyDictLike.from_dict(d.raw) + assert dd.f == 4.0 + # check we can dump and reload + stream = io.StringIO() + yaml.safe_dump(d.raw, stream) + stream.seek(0) + ddd = yaml.safe_load(stream) + assert "l" in ddd + np.testing.assert_allclose(ddd["l"], np.arange(5.0)) + + +class TestOperator: + def test_value_only(self): + v = np.random.rand(2, 2) + opv = struct.Operator(operator=v) + assert opv.error is None + for compress in (True, False): + stream = io.BytesIO() + opv.save(stream, compress) + stream.seek(0) + opv_ = struct.Operator.load(stream, compress) + np.testing.assert_allclose(opv.operator, opv_.operator) + np.testing.assert_allclose(v, opv_.operator) + assert opv_.error is None + + def test_value_and_error(self): + v, e = np.random.rand(2, 2, 2) + opve = struct.Operator(operator=v, error=e) + for compress in (True, False): + stream = io.BytesIO() + opve.save(stream, compress) + stream.seek(0) + opve_ = struct.Operator.load(stream, compress) + np.testing.assert_allclose(opve.operator, opve_.operator) + np.testing.assert_allclose(v, opve_.operator) + np.testing.assert_allclose(opve.error, opve_.error) + np.testing.assert_allclose(e, opve_.error) + + def test_load_error(self, monkeypatch): + # We might consider dropping this exception since np.load will always return a array (or fail on it's own) + stream = io.BytesIO() + monkeypatch.setattr(np, "load", lambda _: None) + with pytest.raises(ValueError): + struct.Operator.load(stream, False) + + +class TestRotations: + def test_fallback(self): + pids = np.array([1, 2]) + xg = interpolation.XGrid([0.1, 1.0]) + r = struct.Rotations(xgrid=xg, pids=pids) + np.testing.assert_allclose(r.pids, pids) + np.testing.assert_allclose(r.targetpids, pids) + np.testing.assert_allclose(r.inputpids, pids) + assert r.xgrid == xg + assert r.targetgrid == xg + assert r.inputgrid == xg + + def test_overwrite(self): + pids = np.array([1, 2]) + tpids = np.array([3, 4]) + ipids = np.array([5, 6]) + xg = interpolation.XGrid([0.1, 1.0]) + txg = interpolation.XGrid([0.2, 1.0]) + ixg = interpolation.XGrid([0.3, 1.0]) + r = struct.Rotations( + xgrid=xg, + pids=pids, + _targetgrid=txg, + _inputgrid=ixg, + _targetpids=tpids, + _inputpids=ipids, + ) + np.testing.assert_allclose(r.pids, pids) + np.testing.assert_allclose(r.targetpids, tpids) + np.testing.assert_allclose(r.inputpids, ipids) + 
assert r.xgrid == xg + assert r.targetgrid == txg + assert r.inputgrid == ixg + + def test_init(self): + pids = np.array([1, 2]) + xg = interpolation.XGrid([0.1, 1.0]) + txg = np.array([0.2, 1.0]) + ixg = {"grid": [0.3, 1.0], "log": True} + r = struct.Rotations(xgrid=xg, pids=pids, _targetgrid=txg, _inputgrid=ixg) + assert isinstance(r.xgrid, interpolation.XGrid) + assert isinstance(r.targetgrid, interpolation.XGrid) + assert isinstance(r.inputgrid, interpolation.XGrid) + assert r.xgrid == xg + assert r.targetgrid == interpolation.XGrid(txg) + assert r.inputgrid == interpolation.XGrid.load(ixg) + + +class TestEKO: + def _default_cards(self): + t = tc.generate(0, 1.0) + o = oc.generate([10.0]) + return compatibility.update(t, o) + + def test_new_error(self, tmp_path): + nt, no = self._default_cards() + # try to write to a file different from bla + no_tar_path = tmp_path / "Blub.bla" + with pytest.raises(ValueError): + struct.EKO.new(nt, no, no_tar_path) + # try to overwrite an existing file + exists_path = tmp_path / "Blub.tar" + exists_path.write_text("Blub", encoding="utf-8") + with pytest.raises(FileExistsError): + struct.EKO.new(nt, no, exists_path) + + def test_load_error(self, tmp_path): + # try to read from a non-tar path + no_tar_path = tmp_path / "Blub.tar" + no_tar_path.write_text("Blub", encoding="utf-8") + with pytest.raises(ValueError): + struct.EKO.load(no_tar_path) + + def test_properties(self): + eko = struct.EKO.new(*self._default_cards()) + assert "mc" in eko.theory_card + assert "debug" in eko.operator_card + np.testing.assert_allclose(eko.Q2grid, np.array([10.0])) + assert 10.0 in eko + default_grid = interpolation.XGrid(eko.operator_card["rotations"]["xgrid"]) + assert eko.xgrid == default_grid + for use_target in (True, False): + assert eko.interpolator(False, use_target).xgrid == default_grid + xg = interpolation.XGrid([0.1, 1.0]) + eko.xgrid = xg + assert eko.xgrid == xg + assert "debug" in eko.raw + # check we can dump and reload + stream = io.StringIO() + yaml.safe_dump(eko.raw, stream) + stream.seek(0) + raw_eko = yaml.safe_load(stream) + assert "debug" in raw_eko + + def test_ops(self): + v = np.random.rand(2, 2) + opv = struct.Operator(operator=v) + eko = struct.EKO.new(*self._default_cards()) + # try setting not an operator + with pytest.raises(ValueError): + eko[10.0] = "bla" + # approx + eko[10.0] = opv + assert eko.approx(20.0) is None + assert eko.approx(11.0, atol=2) == 10.0 + eko[11.0] = opv + with pytest.raises(ValueError): + eko.approx(10.5, atol=2) + # iterate + for q2, q2eko in zip((10.0, 11.0), eko): + assert q2 == q2eko + np.testing.assert_allclose(v, eko[q2].operator) + for q2, (q2eko, op) in zip((10.0, 11.0), eko.items()): + assert q2 == q2eko + np.testing.assert_allclose(v, op.operator) + # getter + with pytest.raises(ValueError): + eko[12.0] + with eko.operator(10.0) as op: + np.testing.assert_allclose(v, op.operator) + # overwrite + vv = np.random.rand(2, 2) + opvv = struct.Operator(operator=vv) + eko[11.0] = opvv + np.testing.assert_allclose(vv, eko[11.0].operator) + + def test_interpolator(self): + nt, no = self._default_cards() + txg = np.geomspace(0.1, 1.0, 5) + ixg = np.geomspace(0.01, 1.0, 5) + no["rotations"]["targetgrid"] = txg + no["rotations"]["inputgrid"] = ixg + eko = struct.EKO.new(nt, no) + assert eko.interpolator(False, True).xgrid == interpolation.XGrid(txg) + assert eko.interpolator(False, False).xgrid == interpolation.XGrid(ixg) + + def test_copy(self, tmp_path): + v = np.random.rand(2, 2) + opv = struct.Operator(operator=v) + 
eko1 = struct.EKO.new(*self._default_cards()) + eko1[10.0] = opv + np.testing.assert_allclose(eko1[10.0].operator, v) + p = tmp_path / "eko2.tar" + eko2 = eko1.deepcopy(p) + np.testing.assert_allclose(eko1[10.0].operator, v) + np.testing.assert_allclose(eko2[10.0].operator, v) + vv = np.random.rand(2, 2) + opvv = struct.Operator(operator=vv) + eko2[10.0] = opvv + np.testing.assert_allclose(eko1[10.0].operator, v) + np.testing.assert_allclose(eko2[10.0].operator, vv) + # try loading again + eko2_ = struct.EKO.load(p) + assert eko2.raw == eko2_.raw + + def test_extract(self, tmp_path): + p = tmp_path / "test.tar" + eko = struct.EKO.new(*self._default_cards(), p) + # check theory file + t = struct.EKO.extract(p, struct.THEORYFILE) + assert isinstance(t, str) + tt = yaml.safe_load(io.StringIO(t)) + assert tt == eko.theory_card + # try a wrong file + with pytest.raises(KeyError): + t = struct.EKO.extract(p, "Blub.bla") + + +class TestLegacy: + def test_items(self, fake_output): + """Test autodump, autoload, and manual unload.""" + eko, fake_card = fake_output + for q2, op in fake_card["Q2grid"].items(): + eko[q2] = output.Operator.from_dict(op) + + q2 = next(iter(fake_card["Q2grid"])) + + eko._operators[q2] = None + assert isinstance(eko[q2], struct.Operator) + assert isinstance(eko._operators[q2], struct.Operator) + + del eko[q2] + + assert eko._operators[q2] is None + + def test_iter(self, fake_output): + """Test managed iteration.""" + eko, fake_card = fake_output + for q2, op in fake_card["Q2grid"].items(): + eko[q2] = output.Operator.from_dict(op) + + q2prev = None + for q2, op in eko.items(): + if q2prev is not None: + assert eko._operators[q2prev] is None + assert isinstance(op, struct.Operator) + q2prev = q2 + + def test_context_operator(self, fake_output): + """Test automated handling through context.""" + eko, fake_card = fake_output + for q2, op in fake_card["Q2grid"].items(): + eko[q2] = output.Operator.from_dict(op) + + q2 = next(iter(fake_card["Q2grid"])) + + with eko.operator(q2) as op: + assert isinstance(op, struct.Operator) + + assert eko._operators[q2] is None diff --git a/tests/eko/test_runcards.py b/tests/eko/test_runcards.py new file mode 100644 index 000000000..e8123ec2d --- /dev/null +++ b/tests/eko/test_runcards.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +from eko import runcards as rc + +fake_theory = dict(order=(2, 0)) + +fake_operator = dict(xgrid=[1e-3, 1e-2, 1e-1, 1.0]) + + +class TestTheory: + def test_init(self): + t = rc.TheoryCard(order=fake_theory["order"]) + + assert t.order == fake_theory["order"] + + def test_load(self): + t0 = rc.TheoryCard(order=fake_theory["order"]) + t1 = rc.TheoryCard.load(fake_theory) + + assert t0 == t1 + + +class TestOperator: + def test_init(self): + o = rc.OperatorCard(xgrid=fake_operator["xgrid"]) + + assert o.xgrid == fake_operator["xgrid"] + + def test_load(self): + o0 = rc.OperatorCard(xgrid=fake_operator["xgrid"]) + o1 = rc.OperatorCard.load(fake_operator) + + assert o0 == o1 diff --git a/tests/eko/test_runner.py b/tests/eko/test_runner.py index dc09c214c..c1d491022 100644 --- a/tests/eko/test_runner.py +++ b/tests/eko/test_runner.py @@ -4,7 +4,8 @@ import numpy as np import eko -from eko import compatibility +import eko.interpolation +from eko import basis_rotation as br theory_card = { "alphas": 0.35, @@ -37,15 +38,24 @@ } operators_card = { "Q2grid": [10, 100], - "interpolation_xgrid": [0.01, 0.1, 1.0], - "interpolation_polynomial_degree": 1, - "interpolation_is_log": True, - "debug_skip_singlet": True, - 
"debug_skip_non_singlet": True, - "ev_op_max_order": (2, 0), - "ev_op_iterations": 1, - "backward_inversion": "exact", - "n_integration_cores": 1, + "Q0": np.sqrt(2), + "configs": { + "interpolation_polynomial_degree": 1, + "interpolation_is_log": True, + "ev_op_max_order": (2, 0), + "ev_op_iterations": 1, + "backward_inversion": "exact", + "n_integration_cores": 1, + }, + "rotations": { + "xgrid": [0.01, 0.1, 1.0], + "pids": np.array(br.flavor_basis_pids), + "inputgrid": None, + "targetgrid": None, + "inputpids": None, + "targetpids": None, + }, + "debug": {"skip_singlet": True, "skip_non_singlet": True}, } @@ -55,88 +65,69 @@ def test_raw(): oc = copy.deepcopy(operators_card) r = eko.runner.Runner(tc, oc) o = r.get_output() - check_shapes( - o, - o["interpolation_xgrid"], - o["interpolation_xgrid"], - tc, - oc, - ) + check_shapes(o, o.xgrid, o.xgrid, tc, oc) def test_targetgrid(): # change targetgrid tc = copy.deepcopy(theory_card) oc = copy.deepcopy(operators_card) + igrid = [0.1, 1.0] + oc["rotations"]["inputgrid"] = igrid tgrid = [0.1, 1.0] - oc["targetgrid"] = tgrid + oc["rotations"]["targetgrid"] = tgrid r = eko.runner.Runner(tc, oc) o = r.get_output() check_shapes( - o, - tgrid, - o["interpolation_xgrid"], - tc, - oc, + o, eko.interpolation.XGrid(tgrid), eko.interpolation.XGrid(igrid), tc, oc ) + # check actual value + np.testing.assert_allclose(o.rotations.targetgrid.raw, tgrid) -def test_targetbasis(): - # change targetbasis +def test_rotate_pids(): + # change pids tc = copy.deepcopy(theory_card) oc = copy.deepcopy(operators_card) - oc["targetbasis"] = np.eye(14) + 0.1 * np.random.rand(14, 14) - oc["inputbasis"] = np.eye(14) + 0.1 * np.random.rand(14, 14) + oc["rotations"]["targetpids"] = np.eye(14) + 0.1 * np.random.rand(14, 14) + oc["rotations"]["inputpids"] = np.eye(14) + 0.1 * np.random.rand(14, 14) r = eko.runner.Runner(tc, oc) o = r.get_output() - check_shapes( - o, - o["interpolation_xgrid"], - o["interpolation_xgrid"], - tc, - oc, - ) + check_shapes(o, o.xgrid, o.xgrid, tc, oc) + # check actual values + assert (o.rotations.targetpids == [0] * 14).all() + assert (o.rotations.inputpids == [0] * 14).all() def check_shapes(o, txs, ixs, theory_card, operators_card): - tpids = len(o["targetpids"]) - ipids = len(o["inputpids"]) + tpids = len(o.rotations.targetpids) + ipids = len(o.rotations.inputpids) op_shape = (tpids, len(txs), ipids, len(ixs)) # check output = input - np.testing.assert_allclose( - o["interpolation_xgrid"], operators_card["interpolation_xgrid"] - ) - np.testing.assert_allclose(o["targetgrid"], txs) - np.testing.assert_allclose(o["inputgrid"], ixs) + np.testing.assert_allclose(o.xgrid.raw, operators_card["rotations"]["xgrid"]) + np.testing.assert_allclose(o.rotations.targetgrid.raw, txs.raw) + np.testing.assert_allclose(o.rotations.inputgrid.raw, ixs.raw) for k in ["interpolation_polynomial_degree", "interpolation_is_log"]: - assert o[k] == operators_card[k] - np.testing.assert_allclose(o["q2_ref"], theory_card["Q0"] ** 2) + assert getattr(o.configs, k) == operators_card["configs"][k] + np.testing.assert_allclose(o.Q02, theory_card["Q0"] ** 2) # check available operators - assert len(o["Q2grid"]) == len(operators_card["Q2grid"]) - assert list(o["Q2grid"].keys()) == operators_card["Q2grid"] - for ops in o["Q2grid"].values(): - assert "operators" in ops - assert "operator_errors" in ops - assert ops["operators"].shape == op_shape - assert ops["operator_errors"].shape == op_shape + assert len(o.Q2grid) == len(operators_card["Q2grid"]) + assert list(o.Q2grid) == 
diff --git a/tests/ekobox/test_cli.py b/tests/ekobox/test_cli.py
new file mode 100644
index 000000000..5405e78c3
--- /dev/null
+++ b/tests/ekobox/test_cli.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+from click.testing import CliRunner
+
+from ekobox.cli import command
+
+
+def test_run():
+    runner = CliRunner()
+    result = runner.invoke(command, ["run", "a"])
+    assert "Running EKO for" in result.output
diff --git a/tests/ekobox/test_evol_pdf.py b/tests/ekobox/test_evol_pdf.py
index e69de29bb..cb8ffd703 100644
--- a/tests/ekobox/test_evol_pdf.py
+++ b/tests/ekobox/test_evol_pdf.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+from banana import toy
+
+import eko
+import eko.output.legacy as out
+from ekobox import evol_pdf as ev_p
+from ekobox import operators_card as oc
+from ekobox import theory_card as tc
+
+op = oc.generate(
+    [100.0],
+    update={
+        "interpolation_xgrid": [0.1, 0.5, 1.0],
+        "interpolation_polynomial_degree": 1,
+    },
+)
+theory = tc.generate(0, 1.65)
+
+
+def test_evolve_pdfs_run(fake_lhapdf, cd):
+    n = "test_evolve_pdfs_run"
+    mytmp = fake_lhapdf / "install"
+    mytmp.mkdir()
+    store_path = mytmp / "test.tar"
+    with cd(mytmp):
+        ev_p.evolve_pdfs(
+            [toy.mkPDF("", 0)], theory, op, install=True, name=n, store_path=store_path
+        )
+    p = fake_lhapdf / n
+    assert p.exists()
+    # check dumped eko
+    assert store_path.exists()
+    assert store_path.is_file()
+    out.load_tar(store_path)
+
+
+def test_evolve_pdfs_dump_path(fake_lhapdf, cd):
+    n = "test_evolve_pdfs_dump_path"
+    peko = fake_lhapdf / ev_p.ekofileid(theory, op)
+    out.dump_tar(eko.run_dglap(theory, op), peko)
+    assert peko.exists()
+    with cd(fake_lhapdf):
+        ev_p.evolve_pdfs([toy.mkPDF("", 0)], theory, op, name=n, path=fake_lhapdf)
+    p = fake_lhapdf / n
+    assert p.exists()
+
+
+def test_evolve_pdfs_dump_file(fake_lhapdf, cd):
+    n = "test_evolve_pdfs_dump_file"
+    peko = fake_lhapdf / ev_p.ekofileid(theory, op)
+    out.dump_tar(eko.run_dglap(theory, op), peko)
+    assert peko.exists()
+    with cd(fake_lhapdf):
+        ev_p.evolve_pdfs([toy.mkPDF("", 0)], theory, op, name=n, path=peko)
+    p = fake_lhapdf / n
+    assert p.exists()
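
Note: these tests drive evolve_pdfs end to end: it runs (or reuses) an EKO and writes an evolved LHAPDF set. A hedged sketch of a typical call, with keyword names as they appear in the tests; the set name and store path are illustrative:

    from banana import toy
    from ekobox import evol_pdf as ev_p
    from ekobox import operators_card as oc
    from ekobox import theory_card as tc

    op = oc.generate([100.0], update={"interpolation_xgrid": [0.1, 0.5, 1.0]})
    theory = tc.generate(0, 1.65)
    # evolve the toy PDF, install the result into the LHAPDF data directory,
    # and store the computed operator for later reuse
    ev_p.evolve_pdfs(
        [toy.mkPDF("", 0)],
        theory,
        op,
        install=True,
        name="MyEvolvedToy",
        store_path="my_eko.tar",
    )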
diff --git a/tests/ekobox/test_genpdf.py b/tests/ekobox/test_genpdf.py
new file mode 100644
index 000000000..3f68ae9b4
--- /dev/null
+++ b/tests/ekobox/test_genpdf.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import pytest
+
+from eko import basis_rotation as br
+from ekobox import genpdf
+
+
+def test_genpdf_exceptions(tmp_path, cd):
+    with cd(tmp_path):
+        # wrong label
+        with pytest.raises(TypeError):
+            genpdf.generate_pdf(
+                "test_genpdf_exceptions1",
+                ["f"],
+                {
+                    21: lambda x, Q2: 3.0 * x * (1.0 - x),
+                    2: lambda x, Q2: 4.0 * x * (1.0 - x),
+                },
+            )
+        # wrong parent pdf
+        with pytest.raises(ValueError):
+            genpdf.generate_pdf(
+                "test_genpdf_exceptions2",
+                ["g"],
+                10,
+            )
+        # non-existent PDF set
+        with pytest.raises(FileNotFoundError):
+            genpdf.install_pdf("foo")
+        with pytest.raises(TypeError):
+            genpdf.generate_pdf("debug", [21], info_update=(10, 15, 20))
+
+
+def test_generate_block():
+    xg = np.linspace(0.0, 1.0, 5)
+    q2s = np.geomspace(1.0, 1e3, 5)
+    pids = np.arange(3)
+    b = genpdf.generate_block(lambda pid, x, q2: pid * x * q2, xg, q2s, pids)
+    assert isinstance(b, dict)
+    assert sorted(b.keys()) == sorted(["data", "Q2grid", "xgrid", "pids"])
+    assert isinstance(b["data"], np.ndarray)
+    assert b["data"].shape == (len(xg) * len(q2s), len(pids))
+
+
+def test_install_pdf(fake_lhapdf, cd):
+    # start inside a subdirectory, so that installing has to move the set
+    mytmp = fake_lhapdf / "install"
+    mytmp.mkdir()
+    n = "test_install_pdf"
+    p = mytmp / n
+    i = "test.info"
+    with cd(mytmp):
+        with pytest.raises(FileNotFoundError):
+            genpdf.install_pdf(p)
+        p.mkdir()
+        (p / i).write_text("Bla")
+        genpdf.install_pdf(p)
+    pp = fake_lhapdf / n
+    assert not p.exists()
+    assert pp.exists()
+    ppi = pp / i
+    assert ppi.exists()
+    assert "Bla" == ppi.read_text()
+
+
+def test_generate_pdf_debug_pid(fake_lhapdf, cd):
+    mytmp = fake_lhapdf / "install"
+    mytmp.mkdir()
+    n = "test_generate_pdf_debug_pid"
+    xg = np.linspace(0.0, 1.0, 5)
+    q2s = np.geomspace(1.0, 1e3, 7)
+    p = mytmp / n
+    i = f"{n}.info"
+    with cd(mytmp):
+        genpdf.generate_pdf(
+            n,
+            [21],
+            None,
+            info_update={"Debug": "debug"},
+            install=True,
+            xgrid=xg,
+            Q2grid=q2s,
+        )
+    pp = fake_lhapdf / n
+    assert not p.exists()
+    assert pp.exists()
+    # check info file
+    ppi = pp / i
+    assert ppi.exists()
+    assert "Debug: debug" in ppi.read_text()
+    ii = genpdf.load.load_info_from_file(n)
+    assert "Debug" in ii
+    assert ii["Debug"] == "debug"
+    # check member file
+    ppm = pp / f"{n}_0000.dat"
+    assert ppm.exists()
+    assert "PdfType: central" in ppm.read_text()
+    head, blocks = genpdf.load.load_blocks_from_file(n, 0)
+    assert "PdfType: central" in head
+    assert len(blocks) == 1
+    b = blocks[0]
+    assert 21 in b["pids"]
+    for k, line in enumerate(b["data"]):
+        for pid, f in zip(b["pids"], line):
+            # the gluon is non-zero in the bulk - x=0 is included here
+            if (
+                pid == 21
+                and k > len(b["Q2grid"]) - 1
+                and k < len(b["data"]) - len(b["Q2grid"])
+            ):
+                assert not f == 0.0
+            else:
+                assert f == 0.0
+
+
+def test_generate_pdf_pdf_evol(fake_lhapdf, fake_nn31, fake_mstw, fake_ct14, cd):
+    # iterate over pdfs with their error type and number of blocks
+    for fake_pdf, err_type, nmem, nb in (
+        (fake_nn31, "replica", 1, 2),
+        (fake_mstw, "error", 1, 3),
+        (fake_ct14, "", 0, 1),
+    ):
+        n = f"test_generate_pdf_{err_type}_evol"
+        p = fake_lhapdf / n
+        i = f"{n}.info"
+        pi = p / i
+        with cd(fake_lhapdf):
+            genpdf.generate_pdf(
+                n,
+                ["S"],
+                fake_pdf,
+                members=nmem > 0,
+            )
+        assert p.exists()
+        # check info file
+        assert pi.exists()
+        # check member files
+        for m in range(nmem + 1):
+            pm = p / f"{n}_{m:04d}.dat"
+            assert pm.exists()
+            head, blocks = genpdf.load.load_blocks_from_file(n, m)
+            assert ("PdfType: central" if m == 0 else f"PdfType: {err_type}") in head
+            assert len(blocks) == nb
+            for b in blocks:
+                for k, line in enumerate(b["data"]):
+                    for pid, f in zip(b["pids"], line):
+                        # the singlet is non-zero in the bulk - x >= 0
+                        if abs(pid) in range(1, 6 + 1) and k < len(b["data"]) - len(
+                            b["Q2grid"]
+                        ):
+                            assert not f == 0.0
+                            assert not np.isclose(f, 0.0, atol=1e-15)
+                        else:
+                            # MSTW is not 0 at the end, but only almost
+                            if err_type == "error" and k >= len(b["data"]) - len(
+                                b["Q2grid"]
+                            ):
+                                np.testing.assert_allclose(f, 0.0, atol=1e-15)
+                            else:
+                                assert f == 0.0
+
+
+def test_generate_pdf_toy_antiqed(fake_lhapdf, cd):
+    # build an anti-QED-singlet flavor combination on top of the toy PDF
+    n = "test_generate_pdf_toy_antiqed"
+    xg = np.linspace(1e-5, 1.0, 5)
+    q2s = np.geomspace(1.0, 1e3, 7)
+    anti_qed_singlet = np.zeros_like(br.flavor_basis_pids, dtype=np.float_)
+    anti_qed_singlet[br.flavor_basis_pids.index(1)] = -4
+    anti_qed_singlet[br.flavor_basis_pids.index(-1)] = -4
+    anti_qed_singlet[br.flavor_basis_pids.index(2)] = 1
+    anti_qed_singlet[br.flavor_basis_pids.index(-2)] = 1
+    p = fake_lhapdf / n
+    i = f"{n}.info"
+    pi = p / i
+    with cd(fake_lhapdf):
+        genpdf.generate_pdf(
+            n,
+            [anti_qed_singlet],
+            "toy",
+            xgrid=xg,
+            Q2grid=q2s,
+        )
+    assert p.exists()
+    # check info file
+    assert pi.exists()
+    # check member file
+    pm = p / f"{n}_0000.dat"
+    assert pm.exists()
+    assert "PdfType: central" in pm.read_text()
+    head, blocks = genpdf.load.load_blocks_from_file(n, 0)
+    assert "PdfType: central" in head
+    assert len(blocks) == 1
+    b = blocks[0]
+    for k, line in enumerate(b["data"]):
+        for pid, f in zip(b["pids"], line):
+            # u and d are non-zero in the bulk - x=0 is not included here
+            if abs(pid) in [1, 2] and k < len(b["data"]) - len(b["Q2grid"]):
+                assert not f == 0.0
+            else:
+                assert f == 0.0
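
Note: generate_pdf is exercised above in three modes: a debug set from a bare pid with no parent, the projection of existing sets onto evolution-basis labels, and a custom flavor combination on top of the toy PDF. A hedged sketch of the first mode, with keywords as in the tests; the set name is illustrative:

    import numpy as np
    from ekobox import genpdf

    # a gluon-only debug set: all other flavor entries stay zero
    genpdf.generate_pdf(
        "MyDebugGluon",                     # name of the new set
        [21],                               # labels: pids or evolution names
        None,                               # no parent set -> a debug shape
        xgrid=np.linspace(1e-5, 1.0, 50),
        Q2grid=np.geomspace(1.0, 1e3, 20),
        install=True,                       # move into the LHAPDF data dir
    )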
diff --git a/tests/ekobox/test_genpdf_cli.py b/tests/ekobox/test_genpdf_cli.py
new file mode 100644
index 000000000..ec973140e
--- /dev/null
+++ b/tests/ekobox/test_genpdf_cli.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+from click.testing import CliRunner
+
+from ekobox.genpdf.cli import cli
+
+
+def test_genpdf_CLI_messages():
+    runner = CliRunner()
+    result = runner.invoke(cli, ["generate"])
+    assert "Error: Missing argument 'NAME'." in result.output
+    result = runner.invoke(cli, ["install"])
+    assert "Error: Missing argument 'NAME'." in result.output
+    result = runner.invoke(cli, ["generate", "TestEmptyLabels"])
+    assert result.exception and "Labels must contain at least one element" in str(
+        result.exception
+    )
+    result = runner.invoke(cli, ["generate", "--help"])
+    assert "-p, --parent-pdf-set TEXT" in result.output
+    assert "-m, --members" in result.output
+    assert "-i, --install" in result.output
+
+
+def test_genpdf_CLI(fake_lhapdf, cd):
+    mytmp = fake_lhapdf / "install"
+    mytmp.mkdir()
+    n = "test_genpdf_CLI"
+    runner = CliRunner()
+    with cd(mytmp):
+        result_gen = runner.invoke(cli, ["generate", n, "21"])
+        assert result_gen.exception is None
+        result_inst = runner.invoke(cli, ["install", n])
+        assert result_inst.exception is None
+    p = fake_lhapdf / n
+    assert p.exists()
+    pi = p / f"{n}.info"
+    assert pi.exists()
+    pm = p / f"{n}_0000.dat"
+    assert pm.exists()
diff --git a/tests/ekobox/test_genpdf_export.py b/tests/ekobox/test_genpdf_export.py
new file mode 100644
index 000000000..578308136
--- /dev/null
+++ b/tests/ekobox/test_genpdf_export.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import yaml
+
+from ekobox import genpdf
+
+
+def test_list_to_str():
+    a = genpdf.export.list_to_str([1, 2])
+    assert isinstance(a, str)
+    assert "1." in a
+    b = genpdf.export.list_to_str([1.0, 2.0])
+    assert isinstance(b, str)
+    assert "1." in b
+
+
+def test_array_to_str():
+    s = (2, 2)
+    a = genpdf.export.array_to_str(np.arange(4).reshape(s))
+    assert isinstance(a, str)
+    assert "1." in a
+    b = np.array([e.split() for e in a.splitlines()])
+    assert b.shape == s
+
+
+def test_dump_info(tmp_path):
+    n = "test"
+    p = tmp_path / n
+    f = p / f"{n}.info"
+    i = {"a": "b", "c": 2}
+    genpdf.export.dump_info(p, i)
+    assert p.exists()
+    assert f.exists()
+    # the file might not be perfect yaml, but it should be yaml compatible
+    with open(f, "r", encoding="utf-8") as o:
+        ii = yaml.safe_load(o)
+    for k, v in i.items():
+        assert k in ii
+        assert ii[k] == v
+
+
+def fake_blocks(n_blocks, n_x, n_q2, n_pids):
+    bs = []
+    for _ in range(n_blocks):
+        bs.append(
+            {
+                "xgrid": np.linspace(0.0, 1.0, n_x),
+                "Q2grid": np.geomspace(1.0, 1e3, n_q2),
+                "pids": np.arange(n_pids),
+                "data": np.random.rand(n_x * n_q2, n_pids),
+            }
+        )
+    return bs
+
+
+def test_dump_blocks(tmp_path):
+    n = "test"
+    p = tmp_path / n
+    nb = 2
+    for m in range(3):
+        f = p / f"{n}_{m:04d}.dat"
+        is_my_type = m > 1
+        pdf_type = "Bla: blub" if is_my_type else None
+        genpdf.export.dump_blocks(p, m, fake_blocks(nb, 2, 2, 2), pdf_type=pdf_type)
+        assert p.exists()
+        assert f.exists()
+        cnt = f.read_text()
+        if is_my_type:
+            assert "Bla: blub" in cnt
+        else:
+            assert ("central" in cnt) == (m == 0)
+        assert "Format" in cnt
+        assert cnt.count("---") == nb + 1
+
+
+def test_dump_set(tmp_path):
+    n = "test"
+    p = tmp_path / n
+    i = {"a": "b", "c": 2}
+    nmem = 2
+    for pdf_type_list in (None, ["Bla: Blub"] * nmem):
+        genpdf.export.dump_set(
+            p, i, [fake_blocks(2, 2, 2, 2) for _ in range(nmem)], pdf_type_list
+        )
+        assert p.exists()
+        f = p / f"{n}.info"
+        assert f.exists()
+        for m in range(nmem):
+            f = p / f"{n}_{m:04d}.dat"
+            assert f.exists()
+            if pdf_type_list is not None:
+                assert "Bla: Blub" in f.read_text()
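
Note: all the member-file checks in this diff rely on the same block layout: data has one row per (x, Q2) pair with x running slowest, so the first and last len(Q2grid) rows sit at the first and last x points. That is exactly why the pid tests slice off len(b["Q2grid"]) rows to exclude the grid endpoints where the PDF vanishes. A small self-contained check of that indexing (plain numpy, illustrative grids):

    import numpy as np

    xgrid = np.array([0.1, 0.5, 1.0])   # n_x = 3
    q2grid = np.array([1.0, 10.0])      # n_q2 = 2
    # rows run x-major: (x0,q0), (x0,q1), (x1,q0), ...
    rows = [(x, q2) for x in xgrid for q2 in q2grid]
    assert len(rows) == len(xgrid) * len(q2grid)
    k = 3
    assert rows[k] == (xgrid[k // len(q2grid)], q2grid[k % len(q2grid)])
    # the last len(q2grid) rows all sit at the final x point
    assert all(x == xgrid[-1] for x, _ in rows[-len(q2grid):])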
diff --git a/tests/ekobox/test_genpdf_flavors.py b/tests/ekobox/test_genpdf_flavors.py
new file mode 100644
index 000000000..19013e2ce
--- /dev/null
+++ b/tests/ekobox/test_genpdf_flavors.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+from ekobox import genpdf
+
+
+def test_is_evolution():
+    assert genpdf.flavors.is_evolution_labels(["V", "T3"])
+    assert not genpdf.flavors.is_evolution_labels([[1, 2]])
+    assert not genpdf.flavors.is_evolution_labels(["21", "2"])
+
+
+def test_is_pids():
+    assert not genpdf.flavors.is_pid_labels(["V", "T3"])
+    assert not genpdf.flavors.is_pid_labels(["35", "9"])
+    assert not genpdf.flavors.is_pid_labels({})
+    assert not genpdf.flavors.is_pid_labels([[1, 2]])
+    assert genpdf.flavors.is_pid_labels([21, 2])
+
+
+def test_flavors_pid_to_flavor():
+    flavs = genpdf.flavors.pid_to_flavor([1, 2, 21, -3])
+    for f in flavs:
+        for g in flavs:
+            if not np.allclose(f, g):
+                assert f @ g == 0
+
+
+def test_flavors_evol_to_flavor():
+    flavs = genpdf.flavors.evol_to_flavor(["S", "g", "T3", "V8"])
+    for f in flavs:
+        for g in flavs:
+            if not np.allclose(f, g):
+                assert f @ g == 0
+
+
+def test_flavors_evol_raw():
+    blocks = [
+        {
+            "Q2grid": np.array([1, 2]),
+            "xgrid": np.array([0.1, 1.0]),
+            "pids": np.array([-1, 21, 1]),
+            "data": np.array([[0.1, 0.2, 0.1]] * 4),
+        }
+    ]
+    gonly = genpdf.flavors.project(blocks, genpdf.flavors.evol_to_flavor(["g"]))
+    assert len(gonly) == 1
+    np.testing.assert_allclose(
+        gonly[0]["data"],
+        np.array(
+            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 4
+        ),
+    )
+    Sonly = genpdf.flavors.project(blocks, genpdf.flavors.evol_to_flavor(["S"]))
+    assert len(Sonly) == 1
+    for i in [0, 1, 2, 3]:
+        # g and gamma are zero
+        np.testing.assert_allclose(Sonly[0]["data"][i][7], 0)
+        np.testing.assert_allclose(Sonly[0]["data"][i][0], 0)
+        # the quarks are all equal, and equal to the anti-quarks
+        for pid in [2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]:
+            np.testing.assert_allclose(Sonly[0]["data"][i][pid], Sonly[0]["data"][i][1])
+
+
+def test_flavors_evol_nodata():
+    # try with a block without data
+    blocks = [
+        {
+            "Q2grid": np.array([1, 2]),
+            "xgrid": np.array([0.1, 1.0]),
+            "pids": np.array([-1, 21, 1]),
+            "data": np.array([]),
+        },
+        {
+            "Q2grid": np.array([1, 2]),
+            "xgrid": np.array([0.1, 1.0]),
+            "pids": np.array([-1, 21, 1]),
+            "data": np.array([[0.1, 0.2, 0.1]] * 4),
+        },
+    ]
+    gonly = genpdf.flavors.project(blocks, genpdf.flavors.evol_to_flavor(["g"]))
+    assert len(gonly) == 2
+    np.testing.assert_allclose(
+        gonly[1]["data"],
+        np.array(
+            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] * 4
+        ),
+    )
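
Note: flavors.project re-expresses block data in terms of the requested flavor combinations, each a weight vector over the pids; the tests above check that projecting onto the gluon keeps only the gluon column. One way to realize that projection in plain numpy, using a toy 3-pid basis instead of the full 14-entry flavor basis:

    import numpy as np

    pids = [-1, 21, 1]                        # dbar, g, d
    data = np.array([[0.1, 0.2, 0.1]] * 4)    # rows: (x, Q2) points
    gluon = np.array([0.0, 1.0, 0.0])         # weight vector selecting g
    # keep only the gluon component of every row
    projected = np.outer(data @ gluon, gluon)
    np.testing.assert_allclose(projected[:, 1], 0.2)
    np.testing.assert_allclose(projected[:, [0, 2]], 0.0)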
diff --git a/tests/ekobox/test_genpdf_load.py b/tests/ekobox/test_genpdf_load.py
new file mode 100644
index 000000000..00e823001
--- /dev/null
+++ b/tests/ekobox/test_genpdf_load.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+
+from ekobox import genpdf
+
+
+def test_load_info(fake_ct14):
+    info = genpdf.load.load_info_from_file(fake_ct14)
+    assert "SetDesc" in info
+    assert "fake" in info["SetDesc"]
+    assert sorted(info["Flavors"]) == sorted([-3, -2, -1, 21, 1, 2, 3])
+
+
+def test_load_data_ct14(fake_ct14):
+    blocks = genpdf.load.load_blocks_from_file(fake_ct14, 0)[1]
+    assert len(blocks) == 1
+    b0 = blocks[0]
+    assert isinstance(b0, dict)
+    assert sorted(b0.keys()) == sorted(["pids", "xgrid", "Q2grid", "data"])
+    assert sorted(b0["pids"]) == sorted([-3, -2, -1, 21, 1, 2, 3])
+    assert len(b0["data"].T) == 7
+    np.testing.assert_allclose(b0["xgrid"][0], 1e-9)
diff --git a/benchmarks/ekobox/benchmark_gen_info.py b/tests/ekobox/test_info_file.py
similarity index 63%
rename from benchmarks/ekobox/benchmark_gen_info.py
rename to tests/ekobox/test_info_file.py
index 8e8f65566..8cdadabc4 100644
--- a/benchmarks/ekobox/benchmark_gen_info.py
+++ b/tests/ekobox/test_info_file.py
@@ -2,18 +2,16 @@
 import math
 
 import numpy as np
-import pytest
 
-from ekobox import gen_info as g_i
-from ekobox import gen_op as g_o
-from ekobox import gen_theory as g_t
+from ekobox import info_file
+from ekobox import operators_card as oc
+from ekobox import theory_card as tc
 
 
-@pytest.mark.isolated
-def benchmark_create_info_file():
-    op = g_o.gen_op_card([10, 100])
-    theory = g_t.gen_theory_card(1, 10.0, update={"alphas": 0.2})
-    info = g_i.create_info_file(
+def test_build():
+    op = oc.generate([10, 100])
+    theory = tc.generate(1, 10.0, update={"alphas": 0.2})
+    info = info_file.build(
         theory, op, 4, info_update={"SetDesc": "Prova", "NewArg": 15.3, "MTop": 1.0}
     )
     assert info["AlphaS_MZ"] == 0.2
diff --git a/tests/ekobox/test_mock.py b/tests/ekobox/test_mock.py
new file mode 100644
index 000000000..0d9311086
--- /dev/null
+++ b/tests/ekobox/test_mock.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+
+from ekobox import mock
+
+
+def test_eko_identity():
+    for s in ((1, 2, 2, 2, 2), (1, 3, 3, 3, 3)):
+        i = mock.eko_identity(s)
+        assert s == i.shape
+        # is identity?
+        f = np.random.rand(*s[-2:])
+        g = np.einsum("qkbja,ja->qkb", i, f)
+        np.testing.assert_allclose(g[0], f)
diff --git a/benchmarks/ekobox/benchmark_gen_op.py b/tests/ekobox/test_operators_card.py
similarity index 51%
rename from benchmarks/ekobox/benchmark_gen_op.py
rename to tests/ekobox/test_operators_card.py
index cacc832cd..55fc51ffa 100644
--- a/benchmarks/ekobox/benchmark_gen_op.py
+++ b/tests/ekobox/test_operators_card.py
@@ -1,30 +1,28 @@
 # -*- coding: utf-8 -*-
 import pytest
 
-from ekobox import gen_op as g_o
+from ekobox import operators_card as oc
 
 
-@pytest.mark.isolated
-def benchmark_gen_op_card():
-    op = g_o.gen_op_card([10, 100])
+def test_generate_ocard():
+    op = oc.generate([10, 100])
     assert op["Q2grid"] == [10, 100]
     assert op["interpolation_polynomial_degree"] == 4
     up_err = {"Prova": "Prova"}
     with pytest.raises(ValueError):
-        op = g_o.gen_op_card([10], update=up_err)
+        op = oc.generate([10], update=up_err)
     up = {"interpolation_polynomial_degree": 2, "interpolation_is_log": False}
-    op = g_o.gen_op_card([100], update=up)
+    op = oc.generate([100], update=up)
     assert op["Q2grid"] == [100]
     assert op["interpolation_polynomial_degree"] == 2
     assert op["interpolation_is_log"] is False
 
 
-@pytest.mark.isolated
-def benchmark_export_load_op_card(tmp_path, cd):
+def test_dump_load_op_card(tmp_path, cd):
     with cd(tmp_path):
-        op = g_o.gen_op_card([100], name="debug_op")
-        g_o.export_op_card("debug_op_two", op)
-        op_loaded = g_o.import_op_card("debug_op.yaml")
-        op_two_loaded = g_o.import_op_card("debug_op_two.yaml")
+        op = oc.generate([100], name="debug_op")
+        oc.dump("debug_op_two", op)
+        op_loaded = oc.load("debug_op.yaml")
+        op_two_loaded = oc.load("debug_op_two.yaml")
     for key in op.keys():
         assert op[key] == op_loaded[key] == op_two_loaded[key]
diff --git a/tests/ekobox/test_theory_card.py b/tests/ekobox/test_theory_card.py
new file mode 100644
index 000000000..7b1f55ead
--- /dev/null
+++ b/tests/ekobox/test_theory_card.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+import pytest
+
+from ekobox import theory_card as tc
+
+
+def test_generate_theory_card():
+    theory = tc.generate(0, 1.0)
+    assert theory["PTO"] == 0
+    assert theory["Q0"] == 1.0
+    assert theory["mt"] == 173.07
+    up_err = {"Prova": "Prova"}
+    with pytest.raises(ValueError):
+        theory = tc.generate(0, 1.0, update=up_err)
+    up = {"mb": 132.3, "PTO": 2}
+    theory = tc.generate(0, 1.0, update=up)
+    assert theory["PTO"] == 2
+    assert theory["mb"] == 132.3
+
+
+def test_dump_load_theory_card(tmp_path, cd):
+    with cd(tmp_path):
+        theory = tc.generate(2, 12.3, name="debug_theory")
+        tc.dump("debug_theory_two", theory)
+        theory_loaded = tc.load("debug_theory.yaml")
+        theory_two_loaded = tc.load("debug_theory_two.yaml")
+    for key in theory.keys():
+        assert theory[key] == theory_loaded[key] == theory_two_loaded[key]
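
Note: both card helpers share the same three-function surface, generate, dump, and load, tested above. A round-trip sketch; judging from the tests, dump appends the .yaml extension itself, so load must be given the full file name (this behavior is inferred, not documented here):

    from ekobox import theory_card as tc

    theory = tc.generate(0, 1.0, update={"mb": 4.92})  # (PTO, Q0) plus overrides
    tc.dump("my_theory", theory)                       # writes my_theory.yaml
    assert tc.load("my_theory.yaml")["mb"] == 4.92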
diff --git a/tests/ekobox/test_utils.py b/tests/ekobox/test_utils.py
new file mode 100644
index 000000000..e773696bb
--- /dev/null
+++ b/tests/ekobox/test_utils.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import pytest
+
+import eko
+from ekobox import operators_card as oc
+from ekobox import theory_card as tc
+from ekobox import utils
+
+
+def test_ekos_product():
+    # generate two ekos
+    op1 = oc.generate(
+        [60.0, 80.0, 100.0],
+        update={
+            "interpolation_xgrid": [0.1, 0.5, 1.0],
+            "interpolation_polynomial_degree": 1,
+        },
+    )
+    theory1 = tc.generate(0, 5.0)
+
+    op2 = oc.generate(
+        [80.0, 100.0, 120.0],
+        update={
+            "interpolation_xgrid": [0.1, 0.5, 1.0],
+            "interpolation_polynomial_degree": 1,
+        },
+    )
+    theory2 = tc.generate(0, 10.0)
+    theory_err = tc.generate(0, 5.0)
+
+    eko_ini = eko.run_dglap(theory1, op1)
+    eko_fin = eko.run_dglap(theory2, op2)
+    # a mismatched starting scale has to be rejected
+    eko_fin_err = eko.run_dglap(theory_err, op2)
+    with pytest.raises(ValueError):
+        _ = utils.ekos_product(eko_ini, eko_fin_err)
+    # product is copied
+    eko_res = utils.ekos_product(eko_ini, eko_fin, in_place=False)
+
+    assert eko_res.Q02 == eko_ini.Q02
+    np.testing.assert_allclose(eko_res.Q2grid[1:], eko_fin.Q2grid)
+    np.testing.assert_allclose(eko_ini[80.0].operator, eko_res[80.0].operator)
+
+    # product overwrites initial
+    eko_res2 = utils.ekos_product(eko_ini, eko_fin)
+
+    assert eko_res2.Q02 == eko_ini.Q02
+    np.testing.assert_allclose(eko_res2.Q2grid[1:], eko_fin.Q2grid)
+    np.testing.assert_allclose(eko_res[80.0].operator, eko_res2[80.0].operator)
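
Note: ekos_product composes two evolution operators. Judging from the test above, the second eko's starting scale Q0^2 must appear among the first one's target Q2grid (hence the ValueError for the 5 GeV vs. 10 GeV mismatch), and the joined eko keeps the first Q02 while covering the second Q2grid. A hedged sketch:

    import eko
    from ekobox import operators_card as oc
    from ekobox import theory_card as tc
    from ekobox import utils

    # first leg: from Q0 = 5 GeV up to (and including) 100 GeV^2
    eko_ini = eko.run_dglap(tc.generate(0, 5.0), oc.generate([80.0, 100.0]))
    # second leg: starts at Q0 = 10 GeV, i.e. Q0^2 = 100 GeV^2 is in the grid above
    eko_fin = eko.run_dglap(tc.generate(0, 10.0), oc.generate([120.0]))
    # in_place=False returns a copy instead of overwriting eko_ini
    joined = utils.ekos_product(eko_ini, eko_fin, in_place=False)
    assert joined.Q02 == eko_ini.Q02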
diff --git a/tests/ekomark/test_apply.py b/tests/ekomark/test_apply.py
index 8869d8201..46c6997c8 100644
--- a/tests/ekomark/test_apply.py
+++ b/tests/ekomark/test_apply.py
@@ -1,26 +1,24 @@
 # -*- coding: utf-8 -*-
 import numpy as np
 
-from eko import output
 from ekomark import apply
 
 
 class TestApply:
-    def test_apply(self, fake_output, fake_pdf):
-        q2_out = list(fake_output["Q2grid"].keys())[0]
-        # create object
-        o = output.Output(fake_output)
+    def test_apply(self, fake_legacy, fake_pdf):
+        o, fake_card = fake_legacy
+        q2_out = list(fake_card["Q2grid"].keys())[0]
         # fake pdfs
         pdf_grid = apply.apply_pdf(o, fake_pdf)
-        assert len(pdf_grid) == len(fake_output["Q2grid"])
+        assert len(pdf_grid) == len(fake_card["Q2grid"])
         pdfs = pdf_grid[q2_out]["pdfs"]
-        assert list(pdfs.keys()) == fake_output["targetpids"]
-        ref_pid1 = fake_output["Q2grid"][q2_out]["operators"][0, :, 1, :] @ np.ones(
-            len(fake_output["interpolation_xgrid"])
+        assert list(pdfs.keys()) == o.rotations.targetpids
+        ref_pid1 = fake_card["Q2grid"][q2_out]["operator"][0, :, 1, :] @ np.ones(
+            len(o.rotations.xgrid)
         )
         np.testing.assert_allclose(pdfs[0], ref_pid1)
-        ref_pid2 = fake_output["Q2grid"][q2_out]["operators"][1, :, 1, :] @ np.ones(
-            len(fake_output["interpolation_xgrid"])
+        ref_pid2 = fake_card["Q2grid"][q2_out]["operator"][1, :, 1, :] @ np.ones(
+            len(o.rotations.xgrid)
         )
         np.testing.assert_allclose(pdfs[1], ref_pid2)
         # rotate to target_grid
@@ -28,27 +26,27 @@ def test_apply(self, fake_output, fake_pdf):
         pdf_grid = apply.apply_pdf(o, fake_pdf, target_grid)
         assert len(pdf_grid) == 1
         pdfs = pdf_grid[q2_out]["pdfs"]
-        assert list(pdfs.keys()) == fake_output["targetpids"]
+        assert list(pdfs.keys()) == o.rotations.targetpids
 
-    def test_apply_flavor(self, fake_output, fake_pdf, monkeypatch):
-        q2_out = list(fake_output["Q2grid"].keys())[0]
-        # create object
-        o = output.Output(fake_output)
+    def test_apply_flavor(self, fake_legacy, fake_pdf, monkeypatch):
+        o, fake_card = fake_legacy
+        q2_out = list(fake_card["Q2grid"].keys())[0]
         # fake pdfs
         monkeypatch.setattr(
             "eko.basis_rotation.rotate_flavor_to_evolution", np.ones((2, 2))
         )
         monkeypatch.setattr(
-            "eko.basis_rotation.flavor_basis_pids", fake_output["targetpids"]
+            "eko.basis_rotation.flavor_basis_pids",
+            o.rotations.targetpids,
         )
         fake_evol_basis = ("a", "b")
         monkeypatch.setattr("eko.basis_rotation.evol_basis", fake_evol_basis)
         pdf_grid = apply.apply_pdf(o, fake_pdf, rotate_to_evolution_basis=True)
-        assert len(pdf_grid) == len(fake_output["Q2grid"])
+        assert len(pdf_grid) == len(fake_card["Q2grid"])
         pdfs = pdf_grid[q2_out]["pdfs"]
         assert list(pdfs.keys()) == list(fake_evol_basis)
         ref_a = (
-            fake_output["Q2grid"][q2_out]["operators"][0, :, 1, :]
-            + fake_output["Q2grid"][q2_out]["operators"][1, :, 1, :]
-        ) @ np.ones(len(fake_output["interpolation_xgrid"]))
+            fake_card["Q2grid"][q2_out]["operator"][0, :, 1, :]
+            + fake_card["Q2grid"][q2_out]["operator"][1, :, 1, :]
+        ) @ np.ones(len(o.rotations.xgrid))
         np.testing.assert_allclose(pdfs["a"], ref_a)