diff --git a/benchmarks/CT14_bench.py b/benchmarks/CT14_bench.py index 31a1e87d3..716c604ba 100644 --- a/benchmarks/CT14_bench.py +++ b/benchmarks/CT14_bench.py @@ -34,7 +34,7 @@ class BenchmarkCT14(Runner): # Rotate to evolution basis rotate_to_evolution_basis = False - def benchmark_llo_NF3(self, Q0=5, Q2grid=(100,)): + def benchmark_llo_NF3(self, Q0=5, mugrid=(10,)): theory_card = base_theory.copy() theory_card.update( { @@ -47,7 +47,7 @@ def benchmark_llo_NF3(self, Q0=5, Q2grid=(100,)): "MaxNfAs": 3, } ) - operator_card = {"Q2grid": list(Q2grid)} + operator_card = {"mugrid": list(mugrid)} self.skip_pdfs = lambda _theory: [ -6, -5, @@ -66,7 +66,7 @@ def benchmark_llo_NF3(self, Q0=5, Q2grid=(100,)): ] self.run([theory_card], [operator_card], ["CT14llo_NF3"]) - def benchmark_llo_NF4(self, Q0=5, Q2grid=(100,)): + def benchmark_llo_NF4(self, Q0=5, mugrid=(10,)): theory_card = base_theory.copy() theory_card.update( { @@ -79,7 +79,7 @@ def benchmark_llo_NF4(self, Q0=5, Q2grid=(100,)): "MaxNfAs": 4, } ) - operator_card = {"Q2grid": list(Q2grid)} + operator_card = {"mugrid": list(mugrid)} self.skip_pdfs = lambda _theory: [ -6, -5, @@ -94,7 +94,7 @@ def benchmark_llo_NF4(self, Q0=5, Q2grid=(100,)): ] self.run([theory_card], [operator_card], ["CT14llo_NF4"]) - def benchmark_llo_NF6(self, Q0=10, Q2grid=(1e6,)): + def benchmark_llo_NF6(self, Q0=10, mugrid=(30,)): theory_card = base_theory.copy() theory_card.update( { @@ -107,7 +107,7 @@ def benchmark_llo_NF6(self, Q0=10, Q2grid=(1e6,)): "MaxNfAs": 6, } ) - operator_card = {"Q2grid": list(Q2grid)} + operator_card = {"mugrid": list(mugrid)} self.skip_pdfs = lambda _theory: [22, "ph"] self.run([theory_card], [operator_card], ["CT14llo_NF6"]) diff --git a/benchmarks/CT18_bench.py b/benchmarks/CT18_bench.py index cc7c06e47..9777244f5 100644 --- a/benchmarks/CT18_bench.py +++ b/benchmarks/CT18_bench.py @@ -30,7 +30,7 @@ class BenchmarkCT18(Runner): # Rotate to evolution basis rotate_to_evolution_basis = True - def benchmark_nnlo(self, Q0=1.295, Q2grid=(1e4,)): + def benchmark_nnlo(self, Q0=1.295, mugrid=(1e4,)): theory_card = base_theory.copy() theory_card.update( { @@ -43,7 +43,7 @@ def benchmark_nnlo(self, Q0=1.295, Q2grid=(1e4,)): "MaxNfAs": 5, } ) - operator_card = {"Q2grid": list(Q2grid)} + operator_card = {"mugrid": list(mugrid)} self.skip_pdfs = lambda _theory: [ -6, 6, @@ -54,7 +54,7 @@ def benchmark_nnlo(self, Q0=1.295, Q2grid=(1e4,)): ] self.run([theory_card], [operator_card], ["CT18NNLO"]) - def benchmark_nnlo_qed(self, Q0=1.295, Q2grid=(1e4,)): + def benchmark_nnlo_qed(self, Q0=1.295, mugrid=(1e4,)): theory_card = base_theory.copy() theory_card.update( { @@ -67,7 +67,7 @@ def benchmark_nnlo_qed(self, Q0=1.295, Q2grid=(1e4,)): "MaxNfAs": 5, } ) - operator_card = {"Q2grid": list(Q2grid)} + operator_card = {"mugrid": list(mugrid)} self.skip_pdfs = lambda _theory: [ -6, 6, @@ -76,7 +76,7 @@ def benchmark_nnlo_qed(self, Q0=1.295, Q2grid=(1e4,)): ] self.run([theory_card], [operator_card], ["CT18qed"]) - def benchmark_znnlo(self, Q0=1.3, Q2grid=(1e4,)): + def benchmark_znnlo(self, Q0=1.3, mugrid=(1e4,)): theory_card = base_theory.copy() theory_card.update( { @@ -90,7 +90,7 @@ def benchmark_znnlo(self, Q0=1.3, Q2grid=(1e4,)): "mc": 1.4, } ) - operator_card = {"Q2grid": list(Q2grid)} + operator_card = {"mugrid": list(mugrid)} self.skip_pdfs = lambda _theory: [ -6, 6, diff --git a/benchmarks/DSSV_bench.py b/benchmarks/DSSV_bench.py index f10b0200d..3df7af356 100644 --- a/benchmarks/DSSV_bench.py +++ b/benchmarks/DSSV_bench.py @@ -3,7 +3,6 @@ Note that 
the PDF set is private, but can be obtained from the authors upon request. """ -import numpy as np from banana import register from eko import interpolation @@ -23,7 +22,7 @@ class BenchmarkDSSV(Runner): def skip_pdfs(self, _theory): return [-6, 6, 5, -5, 4, -4, 22, "ph", "T35", "V35", "T24", "V24", "T15", "V15"] - def benchmark(self, Q0=1.65, Q2grid=(100,)): + def benchmark(self, Q0=1.65, mugrid=(100,)): theory_card = { "Qref": 1.0, "mc": 1.4, @@ -42,7 +41,7 @@ def benchmark(self, Q0=1.65, Q2grid=(100,)): } operator_card = { - "Q2grid": list(Q2grid), + "mugrid": list(mugrid), "polarized": [True], "interpolation_xgrid": interpolation.lambertgrid(50, 1e-5), } @@ -50,7 +49,7 @@ def benchmark(self, Q0=1.65, Q2grid=(100,)): if __name__ == "__main__": - low2 = 5**2 - high2 = 30**2 + low = 5 + high = 30 dssv = BenchmarkDSSV() - dssv.benchmark(Q0=np.sqrt(low2), Q2grid=[high2]) + dssv.benchmark(Q0=low, mugrid=[high]) diff --git a/benchmarks/HERA20_bench.py b/benchmarks/HERA20_bench.py index 057a25954..ce7c6714b 100644 --- a/benchmarks/HERA20_bench.py +++ b/benchmarks/HERA20_bench.py @@ -35,7 +35,7 @@ class BenchmarkHERA20(Runner): # Rotate to evolution basis rotate_to_evolution_basis = True - def benchmark_nnlo(self, Q0=1.3, Q2grid=(1e4,)): + def benchmark_nnlo(self, Q0=1.3, mugrid=(100,)): theory_card = base_theory.copy() theory_card.update( { @@ -47,7 +47,7 @@ def benchmark_nnlo(self, Q0=1.3, Q2grid=(1e4,)): } ) operator_card = base_op.copy() - operator_card.update({"Q2grid": list(Q2grid)}) + operator_card.update({"mugrid": list(mugrid)}) self.skip_pdfs = lambda _theory: [ -6, 6, diff --git a/benchmarks/NNPDF_bench.py b/benchmarks/NNPDF_bench.py index 5a443d5c4..f21c29c1e 100644 --- a/benchmarks/NNPDF_bench.py +++ b/benchmarks/NNPDF_bench.py @@ -1,7 +1,6 @@ """ Benchmark NNPDF pdf family """ -import numpy as np from banana import register from eko import interpolation @@ -51,7 +50,7 @@ def skip_pdfs(self, _theory): class BenchmarkNNPDF31(BenchmarkNNPDF): """Benchmark NNPDF3.1""" - def benchmark_nlo(self, Q0=1.65, Q2grid=(100,)): + def benchmark_nlo(self, Q0=1.65, mugrid=(10,)): theory_card = { **base_theory, "PTO": 1, @@ -59,14 +58,14 @@ def benchmark_nlo(self, Q0=1.65, Q2grid=(100,)): "Q0": Q0, } - operator_card = {**base_operator, "Q2grid": list(Q2grid)} + operator_card = {**base_operator, "mugrid": list(mugrid)} self.run([theory_card], [operator_card], ["NNPDF31_nlo_as_0118"]) class BenchmarkNNPDF31_luxqed(BenchmarkNNPDF): """Benchmark NNPDF3.1_luxqed""" - def benchmark_nnlo(self, Q0=1.65, Q2grid=(100,)): + def benchmark_nnlo(self, Q0=1.65, mugrid=(10,)): theory_card = { **base_theory, "PTO": 2, @@ -84,7 +83,7 @@ def benchmark_nnlo(self, Q0=1.65, Q2grid=(100,)): operator_card = { **base_operator, - "Q2grid": list(Q2grid), + "mugrid": list(mugrid), "ev_op_iterations": 10, } self.run([theory_card], [operator_card], ["NNPDF31_nnlo_as_0118_luxqed"]) @@ -93,7 +92,7 @@ def benchmark_nnlo(self, Q0=1.65, Q2grid=(100,)): class BenchmarkNNPDF40(BenchmarkNNPDF): """Benchmark NNPDF4.0""" - def benchmark_nlo(self, Q0=1.65, Q2grid=(100,)): + def benchmark_nlo(self, Q0=1.65, mugrid=(10,)): theory_card = { **base_theory, "PTO": 1, @@ -101,10 +100,10 @@ def benchmark_nlo(self, Q0=1.65, Q2grid=(100,)): "Q0": Q0, } - operator_card = {**base_operator, "Q2grid": list(Q2grid)} + operator_card = {**base_operator, "mugrid": list(mugrid)} self.run([theory_card], [operator_card], ["NNPDF40_nlo_as_01180"]) - def benchmark_nnlo(self, Q0=1.65, Q2grid=(100,)): + def benchmark_nnlo(self, Q0=1.65, mugrid=(10,)): theory_card 
= { **base_theory, "PTO": 2, @@ -114,14 +113,14 @@ def benchmark_nnlo(self, Q0=1.65, Q2grid=(100,)): "Q0": Q0, } - operator_card = {**base_operator, "Q2grid": list(Q2grid)} + operator_card = {**base_operator, "mugrid": list(mugrid)} self.run([theory_card], [operator_card], ["NNPDF40_nnlo_as_01180"]) class BenchmarkNNPDFpol11(BenchmarkNNPDF): """Benchmark NNPDFpol11""" - def benchmark(self, Q0=1.65, Q2grid=(100,)): + def benchmark(self, Q0=1.65, mugrid=(10,)): theory_card = { "Qref": 91.2, "mc": 1.41421, @@ -140,7 +139,7 @@ def benchmark(self, Q0=1.65, Q2grid=(100,)): operator_card = { **base_operator, - "Q2grid": list(Q2grid), + "mugrid": list(mugrid), "polarized": True, "interpolation_xgrid": interpolation.lambertgrid(50, 1e-5), } @@ -148,17 +147,17 @@ def benchmark(self, Q0=1.65, Q2grid=(100,)): if __name__ == "__main__": - low2 = 5**2 - high2 = 30**2 + low = 5 + high = 30 # nn31 = BenchmarkNNPDF31() # # test forward - # nn31.benchmark_nlo(Q0=np.sqrt(low2), Q2grid=[10]) + # nn31.benchmark_nlo(Q0=low, mugrid=[10]) # # test backward - # #nn31.benchmark_nlo(Q0=np.sqrt(high2), Q2grid=[low2]) + # #nn31.benchmark_nlo(Q0=high, mugrid=[low]) nn40 = BenchmarkNNPDF40() - nn40.benchmark_nnlo(Q2grid=[100]) - # nn40.benchmark_nnlo(Q0=np.sqrt(high2), Q2grid=[low2]) + nn40.benchmark_nnlo(mugrid=[10]) + # nn40.benchmark_nnlo(Q0=high, mugrid=[low]) # nnpol = BenchmarkNNPDFpol11() - # nnpol.benchmark(Q0=np.sqrt(low2), Q2grid=[high2]) + # nnpol.benchmark(Q0=low, mugrid=[high]) # obj = BenchmarkNNPDF31_luxqed() # obj.benchmark_nnlo(Q0=5.0) diff --git a/benchmarks/apfel_bench.py b/benchmarks/apfel_bench.py index a04838cfc..0f9524f90 100644 --- a/benchmarks/apfel_bench.py +++ b/benchmarks/apfel_bench.py @@ -128,7 +128,7 @@ def benchmark_msbar(self, pto): "HQ": ["MSBAR"], } ) - self.run(cartesian_product(th), operators.build({"Q2grid": [[100]]}), ["ToyLH"]) + self.run(cartesian_product(th), operators.build({"mugrid": [[10]]}), ["ToyLH"]) class BenchmarkFFNS(ApfelBenchmark): diff --git a/benchmarks/conftest.py b/benchmarks/conftest.py index b38da1506..f70a42d16 100644 --- a/benchmarks/conftest.py +++ b/benchmarks/conftest.py @@ -25,7 +25,7 @@ def set_(flavors: int) -> TheoryCard: @pytest.fixture def operator_card(): card = cards.example.operator() - card.rotations.xgrid = interpolation.XGrid([0.1, 0.3, 0.5, 1.0]) + card.xgrid = interpolation.XGrid([0.1, 0.3, 0.5, 1.0]) card.configs.interpolation_polynomial_degree = 2 return card diff --git a/benchmarks/eko/benchmark_evol_to_unity.py b/benchmarks/eko/benchmark_evol_to_unity.py index 978d11c62..a67dcb6ac 100644 --- a/benchmarks/eko/benchmark_evol_to_unity.py +++ b/benchmarks/eko/benchmark_evol_to_unity.py @@ -1,5 +1,4 @@ import pathlib -from math import nan import numpy as np import pytest @@ -10,25 +9,27 @@ from eko.io import types from eko.io.runcards import OperatorCard, TheoryCard from eko.runner.legacy import Runner +from eko.quantities.couplings import CouplingsInfo # from ekore.matching_conditions.operator_matrix_element import OperatorMatrixElement def update_cards(theory: TheoryCard, operator: OperatorCard): - theory.couplings = types.CouplingsRef( - alphas=types.FloatRef(value=0.35, scale=float(np.sqrt(2))), - alphaem=types.FloatRef(value=0.007496, scale=nan), + theory.couplings = CouplingsInfo( + alphas=0.35, + alphaem=0.007496, + scale=float(np.sqrt(2)), max_num_flavs=6, num_flavs_ref=None, ) - theory.num_flavs_init = 4 - theory.intrinsic_flavors = [4, 5] - theory.quark_masses.c.value = 1.0 - theory.quark_masses.b.value = 4.75 - 
theory.quark_masses.t.value = 173.0 + theory.heavy.num_flavs_init = 4 + theory.heavy.intrinsic_flavors = [4, 5] + theory.heavy.masses.c.value = 1.0 + theory.heavy.masses.b.value = 4.75 + theory.heavy.masses.t.value = 173.0 operator.mu0 = float(np.sqrt(2)) - operator.mu2grid = [10] - operator.rotations.xgrid = XGrid(np.linspace(1e-1, 1, 30)) + operator.mugrid = [(10, 5)] + operator.xgrid = XGrid(np.linspace(1e-1, 1, 30)) operator.configs.interpolation_polynomial_degree = 1 operator.configs.ev_op_max_order = (2, 0) operator.configs.ev_op_iterations = 1 diff --git a/benchmarks/eko/benchmark_msbar_evolution.py b/benchmarks/eko/benchmark_msbar_evolution.py index 3ffbe5e22..26b8873af 100644 --- a/benchmarks/eko/benchmark_msbar_evolution.py +++ b/benchmarks/eko/benchmark_msbar_evolution.py @@ -6,6 +6,8 @@ from eko.couplings import Couplings, couplings_mod_ev from eko.io import types from eko.io.runcards import OperatorCard, TheoryCard +from eko.quantities.couplings import CouplingEvolutionMethod +from eko.quantities.heavy_quarks import QuarkMassRef, QuarkMassScheme # try to load APFEL - if not available, we'll use the cached values try: @@ -18,13 +20,13 @@ def update_theory(theory: TheoryCard): theory.order = (3, 0) - theory.couplings.alphas.scale = 91 - theory.couplings.alphaem.value = 0.007496 + theory.couplings.scale = 91 + theory.couplings.alphaem = 0.007496 theory.couplings.num_flavs_ref = 5 - theory.quark_masses_scheme = types.QuarkMassSchemes.MSBAR - theory.quark_masses.c = types.QuarkMassRef(value=1.5, scale=18) - theory.quark_masses.b = types.QuarkMassRef(value=4.1, scale=20) - theory.quark_masses.t = types.QuarkMassRef(value=175.0, scale=175.0) + theory.heavy.masses_scheme = QuarkMassScheme.MSBAR + theory.heavy.masses.c = QuarkMassRef([1.5, 18]) + theory.heavy.masses.b = QuarkMassRef([4.1, 20]) + theory.heavy.masses.t = QuarkMassRef([175.0, 175.0]) @pytest.mark.isolated @@ -34,10 +36,10 @@ def benchmark_APFEL_msbar_evolution( ): update_theory(theory_card) bench_values = dict(zip(np.power([1, 96, 150], 2), [3, 5, 5])) - theory_card.quark_masses.c = types.QuarkMassRef(value=1.4, scale=2.0) - theory_card.quark_masses.b = types.QuarkMassRef(value=4.5, scale=4.5) + theory_card.heavy.masses.c = QuarkMassRef([1.4, 2.0]) + theory_card.heavy.masses.b = QuarkMassRef([4.5, 4.5]) coupl = theory_card.couplings - qmasses = theory_card.quark_masses + qmasses = theory_card.heavy.masses apfel_vals_dict = { "exact": { @@ -95,9 +97,9 @@ def benchmark_APFEL_msbar_evolution( else types.EvolutionMethod.ITERATE_EXACT ) couplevmeth = ( - types.CouplingEvolutionMethod.EXPANDED + CouplingEvolutionMethod.EXPANDED if method == "expanded" - else types.CouplingEvolutionMethod.EXACT + else CouplingEvolutionMethod.EXACT ) for order in [1, 2, 3]: theory_card.order = (order, 0) @@ -105,16 +107,20 @@ def benchmark_APFEL_msbar_evolution( couplings=theory_card.couplings, order=theory_card.order, masses=msbar_masses.compute( - theory_card.quark_masses, + theory_card.heavy.masses, couplings=theory_card.couplings, order=theory_card.order, evmeth=couplevmeth, - matching=np.power(list(iter(theory_card.matching)), 2.0), + matching=np.power( + list(iter(theory_card.heavy.matching_ratios)), 2.0 + ), xif2=theory_card.xif**2, ).tolist(), - thresholds_ratios=np.power(list(iter(theory_card.matching)), 2.0), + thresholds_ratios=np.power( + list(iter(theory_card.heavy.matching_ratios)), 2.0 + ), method=couplevmeth, - hqm_scheme=theory_card.quark_masses_scheme, + hqm_scheme=theory_card.heavy.masses_scheme, ) my_vals = [] for Q2, nf_to in 
bench_values.items(): @@ -140,7 +146,7 @@ def benchmark_APFEL_msbar_evolution( apfel.SetTheory("QCD") apfel.SetPerturbativeOrder(order - 1) apfel.SetAlphaEvolution(method) - apfel.SetAlphaQCDRef(coupl.alphas.value, coupl.alphas.scale) + apfel.SetAlphaQCDRef(coupl.alphas, coupl.scale) apfel.SetVFNS() apfel.SetMSbarMasses( qmasses.c.value, qmasses.b.value, qmasses.t.value @@ -187,15 +193,15 @@ def benchmark_APFEL_msbar_solution( theory = theory_card operator = operator_card coupl = theory_card.couplings - qmasses = theory_card.quark_masses + qmasses = theory_card.heavy.masses for order in [1, 2, 3]: theory.order = (order, 0) my_masses = msbar_masses.compute( - theory.quark_masses, + theory.heavy.masses, couplings=theory_card.couplings, order=theory_card.order, - evmeth=types.CouplingEvolutionMethod.EXACT, - matching=np.power(list(iter(theory_card.matching)), 2.0), + evmeth=CouplingEvolutionMethod.EXACT, + matching=np.power(list(iter(theory_card.heavy.matching_ratios)), 2.0), xif2=theory_card.xif**2, ) # get APFEL numbers - if available else use cache @@ -208,7 +214,7 @@ def benchmark_APFEL_msbar_solution( apfel.SetTheory("QCD") apfel.SetPerturbativeOrder(order - 1) apfel.SetAlphaEvolution("exact") - apfel.SetAlphaQCDRef(coupl.alphas.value, coupl.alphas.scale) + apfel.SetAlphaQCDRef(coupl.alphas, coupl.scale) apfel.SetVFNS() apfel.SetMSbarMasses(qmasses.c.value, qmasses.b.value, qmasses.t.value) apfel.SetMassScaleReference( @@ -230,26 +236,26 @@ def benchmark_msbar_solution_kthr(self, theory_card: TheoryCard): """ update_theory(theory_card) theory_card.order = (1, 0) - theory_card.matching.c = 1.2 - theory_card.matching.b = 1.8 - theory_card.matching.t = 1.0 + theory_card.heavy.matching_ratios.c = 1.2 + theory_card.heavy.matching_ratios.b = 1.8 + theory_card.heavy.matching_ratios.t = 1.0 my_masses_thr = msbar_masses.compute( - theory_card.quark_masses, + theory_card.heavy.masses, couplings=theory_card.couplings, order=theory_card.order, - evmeth=types.CouplingEvolutionMethod.EXACT, - matching=np.power(list(iter(theory_card.matching)), 2.0), + evmeth=CouplingEvolutionMethod.EXACT, + matching=np.power(list(iter(theory_card.heavy.matching_ratios)), 2.0), xif2=theory_card.xif**2, ) apfel_masses_thr = [1.9891, 4.5102, 175.0000] - theory_card.matching.c = 1.0 - theory_card.matching.b = 1.0 + theory_card.heavy.matching_ratios.c = 1.0 + theory_card.heavy.matching_ratios.b = 1.0 my_masses_plain = msbar_masses.compute( - theory_card.quark_masses, + theory_card.heavy.masses, couplings=theory_card.couplings, order=theory_card.order, - evmeth=types.CouplingEvolutionMethod.EXACT, - matching=np.power(list(iter(theory_card.matching)), 2.0), + evmeth=CouplingEvolutionMethod.EXACT, + matching=np.power(list(iter(theory_card.heavy.matching_ratios)), 2.0), xif2=theory_card.xif**2, ) diff --git a/benchmarks/eko/benchmark_strong_coupling.py b/benchmarks/eko/benchmark_strong_coupling.py index 85dd1341b..44e9c72fe 100644 --- a/benchmarks/eko/benchmark_strong_coupling.py +++ b/benchmarks/eko/benchmark_strong_coupling.py @@ -1,5 +1,4 @@ """This module benchmarks alpha_s against LHAPDF and APFEL.""" -from math import nan import numpy as np import pytest @@ -7,8 +6,9 @@ from eko import thresholds from eko.beta import beta_qcd from eko.couplings import Couplings -from eko.io import types from eko.io.runcards import TheoryCard +from eko.quantities.couplings import CouplingEvolutionMethod, CouplingsInfo +from eko.quantities.heavy_quarks import QuarkMassScheme # try to load LHAPDF - if not available, we'll use the cached 
values try: @@ -38,10 +38,11 @@ def ref_couplings( ref_values, ref_scale: float, -) -> types.CouplingsRef: - return types.CouplingsRef( - alphas=types.FloatRef(value=ref_values[0], scale=ref_scale), - alphaem=types.FloatRef(value=ref_values[1], scale=nan), +) -> CouplingsInfo: + return CouplingsInfo( + alphas=ref_values[0], + alphaem=ref_values[1], + scale=ref_scale, max_num_flavs=6, num_flavs_ref=None, ) @@ -61,14 +62,14 @@ def test_a_s(self): as_FFNS_LO = Couplings( couplings=ref, order=(1, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=[0, 0, np.inf], - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) # check local np.testing.assert_approx_equal( - as_FFNS_LO.a(ref_mu2)[0], ref.alphas.value / 4.0 / np.pi + as_FFNS_LO.a(ref_mu2)[0], ref.alphas / 4.0 / np.pi ) # check high result = as_FFNS_LO.a(ask_q2)[0] @@ -83,9 +84,9 @@ def benchmark_LHA_paper(self): as_FFNS_LO = Couplings( couplings=coupling_ref, order=(1, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=[0, np.inf, np.inf], - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) me = as_FFNS_LO.a(1e4)[0] * 4 * np.pi @@ -96,9 +97,9 @@ def benchmark_LHA_paper(self): as_VFNS_LO = Couplings( couplings=coupling_ref, order=(1, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=threshold_list, - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) me = as_VFNS_LO.a(1e4)[0] * 4 * np.pi @@ -142,9 +143,9 @@ def benchmark_APFEL_ffns(self): as_FFNS = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), - method=types.CouplingEvolutionMethod.EXPANDED, + method=CouplingEvolutionMethod.EXPANDED, masses=threshold_holder.area_walls[1:-1], - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) my_vals = [] @@ -217,9 +218,9 @@ def benchmark_pegasus_ffns(self): as_FFNS = Couplings( couplings=couplings, order=(order, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=threshold_holder.area_walls[1:-1], - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) my_vals = [] @@ -293,9 +294,9 @@ def benchmark_APFEL_vfns(self): as_VFNS = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), - method=types.CouplingEvolutionMethod.EXPANDED, + method=CouplingEvolutionMethod.EXPANDED, masses=threshold_list, - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) my_vals = [] @@ -438,9 +439,9 @@ def benchmark_APFEL_vfns_fact_to_ren(self): as_VFNS = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=threshold_list.tolist(), - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=np.array([1.0, 1.0, 1.0]) / fact_to_ren_lin**2, ) my_vals = [] @@ -488,9 +489,9 @@ def benchmark_APFEL_vfns_threshold(self): as_VFNS = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), - method=types.CouplingEvolutionMethod.EXPANDED, + method=CouplingEvolutionMethod.EXPANDED, masses=threshold_list.tolist(), - 
hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=thresholds_ratios, ) my_vals = [] @@ -539,10 +540,10 @@ def benchmark_APFEL_vfns_msbar(self): as_VFNS = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), - method=types.CouplingEvolutionMethod.EXPANDED, + method=CouplingEvolutionMethod.EXPANDED, masses=m2s.tolist(), thresholds_ratios=[1.0, 1.0, 1.0], - hqm_scheme=types.QuarkMassSchemes.MSBAR, + hqm_scheme=QuarkMassScheme.MSBAR, ) my_vals = [] for Q2 in Q2s: @@ -587,9 +588,9 @@ def benchmark_lhapdf_ffns_lo(self): as_FFNS_LO = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(1, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=threshold_holder.area_walls[1:-1], - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) my_vals = [] @@ -661,9 +662,9 @@ def benchmark_apfel_exact(self): sc = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=threshold_holder.area_walls[1:-1], - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) my_vals = [] @@ -736,9 +737,9 @@ def benchmark_lhapdf_exact(self): sc = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=threshold_holder.area_walls[1:-1], - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) my_vals = [] @@ -786,9 +787,9 @@ def benchmark_lhapdf_zmvfns_lo(self): as_VFNS_LO = Couplings( couplings=ref_couplings(coupling_ref, np.sqrt(scale_ref)), order=(1, 0), - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=threshold_list, - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) my_vals = [] @@ -834,27 +835,27 @@ def benchmark_lhapdf_zmvfns_lo(self): def benchmark_APFEL_fact_to_ren_lha_settings(self, theory_card: TheoryCard): theory = theory_card theory.order = (3, 0) - theory.couplings.alphas.value = 0.35 - theory.couplings.alphas.scale = float(np.sqrt(2)) - theory.couplings.alphaem.value = 0.007496 + theory.couplings.alphas = 0.35 + theory.couplings.scale = float(np.sqrt(2)) + theory.couplings.alphaem = 0.007496 theory.couplings.num_flavs_ref = 4 - theory.num_flavs_init = 3 + theory.heavy.num_flavs_init = 3 theory.xif = np.sqrt(2.0) - theory.quark_masses.c.value = np.sqrt(2.0) - theory.quark_masses.b.value = 4.5 - theory.quark_masses.t.value = 175.0 - qmasses = theory.quark_masses + theory.heavy.masses.c.value = np.sqrt(2.0) + theory.heavy.masses.b.value = 4.5 + theory.heavy.masses.t.value = 175.0 + qmasses = theory.heavy.masses - masses = tuple(mq.value**2 for mq in theory.quark_masses) + masses = tuple(mq.value**2 for mq in theory.heavy.masses) mu2s = [2.0] sc = Couplings( couplings=theory.couplings, order=theory.order, - method=types.CouplingEvolutionMethod.EXACT, + method=CouplingEvolutionMethod.EXACT, masses=[m2 / theory.xif**2 for m2 in masses], - hqm_scheme=types.QuarkMassSchemes.POLE, - thresholds_ratios=np.power(list(iter(theory.matching)), 2.0), + hqm_scheme=QuarkMassScheme.POLE, + thresholds_ratios=np.power(list(iter(theory.heavy.matching_ratios)), 2.0), ) xif2 = theory.xif**2 for mu2 in mu2s: @@ 
-881,13 +882,13 @@ def benchmark_APFEL_fact_to_ren_lha_settings(self, theory_card: TheoryCard): apfel.SetTheory("QCD") apfel.SetPerturbativeOrder(theory.order[0] - 1) apfel.SetAlphaEvolution("exact") - apfel.SetAlphaQCDRef( - theory.couplings.alphas.value, theory.couplings.alphas.scale - ) + apfel.SetAlphaQCDRef(theory.couplings.alphas, theory.couplings.scale) apfel.SetVFNS() apfel.SetPoleMasses(qmasses.c.value, qmasses.b.value, qmasses.t.value) apfel.SetMassMatchingScales( - theory.matching.c, theory.matching.b, theory.matching.t + theory.heavy.matching_ratios.c, + theory.heavy.matching_ratios.b, + theory.heavy.matching_ratios.t, ) apfel.SetRenFacRatio(1.0 / theory.xif) apfel.InitializeAPFEL() diff --git a/benchmarks/ekobox/benchmark_evol_pdf.py b/benchmarks/ekobox/benchmark_evol_pdf.py index a01af69d1..3a9dc0f4a 100644 --- a/benchmarks/ekobox/benchmark_evol_pdf.py +++ b/benchmarks/ekobox/benchmark_evol_pdf.py @@ -17,20 +17,20 @@ def benchmark_evolve_single_member( tmp_path, cd, lhapdf_path, theory_card: TheoryCard, operator_card: OperatorCard ): - mu2grid = [20.0, 100.0, 10000.0] + mugrid = [(3.0, 4), (10.0, 5), (100.0, 5)] theory = theory_card theory.order = (1, 0) - theory.couplings.alphas.value = 0.118000 - theory.couplings.alphas.scale = 91.1876 - theory.couplings.alphaem.value = 0.007496 + theory.couplings.alphas = 0.118000 + theory.couplings.scale = 91.1876 + theory.couplings.alphaem = 0.007496 theory.couplings.max_num_flavs = 3 - theory.num_flavs_max_pdf = 3 - theory.quark_masses.c.value = 1.3 - theory.quark_masses.b.value = 4.75 - theory.quark_masses.t.value = 172 + theory.heavy.num_flavs_max_pdf = 3 + theory.heavy.masses.c.value = 1.3 + theory.heavy.masses.b.value = 4.75 + theory.heavy.masses.t.value = 172 op = operator_card op.mu0 = 5.0 - op.mu2grid = mu2grid + op.mugrid = mugrid # lhapdf import (maybe i have to dump with a x*), do plots) with lhapdf_path(test_pdf): pdf = lhapdf.mkPDF("myCT14llo_NF3", 0) @@ -47,24 +47,24 @@ def benchmark_evolve_single_member( all_blocks = (load.load_blocks_from_file("EvPDF", 0))[1] info = load.load_info_from_file("EvPDF") ev_pdf = lhapdf.mkPDF("EvPDF", 0) - assert info["XMin"] == op.rotations.xgrid.raw[0] + assert info["XMin"] == op.xgrid.raw[0] assert info["SetDesc"] == "MyEvolvedPDF" - assert info["MZ"] == theory.couplings.alphas.scale + assert info["MZ"] == theory.couplings.scale assert info["Debug"] == "Debug" - xgrid = op.rotations.xgrid.raw - for Q2 in [20.0, 100.0, 10000.0]: + xgrid = op.xgrid.raw + for idx, mu2 in enumerate(op.mu2grid): for x in xgrid[10:40]: for pid in [21, 1, -1, 2, -2, 3, -3]: np.testing.assert_allclose( - pdf.xfxQ2(pid, x, Q2), - all_blocks[0]["data"][ - mu2grid.index(Q2) + xgrid.tolist().index(x) * len(mu2grid) - ][br.flavor_basis_pids.index(pid)], + pdf.xfxQ2(pid, x, mu2), + all_blocks[0]["data"][idx + xgrid.tolist().index(x) * len(mugrid)][ + br.flavor_basis_pids.index(pid) + ], rtol=1e-2, ) np.testing.assert_allclose( - pdf.xfxQ2(pid, x, Q2), - ev_pdf.xfxQ2(pid, x, Q2), + pdf.xfxQ2(pid, x, mu2), + ev_pdf.xfxQ2(pid, x, mu2), rtol=1e-2, ) @@ -77,8 +77,8 @@ def benchmark_evolve_more_members( theory.order = (1, 0) op = operator_card op.mu0 = 1.0 - op.mu2grid = [10, 100] - op.rotations.xgrid = XGrid([1e-7, 0.01, 0.1, 0.2, 0.3]) + op.mugrid = [(10.0, 5), (100.0, 5)] + op.xgrid = XGrid([1e-7, 0.01, 0.1, 0.2, 0.3]) with lhapdf_path(test_pdf): pdfs = lhapdf.mkPDFs("myMSTW2008nlo90cl") d = tmp_path / "sub" @@ -98,9 +98,9 @@ def benchmark_evolve_more_members( new_pdf_1 = lhapdf.mkPDF("Debug", 0) new_pdf_2 = 
lhapdf.mkPDF("Debug", 1) info = load.load_info_from_file("Debug") - assert info["XMin"] == op.rotations.xgrid.raw[0] + assert info["XMin"] == op.xgrid.raw[0] assert len(pdfs) == len(new_pdfs) - for Q2 in [10, 100]: + for mu2 in [10, 100]: for x in [1e-7, 0.01, 0.1, 0.2, 0.3]: for pid in [21, 1, 2]: - assert new_pdf_1.xfxQ2(pid, x, Q2) != new_pdf_2.xfxQ2(pid, x, Q2) + assert new_pdf_1.xfxQ2(pid, x, mu2) != new_pdf_2.xfxQ2(pid, x, mu2) diff --git a/benchmarks/ekobox/genpdf/benchmark_export.py b/benchmarks/ekobox/genpdf/benchmark_export.py index 1a88a77ba..716767b21 100644 --- a/benchmarks/ekobox/genpdf/benchmark_export.py +++ b/benchmarks/ekobox/genpdf/benchmark_export.py @@ -55,11 +55,11 @@ def benchmark_dump_blocks(tmp_path, cd): np.testing.assert_allclose(v, blocks2[0][k]) _pdf = lhapdf.mkPDF("new_pdf", 0) for x in new_blocks[0]["xgrid"][1:-1]: - for q2 in new_blocks[0]["Q2grid"]: + for mu2 in new_blocks[0]["mu2grid"]: data_from_block = new_blocks[0]["data"][ - new_blocks[0]["Q2grid"].index(q2) - + len(new_blocks[0]["Q2grid"]) + new_blocks[0]["mu2grid"].index(mu2) + + len(new_blocks[0]["mu2grid"]) * list(new_blocks[0]["xgrid"]).index(x) ][6] - data_from_pdf = _pdf.xfxQ2(21, x, q2) + data_from_pdf = _pdf.xfxQ2(21, x, mu2) np.testing.assert_allclose(data_from_block, data_from_pdf) diff --git a/benchmarks/ekobox/genpdf/benchmark_load.py b/benchmarks/ekobox/genpdf/benchmark_load.py index dfcfd04eb..149a4553d 100644 --- a/benchmarks/ekobox/genpdf/benchmark_load.py +++ b/benchmarks/ekobox/genpdf/benchmark_load.py @@ -18,7 +18,7 @@ def benchmark_load_data_ct14(): assert len(blocks) == 1 b0 = blocks[0] assert isinstance(b0, dict) - assert sorted(b0.keys()) == sorted(["pids", "xgrid", "Q2grid", "data"]) + assert sorted(b0.keys()) == sorted(["pids", "xgrid", "mu2grid", "data"]) assert sorted(b0["pids"]) == sorted([-3, -2, -1, 21, 1, 2, 3]) assert len(b0["data"].T) == 7 np.testing.assert_allclose(b0["xgrid"][0], 1e-9) @@ -31,7 +31,7 @@ def benchmark_load_data_mstw(): assert len(blocks) == 3 b0 = blocks[0] assert isinstance(b0, dict) - assert sorted(b0.keys()) == sorted(["pids", "xgrid", "Q2grid", "data"]) + assert sorted(b0.keys()) == sorted(["pids", "xgrid", "mu2grid", "data"]) assert sorted(b0["pids"]) == sorted([-5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5]) np.testing.assert_allclose(b0["xgrid"][0], 1e-6) diff --git a/benchmarks/lha_paper_bench.py b/benchmarks/lha_paper_bench.py index c12fdca9d..821908689 100644 --- a/benchmarks/lha_paper_bench.py +++ b/benchmarks/lha_paper_bench.py @@ -5,11 +5,9 @@ import numpy as np from banana import register -from banana.data import cartesian_product from eko.interpolation import lambertgrid from ekomark.benchmark.runner import Runner -from ekomark.data import operators register(__file__) @@ -115,7 +113,7 @@ def run_lha(self, theory_updates): theory_updates, [ { - "Q2grid": [1e4], + "mugrid": [100], "ev_op_iterations": 10, "interpolation_xgrid": lambertgrid(60).tolist(), } @@ -235,7 +233,7 @@ def run_lha(self, theory_updates): theory_updates, [ { - "Q2grid": [1e4], + "mugrid": [100], "ev_op_iterations": 10, "interpolation_xgrid": lambertgrid(60).tolist(), "polarized": True, diff --git a/benchmarks/sandbox.py b/benchmarks/sandbox.py index a2f223565..e75b2801f 100644 --- a/benchmarks/sandbox.py +++ b/benchmarks/sandbox.py @@ -58,7 +58,7 @@ def generate_operators(): ops = { "ev_op_iterations": [1], # "ev_op_max_order": [20], - "Q2grid": [[100]], + "mugrid": [[10]], # "debug_skip_singlet": [True], } return ops @@ -140,7 +140,7 @@ def lha(self): ] self.run( 
[theory_updates], - [{"Q2grid": [1e4], "debug_skip_singlet": True}], + [{"mugrid": [100], "debug_skip_singlet": True}], ["ToyLH"], ) diff --git a/doc/source/development/ekomark_runners.rst b/doc/source/development/ekomark_runners.rst index 45447c378..6a6572b5e 100644 --- a/doc/source/development/ekomark_runners.rst +++ b/doc/source/development/ekomark_runners.rst @@ -66,7 +66,7 @@ The minimal setup of the input cards must contain: * - ``interpolation_is_log`` - :py:obj:`bool` - use logarithmic interpolation? - * - ``Q2grid`` + * - ``mu2grid`` - :py:obj:`dict` - all operators at the requested values of :math:`Q^2` represented by the key diff --git a/doc/source/overview/tutorials/alpha_s.ipynb b/doc/source/overview/tutorials/alpha_s.ipynb index ed949951f..4752b596d 100644 --- a/doc/source/overview/tutorials/alpha_s.ipynb +++ b/doc/source/overview/tutorials/alpha_s.ipynb @@ -31,13 +31,16 @@ "cell_type": "code", "execution_count": 1, "id": "2cdb5e10-4c68-4cf6-b15d-f464cafae04f", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ "import math\n", "import numpy as np\n", "from eko.couplings import Couplings\n", "from eko.io import types as ekotypes\n", + "from eko.quantities.heavy_quarks import MatchingScales, QuarkMassScheme\n", "\n", "# set the (alpha_s, alpha_em) reference values\n", "alphas_ref = ekotypes.FloatRef(value=0.118, scale=91.0)\n", @@ -58,7 +61,7 @@ " order,\n", " ekotypes.CouplingEvolutionMethod.EXACT,\n", " heavy_quark_masses,\n", - " ekotypes.QuarkMassSchemes.POLE,\n", + " QuarkMassScheme.POLE,\n", " thresholds_ratios,\n", ")" ] @@ -110,9 +113,9 @@ "hash": "0a84ba3ac8703c04e87bc503a7d00188dfd591ad56130da93c406115a1e4a408" }, "kernelspec": { - "display_name": "eko-KkPVjVhh-py3.10", + "display_name": "eko-VHUucTmo-py3.10", "language": "python", - "name": "eko-kkpvjvhh-py3.10" + "name": "eko-vhuuctmo-py3.10" }, "language_info": { "codemirror_mode": { @@ -124,7 +127,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.9" + "version": "3.10.7" } }, "nbformat": 4, diff --git a/doc/source/overview/tutorials/pdf.ipynb b/doc/source/overview/tutorials/pdf.ipynb index e7cddf4a5..c46408d31 100644 --- a/doc/source/overview/tutorials/pdf.ipynb +++ b/doc/source/overview/tutorials/pdf.ipynb @@ -179,7 +179,7 @@ "metadata": {}, "source": [ "Now, we set the theory inputs: in this example we will evolve our toy PDF at LO and create a new LHAPDF object with\n", - "a size two `Q2grid`." + "a size two `mu2grid`." 
] }, { diff --git a/extras/matching/check-matching.py b/extras/matching/check-matching.py index c081f10ef..e080883cb 100644 --- a/extras/matching/check-matching.py +++ b/extras/matching/check-matching.py @@ -73,7 +73,7 @@ "interpolation_polynomial_degree": 4, "debug_skip_non_singlet": False, "ev_op_max_order": 10, - "Q2grid": np.geomspace(4.0, 100.0, 3), + "mugrid": [(2.0, 4), (5.0, 5), (10, 5)], "mtime": None, "interpolation_is_log": "1", "xgrid": None, @@ -124,7 +124,8 @@ def collect_data(): with eko.EKO.open(f"./eko_{id}.tar") as evolution_operator: x = evolution_operator.metadata.rotations.targetgrid.raw data[id] = { - q2: el["pdfs"] for q2, el in apply_pdf(evolution_operator, pdf).items() + mu2: el["pdfs"] + for mu2, el in apply_pdf(evolution_operator, pdf).items() } return data @@ -136,9 +137,9 @@ def collect_data(): def plot(data, plots, fn, title): ptos = data.keys() - Q2s = plots["Q2s"] - major_q2s = (plots["ks"] * plots["scale"]) ** 2 - minor_q2s = utils.log_minor_ticks(np.min(Q2s), np.max(Q2s), majors=major_q2s) + mu2s = plots["mu2s"] + major_mu2s = (plots["ks"] * plots["scale"]) ** 2 + minor_mu2s = utils.log_minor_ticks(np.min(mu2s), np.max(mu2s), majors=major_mu2s) fig = plt.figure(figsize=(10, 5)) fig.suptitle(title) @@ -149,11 +150,11 @@ def plot(data, plots, fn, title): ax0plots = {} for pto, hatch, color in zip(ptos, hatches, colors): p = {} - p["line"] = ax0.semilogx(Q2s, data[pto][1], color=color) - # ax.plot(Q2s, data[pto][0],"x") - # ax.plot(Q2s, data[pto][2],"v") + p["line"] = ax0.semilogx(mu2s, data[pto][1], color=color) + # ax.plot(mu2s, data[pto][0],"x") + # ax.plot(mu2s, data[pto][2],"v") p["fill"] = ax0.fill_between( - Q2s, + mu2s, data[pto][0], data[pto][2], facecolor=clr.to_rgba(color, alpha=0.1), @@ -163,9 +164,9 @@ def plot(data, plots, fn, title): ) ax0plots[pto] = p - ax0.set_xlim([np.min(Q2s), np.max(Q2s)]) - ax0.xaxis.set_ticks(major_q2s) - ax0.xaxis.set_ticks(minor_q2s, minor=True) + ax0.set_xlim([np.min(mu2s), np.max(mu2s)]) + ax0.xaxis.set_ticks(major_mu2s) + ax0.xaxis.set_ticks(minor_mu2s, minor=True) ax0.xaxis.set_ticklabels(("$m_b^2/4$", "$m_b^2$", "$4 m_b^2$")) ax0.tick_params(which="minor", bottom=True, labelbottom=False) ax0.grid(which="minor", axis="both", color=".9", linewidth=0.6, linestyle="--") @@ -178,9 +179,9 @@ def plot(data, plots, fn, title): ax = fig.add_subplot(gs[i, 1]) ptoaxes.append(ax) - ax.plot(Q2s, data[pto][1] / data[pto][1] - 1.0, color=color) + ax.plot(mu2s, data[pto][1] / data[pto][1] - 1.0, color=color) ax.fill_between( - Q2s, + mu2s, data[pto][0] / data[pto][1] - 1.0, data[pto][2] / data[pto][1] - 1.0, facecolor=clr.to_rgba(color, alpha=0.1), @@ -189,7 +190,7 @@ def plot(data, plots, fn, title): edgecolor=clr.to_rgba(color, alpha=0.4), ) ax.set_xscale("log") - ax.set_xlim([np.min(Q2s), np.max(Q2s)]) + ax.set_xlim([np.min(mu2s), np.max(mu2s)]) if "ylim" in plots["breakout"]: ax.set_ylim(plots["breakout"]["ylim"]) ax.yaxis.tick_right() @@ -201,8 +202,8 @@ def plot(data, plots, fn, title): right=True, labelbottom=False, ) - ax.xaxis.set_ticks(major_q2s) - ax.xaxis.set_ticks(minor_q2s, minor=True) + ax.xaxis.set_ticks(major_mu2s) + ax.xaxis.set_ticks(minor_mu2s, minor=True) ax.xaxis.set_ticklabels(("$m_b^2/4$", "$m_b^2$", "$4 m_b^2$")) ax.grid(which="minor", axis="both", color=".9", linewidth=0.6, linestyle="--") @@ -215,7 +216,7 @@ def plot(data, plots, fn, title): def dump(select_pid, pid_label: str, xidx: int): data = collect_data() x = o["interpolation_xgrid"][xidx] - q2s = list(data[0].keys()) + mu2s = 
list(data[0].keys()) scale = get_theory(0)["mb"] ks = np.array([get_theory(tid)["kbThr"] for tid in [1, 0, 2]]) baseline = np.array([select_pid(el)[xidx] for el in data[0].values()]) @@ -228,7 +229,7 @@ def dump(select_pid, pid_label: str, xidx: int): plot_data[pto] = lo plot_cfg = dict( ks=ks, - Q2s=q2s, + mu2s=mu2s, scale=scale, xlabel=r"$\mu^2$", ylabel=f"x{pid_label}(x={x:.3e})", diff --git a/src/eko/couplings.py b/src/eko/couplings.py index d14db74f8..9983952bb 100644 --- a/src/eko/couplings.py +++ b/src/eko/couplings.py @@ -9,24 +9,17 @@ """ import logging import warnings -from math import isnan from typing import List import numba as nb import numpy as np import scipy -from eko.io.types import ( - CouplingEvolutionMethod, - CouplingsRef, - EvolutionMethod, - MatchingScales, - Order, - QuarkMassSchemes, -) - from . import constants, thresholds from .beta import b_qcd, b_qed, beta_qcd, beta_qed +from .io.types import EvolutionMethod, Order +from .quantities.couplings import CouplingEvolutionMethod, CouplingsInfo +from .quantities.heavy_quarks import QuarkMassScheme logger = logging.getLogger(__name__) @@ -428,11 +421,11 @@ class Couplings: def __init__( self, - couplings: CouplingsRef, + couplings: CouplingsInfo, order: Order, method: CouplingEvolutionMethod, masses: List[float], - hqm_scheme: QuarkMassSchemes, + hqm_scheme: QuarkMassScheme, thresholds_ratios: List[float], ): # Sanity checks @@ -440,9 +433,9 @@ def assert_positive(name, var): if var <= 0: raise ValueError(f"{name} has to be positive - got: {var}") - assert_positive("alpha_s_ref", couplings.alphas.value) - assert_positive("alpha_em_ref", couplings.alphaem.value) - assert_positive("scale_ref", couplings.alphas.scale) + assert_positive("alpha_s_ref", couplings.alphas) + assert_positive("alpha_em_ref", couplings.alphaem) + assert_positive("scale_ref", couplings.scale) if order[0] not in [1, 2, 3, 4]: raise NotImplementedError("a_s beyond N3LO is not implemented") if order[1] not in [0, 1, 2]: @@ -455,14 +448,14 @@ def assert_positive(name, var): nf_ref = couplings.num_flavs_ref max_nf = couplings.max_num_flavs scheme_name = hqm_scheme.name - self.alphaem_running = False if isnan(couplings.alphaem.scale) else True + self.alphaem_running = couplings.em_running self.decoupled_running = False # create new threshold object self.a_ref = np.array(couplings.values) / 4.0 / np.pi # convert to a_s and a_em self.thresholds = thresholds.ThresholdsAtlas( masses, - couplings.alphas.scale**2, + couplings.scale**2, nf_ref, thresholds_ratios=thresholds_ratios, max_nf=max_nf, @@ -477,7 +470,8 @@ def assert_positive(name, var): ) if self.order[1] > 0: logger.info( - "Electromagnetic Coupling: a_em(µ_R^2=%f)%s=%f=%f/(4π)\nalphaem running: %r\ndecoupled running: %r", + "Electromagnetic Coupling: a_em(µ_R^2=%f)%s=%f=%f/(4π)\nalphaem" + " running: %r\ndecoupled running: %r", self.q2_ref, f"^(nf={nf_ref})" if nf_ref else "", self.a_ref[1], @@ -783,7 +777,8 @@ def a( def a_s(self, scale_to, fact_scale=None, nf_to=None): r"""Compute strong coupling. - The strong oupling uses the normalization :math:`a_s(\mu_R^2) = \frac{\alpha_s(\mu_R^2)}{4\pi}`. + The strong oupling uses the normalization :math:`a_s(\mu_R^2) = + \frac{\alpha_s(\mu_R^2)}{4\pi}`. 
Parameters ---------- @@ -798,13 +793,15 @@ def a_s(self, scale_to, fact_scale=None, nf_to=None): ------- a_s : float couplings :math:`a_s(\mu_R^2) = \frac{\alpha_s(\mu_R^2)}{4\pi}` + """ return self.a(scale_to, fact_scale, nf_to)[0] def a_em(self, scale_to, fact_scale=None, nf_to=None): r"""Compute electromagnetic coupling. - The electromagnetic oupling uses the normalization :math:`a_em(\mu_R^2) = \frac{\alpha_em(\mu_R^2)}{4\pi}`. + The electromagnetic oupling uses the normalization :math:`a_em(\mu_R^2) + = \frac{\alpha_em(\mu_R^2)}{4\pi}`. Parameters ---------- diff --git a/src/eko/io/dictlike.py b/src/eko/io/dictlike.py index 8abaf9eeb..f606e4e27 100644 --- a/src/eko/io/dictlike.py +++ b/src/eko/io/dictlike.py @@ -18,6 +18,7 @@ # classes are supposed to be dataclasses +@dataclasses.dataclass class DictLike: """Dictionary compatibility base class, for dataclasses. @@ -29,11 +30,8 @@ class DictLike: """ - def __init__(self, **kwargs): - """Empty initializer.""" - @classmethod - def from_dict(cls, dictionary): + def _from_dict(cls, dictionary): """Initialize dataclass object from raw dictionary. Parameters @@ -80,8 +78,18 @@ def from_dict(cls, dictionary): # finally construct the class, just passing the arguments by name return cls(**dictionary) - @property - def raw(self): + @classmethod + def from_dict(cls, dictionary): + """Deserialize, overwritable interface. + + The default implementation is just :meth:`DictLike._from_dict`, but it + can be safely overwritten (usually transforming the input before a call + to :meth:`DictLike._from_dict` itself). + + """ + return cls._from_dict(dictionary) + + def _raw(self): """Convert dataclass object to raw dictionary. Normalize: @@ -104,10 +112,35 @@ def raw(self): return dictionary + @property + def raw(self): + """Overwritable serialization. + + The default implementation is just :meth:`DictLike._raw`, but it can be + safely overwritten (usually starting from :meth:`DictLike._raw` + itself). + + """ + return self._raw() + + @property + def public_raw(self): + """Serialize only public attributes.""" + return {k: v for k, v in self._raw().items() if not k.startswith("_")} + def load_field(type_, value): """Deserialize dataclass field.""" # TODO: nice place for a match statement... + if type(type_) is typing.NewType: + return load_field(type_.__supertype__, value) + try: + # before py3.10 typing.NewType was just a function, so the check above + # would fail + return load_field(type_.__supertype__, value) + except AttributeError: + pass + if typing.get_origin(type_) is not None: # this has to go first since for followin ones I will assume they are # valid classes, cf. the module docstring @@ -164,6 +197,10 @@ def load_typing(type_, value): return None raise TypeError + if issubclass(origin, (list, typing.Generic)): + T = typing.get_args(type_)[0] + return origin([load_field(T, x) for x in value]) + return load_field(origin, value) @@ -186,5 +223,7 @@ def raw_field(value): if dataclasses.is_dataclass(value): # not supporting nested DictLike inside nested plain dataclasses return dataclasses.asdict(value) + if isinstance(value, list): + return [raw_field(el) for el in value] return value diff --git a/src/eko/io/legacy.py b/src/eko/io/legacy.py index 09531bbc6..58899eae4 100644 --- a/src/eko/io/legacy.py +++ b/src/eko/io/legacy.py @@ -13,9 +13,7 @@ import yaml from eko.interpolation import XGrid -from eko.io.runcards import Rotations -from .. import basis_rotation as br from . 
import raw from .dictlike import DictLike from .struct import EKO, Operator @@ -54,7 +52,10 @@ def load_tar(source: os.PathLike, dest: os.PathLike, errors: bool = False): # get actual grids arrays = load_arrays(innerdir) - grid = op5to4(metaold["Q2grid"], arrays) + op5 = metaold.get("Q2grid") + if op5 is None: + op5 = metaold["mu2grid"] + grid = op5to4(op5, arrays) with EKO.create(dest) as builder: # here I'm plainly ignoring the static analyzer, the types are faking @@ -98,36 +99,26 @@ class PseudoOperator(DictLike): mu20: float mu2grid: npt.NDArray - rotations: Rotations + xgrid: XGrid configs: dict @classmethod def from_old(cls, old: RawCard): """Load from old metadata.""" mu20 = float(old["q2_ref"]) - mu2grid = np.array(old["Q2grid"]) + mu2list = old.get("Q2grid") + if mu2list is None: + mu2list = old["mu2grid"] + mu2grid = np.array(mu2list) xgrid = XGrid(old["interpolation_xgrid"]) - pids = old.get("pids", np.array(br.flavor_basis_pids)) - - rotations = Rotations(xgrid=xgrid, pids=pids) - - def set_if_different(name: str, default: npt.NDArray): - basis = old.get(name) - if basis is not None and not np.allclose(basis, default): - setattr(rotations, name, basis) - - set_if_different("inputpids", pids) - set_if_different("targetpids", pids) - set_if_different("inputgrid", xgrid.raw) - set_if_different("targetgrid", xgrid.raw) configs = dict( interpolation_polynomial_degree=old.get("interpolation_polynomial_degree"), interpolation_is_log=old.get("interpolation_is_log"), ) - return cls(mu20=mu20, mu2grid=mu2grid, rotations=rotations, configs=configs) + return cls(mu20=mu20, mu2grid=mu2grid, xgrid=xgrid, configs=configs) ARRAY_SUFFIX = ".npy.lz4" diff --git a/src/eko/io/runcards.py b/src/eko/io/runcards.py index e0d66944b..d8afdfc32 100644 --- a/src/eko/io/runcards.py +++ b/src/eko/io/runcards.py @@ -5,30 +5,31 @@ Squares are consistenly taken inside. """ -from dataclasses import dataclass +from dataclasses import dataclass, fields from math import nan -from typing import Optional, Union +from typing import List, Optional, Union import numpy as np import numpy.typing as npt +from eko.thresholds import ThresholdsAtlas + from .. import basis_rotation as br -from .. import interpolation +from .. import interpolation, msbar_masses from .. import version as vmod +from ..couplings import couplings_mod_ev +from ..quantities import heavy_quarks as hq +from ..quantities.couplings import CouplingsInfo +from ..quantities.heavy_quarks import HeavyInfo, QuarkMassScheme from .dictlike import DictLike from .types import ( - CouplingsRef, EvolutionMethod, - FlavorsNumber, - HeavyQuarkMasses, - IntrinsicFlavors, InversionMethod, - MatchingScales, Order, - QuarkMassSchemes, RawCard, ScaleVariationsMethod, T, + Target, ) @@ -38,28 +39,31 @@ class TheoryCard(DictLike): order: Order """Perturbative order tuple, ``(QCD, QED)``.""" - couplings: CouplingsRef + couplings: CouplingsInfo """Couplings configuration.""" - num_flavs_init: Optional[FlavorsNumber] - r"""Number of active flavors at fitting scale. - - I.e. :math:`n_{f,\text{ref}}(\mu^2_0)`, formerly called ``nf0``. 
- - """ - num_flavs_max_pdf: FlavorsNumber - """Maximum number of quark PDFs.""" - intrinsic_flavors: IntrinsicFlavors - """List of intrinsic quark PDFs.""" - quark_masses: HeavyQuarkMasses - """List of heavy quark masses.""" - quark_masses_scheme: QuarkMassSchemes - """Scheme used to specify heavy quark masses.""" - matching: MatchingScales - """Matching scale of heavy quark masses""" + heavy: HeavyInfo + """Heavy quarks related information.""" xif: float """Ratio between factorization scale and process scale.""" +def masses(theory: TheoryCard, evmeth: EvolutionMethod): + """Compute masses in the chosen scheme.""" + if theory.heavy.masses_scheme is QuarkMassScheme.MSBAR: + return msbar_masses.compute( + theory.heavy.masses, + theory.couplings, + theory.order, + couplings_mod_ev(evmeth), + np.power(theory.heavy.matching_ratios, 2.0), + xif2=theory.xif**2, + ).tolist() + if theory.heavy.masses_scheme is QuarkMassScheme.POLE: + return [mq.value**2 for mq in theory.heavy.masses] + + raise ValueError(f"Unknown mass scheme '{theory.heavy.masses_scheme}'") + + @dataclass class Debug(DictLike): """Debug configurations.""" @@ -82,6 +86,10 @@ class Configs(DictLike): """ ev_op_iterations: int """Number of intervals in which to break the global path.""" + scvar_method: Optional[ScaleVariationsMethod] + """Scale variation method.""" + inversion_method: Optional[InversionMethod] + """Which method to use for backward matching conditions.""" interpolation_polynomial_degree: int """Degree of elements of the intepolation polynomial basis.""" interpolation_is_log: bool @@ -92,10 +100,6 @@ class Configs(DictLike): """If `true` do polarized evolution.""" time_like: bool """If `true` do time-like evolution.""" - scvar_method: Optional[ScaleVariationsMethod] - """""" - inversion_method: Optional[InversionMethod] - """Which method to use for backward matching conditions.""" n_integration_cores: int = 1 """Number of cores used to parallelize integration.""" @@ -113,9 +117,7 @@ class Rotations(DictLike): """ xgrid: interpolation.XGrid - """Momentum fraction internal grid.""" - pids: npt.NDArray - """Array of integers, corresponding to internal PIDs.""" + """Internal momentum fraction grid.""" _targetgrid: Optional[interpolation.XGrid] = None _inputgrid: Optional[interpolation.XGrid] = None _targetpids: Optional[npt.NDArray] = None @@ -132,6 +134,11 @@ def __post_init__(self): elif not isinstance(value, interpolation.XGrid): setattr(self, attr, interpolation.XGrid.load(value)) + @property + def pids(self): + """Internal flavor basis, used for computation.""" + return np.array(br.flavor_basis_pids) + @property def inputpids(self) -> npt.NDArray: """Provide pids expected on the input PDF.""" @@ -176,6 +183,33 @@ def targetgrid(self) -> interpolation.XGrid: def targetgrid(self, value: interpolation.XGrid): self._targetgrid = value + @classmethod + def from_dict(cls, dictionary: dict): + """Deserialize rotation. + + Load from full state, but with public names. + + """ + d = dictionary.copy() + for f in fields(cls): + if f.name.startswith("_"): + d[f.name] = d.pop(f.name[1:]) + return cls._from_dict(d) + + @property + def raw(self): + """Serialize rotation. + + Pass through interfaces, access internal values but with a public name. 
+ + """ + d = self._raw() + for key in d.copy(): + if key.startswith("_"): + d[key[1:]] = d.pop(key) + + return d + @dataclass class OperatorCard(DictLike): @@ -183,25 +217,15 @@ class OperatorCard(DictLike): mu0: float """Initial scale.""" + mugrid: List[Target] + xgrid: interpolation.XGrid + """Momentum fraction internal grid.""" # collections configs: Configs """Specific configuration to be used during the calculation of these operators.""" debug: Debug """Debug configurations.""" - rotations: Rotations - """Rotations configurations. - - The operator card will only contain the interpolation xgrid and the pids. - - """ - - # TODO: drop legacy compatibility, only linear scales in runcards, such - # that we will always avoid taking square roots, and we are consistent with - # the other scales - _mugrid: Optional[npt.NDArray] = None - _mu2grid: Optional[npt.NDArray] = None - """Array of final scales.""" # optional eko_version: str = vmod.__version__ @@ -214,31 +238,14 @@ def mu20(self): return self.mu0**2 @property - def mugrid(self): - """Only setter enabled, access only to :attr:`mu2grid`.""" - raise ValueError("Use mu2grid") - - @mugrid.setter - def mugrid(self, value): - """Set scale grid with linear values.""" - self._mugrid = value - self._mu2grid = None - - @property - def mu2grid(self): + def mu2grid(self) -> npt.NDArray: """Grid of squared final scales.""" - if self._mugrid is not None: - return self._mugrid**2 - if self._mu2grid is not None: - return self._mu2grid - - raise RuntimeError("Mu2 grid has not been initialized") + return np.array([mu for mu, _ in self.mugrid]) ** 2 - @mu2grid.setter - def mu2grid(self, value): - """Set scale grid with quadratic values.""" - self._mugrid = None - self._mu2grid = value + @property + def pids(self): + """Internal flavor basis, used for computation.""" + return np.array(br.flavor_basis_pids) Card = Union[TheoryCard, OperatorCard] @@ -256,7 +263,11 @@ class Legacy: "EXP": "iterate-expanded", "TRN": "truncated", } - HEAVY = "cbt" + + @staticmethod + def heavies(pattern: str, old_th: dict): + """Retrieve a set of values for all heavy flavors.""" + return [old_th[pattern % q] for q in hq.FLAVORS] @staticmethod def fallback(*args: T, default: Optional[T] = None) -> T: @@ -275,9 +286,6 @@ def new_theory(self): old = self.theory new = {} - def heavies(pattern: str): - return {q: old[pattern % q] for q in self.HEAVY} - new["order"] = [old["PTO"] + 1, old["QED"]] alphaem = self.fallback(old.get("alphaqed"), old.get("alphaem"), default=0.0) if "QrefQED" not in old: @@ -293,18 +301,18 @@ def heavies(pattern: str): new["num_flavs_init"] = old["nf0"] new["num_flavs_max_pdf"] = old["MaxNfPdf"] intrinsic = [] - for idx, q in enumerate(self.HEAVY): + for idx, q in enumerate(hq.FLAVORS): if old.get(f"i{q}".upper()) == 1: intrinsic.append(idx + 4) new["intrinsic_flavors"] = intrinsic - new["matching"] = heavies("k%sThr") + new["matching"] = self.heavies("k%sThr", old) new["quark_masses_scheme"] = old["HQ"] - ms = heavies("m%s") - mus = heavies("Qm%s") + ms = self.heavies("m%s", old) + mus = self.heavies("Qm%s", old) if old["HQ"] == "POLE": - new["quark_masses"] = {q: (ms[q], nan) for q in self.HEAVY} + new["quark_masses"] = [[m, nan] for m in ms] elif old["HQ"] == "MSBAR": - new["quark_masses"] = {q: (ms[q], mus[q]) for q in self.HEAVY} + new["quark_masses"] = [[m, mu] for m, mu in zip(ms, mus)] else: raise ValueError() @@ -320,7 +328,16 @@ def new_operator(self): new = {} new["mu0"] = old_th["Q0"] - new["_mu2grid"] = old["Q2grid"] + if "mugrid" in old: + 
mugrid = old["mugrid"] + else: + mu2grid = old["Q2grid"] if "Q2grid" in old else old["mu2grid"] + mugrid = np.sqrt(mu2grid) + new["mugrid"] = flavored_mugrid( + mugrid, + list(self.heavies("m%s", old_th)), + list(self.heavies("k%sThr", old_th)), + ) new["configs"] = {} evmod = old_th["ModEv"] @@ -347,11 +364,7 @@ def new_operator(self): for k in ("debug_skip_non_singlet", "debug_skip_singlet"): new["debug"][k[lpref:]] = old[k] - new["rotations"] = {} - new["rotations"]["pids"] = old.get("pids", br.flavor_basis_pids) - new["rotations"]["xgrid"] = old["interpolation_xgrid"] - for basis in ("inputgrid", "targetgrid", "inputpids", "targetpids"): - new["rotations"][f"_{basis}"] = old[basis] + new["xgrid"] = old["interpolation_xgrid"] return OperatorCard.from_dict(new) @@ -376,3 +389,24 @@ def update(theory: Union[RawCard, TheoryCard], operator: Union[RawCard, Operator cards = Legacy(theory, operator) return cards.new_theory, cards.new_operator + + +def flavored_mugrid(mugrid: list, masses: list, matching_ratios: list): + r"""Upgrade :math:`\mu^2` grid to contain also target number flavors. + + It determines the number of flavors for the PDF set at the target scale, + inferring it according to the specified scales. + + This method should not be used to write new runcards, but rather to have a + consistent default for comparison with other softwares and existing PDF + sets. + There is no one-to-one relation between number of running flavors and final + scales, unless matchings are all applied. But this is a custom choice, + since it is possible to have PDFs in different |FNS| at the same scales. + + """ + tc = ThresholdsAtlas( + masses=(np.array(masses) ** 2).tolist(), + thresholds_ratios=(np.array(matching_ratios) ** 2).tolist(), + ) + return [(mu, tc.nf(mu**2)) for mu in mugrid] diff --git a/src/eko/io/struct.py b/src/eko/io/struct.py index af4128953..956fd3fc5 100644 --- a/src/eko/io/struct.py +++ b/src/eko/io/struct.py @@ -474,13 +474,7 @@ def path(self, value: pathlib.Path): @property def raw(self): """Override default :meth:`DictLike.raw` representation to exclude path.""" - raw = super().raw - - for key in raw.copy(): - if key.startswith("_"): - del raw[key] - - return raw + return self.public_raw @dataclass @@ -985,7 +979,7 @@ def raw(self) -> dict: operators themselves """ - return dict(Q2grid=self.mu2grid.tolist(), metadata=self.metadata.raw) + return dict(mu2grid=self.mu2grid.tolist(), metadata=self.metadata.raw) @dataclass @@ -1049,7 +1043,7 @@ def build(self) -> EKO: metadata = Metadata( _path=self.path, mu20=self.operator.mu20, - rotations=copy.deepcopy(self.operator.rotations), + rotations=Rotations(xgrid=self.operator.xgrid), ) InternalPaths(self.path).bootstrap( theory=self.theory.raw, diff --git a/src/eko/io/types.py b/src/eko/io/types.py index 1ac602fb8..e809b198b 100644 --- a/src/eko/io/types.py +++ b/src/eko/io/types.py @@ -1,142 +1,95 @@ -"""Common type definitions, only used for static analysis. - -Unfortunately, Python has still some discrepancies between runtime classes and -type hints, so it is better not to mix dataclasses and generics. - -E.g. 
before it was implemented:: - - @dataclasses.dataclass - class RunningReference(DictLike, Generic[Quantity]): - value: Quantity - scale: float - -but in this way it is not possible to determine that ``RunningReference`` is -subclassing ``DictLike``, indeed:: - - inspec.isclass(RunningReference) # False - issubclass(RunningReference, DictLike) # raise an error, since - # RunningReference is not a class - -Essentially classes can be used for type hints, but types are not all classes, -especially when they involve generics. - -For this reason I prefer the less elegant dynamic generation, that seems to -preserve type hints. - -""" -import dataclasses +"""Common type definitions, only used for static analysis.""" import enum import typing -from math import isnan -from typing import Any, Dict, Optional +from typing import Any, Dict, Generic, Tuple, TypeVar -from .dictlike import DictLike +# Energy scales +# ------------- -T = typing.TypeVar("T") +Scalar = float +Scale = float +LinearScale = Scale +SquaredScale = Scale +# TODO: replace with (requires py>=3.9) +# LinearScale = Annotated[Scale, 1] +# SquaredScale = Annotated[Scale, 2] -def reference_running(quantity: typing.Type[typing.Union[int, float]]): - """Generate running quantities reference point classes. - The motivation for dynamic generation is provided in module docstring. +# Flavors +# ------- - """ - - @dataclasses.dataclass - class ReferenceRunning(DictLike): - value: quantity - scale: float - - return ReferenceRunning - - -IntRef = reference_running(int) -FloatRef = reference_running(float) - -Order = typing.Tuple[int, int] +Order = Tuple[int, int] FlavorsNumber = int FlavorIndex = int IntrinsicFlavors = typing.List[FlavorIndex] +# Targets +# ------- -@dataclasses.dataclass -class CouplingsRef(DictLike): - """Reference values for coupling constants.""" - - alphas: FloatRef - alphaem: FloatRef - max_num_flavs: FlavorsNumber - num_flavs_ref: Optional[FlavorsNumber] - r"""Number of active flavors at strong coupling reference scale. - - I.e. :math:`n_{f,\text{ref}}(\mu^2_{\text{ref}})`, formerly called - ``nfref``. +Target = Tuple[LinearScale, FlavorsNumber] - """ - def __post_init__(self): - """Validate couplings. +# Scale functions +# --------------- - If they are both running, they have to be defined at the same scale. +T = TypeVar("T") - Usually :attr:`alphaem` is not running, thus its scale is set to nan. - """ - assert self.alphas.scale == self.alphaem.scale or isnan(self.alphaem.scale) +class ReferenceRunning(list, Generic[T]): + """Running quantities reference point. - @property - def values(self): - """Collect only couplings values.""" - return (self.alphas.value, self.alphaem.value) + To simplify serialization, the class is just storing the content as a list, + but: + - it is constructed with a ``Running.typed(T, Scale)`` signature + - it should always be used through the property accessors, rather then + using the list itself -def heavy_quark(quarkattr): - """Generate heavy quark properties container classes. + """ - The motivation for dynamic generation is provided in module docstring. + @classmethod + def typed(cls, value: T, scale: Scale): + """Define constructor from individual values. - """ + This is the preferred constructor for references, since respects the + intended types of the values. + It is not the default one only to simplify (de)serialization. 
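# Illustration only (not part of the patch): a minimal usage sketch of the
# list-backed reference quantity described above; the numerical values are made up.
from eko.io.types import ReferenceRunning

alphas_ref = ReferenceRunning.typed(0.118, 91.2)
assert alphas_ref.value == 0.118 and alphas_ref.scale == 91.2
# being a plain list underneath, it (de)serializes simply as [0.118, 91.2]
assert list(alphas_ref) == [0.118, 91.2]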
- @dataclasses.dataclass - class HeavyQuarks(DictLike): - """Access heavy quarks attributes by name.""" + """ + return cls([value, scale]) - c: quarkattr - """Charm quark.""" - b: quarkattr - """Bottom quark.""" - t: quarkattr - """Top quark.""" + @property + def value(self) -> T: + """Reference value, given at a specified scale.""" + return self[0] - def __getitem__(self, key: int): - """Allow access by index. + @value.setter + def value(self, value: T): + self[0] = value - Consequently it allows iteration and containing check. + @property + def scale(self) -> Scale: + """Reference scale, at which the value of the function is given.""" + return self[1] - """ - return getattr(self, "cbt"[key]) + @scale.setter + def scale(self, value: Scale): + self[1] = value - return HeavyQuarks +FlavNumRef = ReferenceRunning[FlavorsNumber] +LinearScaleRef = ReferenceRunning[LinearScale] -QuarkMassRef = FloatRef -HeavyQuarkMasses = heavy_quark(QuarkMassRef) -MatchingScale = float -MatchingScales = heavy_quark(MatchingScale) +# Numerical methods +# ----------------- -# TODO: upgrade all the following to StrEnum, new in py3.11 +# TODO: upgrade all the following to StrEnum (requires py>=3.11) # with that, it is possible to replace all non-alias right sides with calls to # enum.auto() -class QuarkMassSchemes(enum.Enum): - """Scheme to define heavy quark masses.""" - - MSBAR = "msbar" - POLE = "pole" - - class EvolutionMethod(enum.Enum): """DGLAP solution method.""" @@ -150,13 +103,6 @@ class EvolutionMethod(enum.Enum): DECOMPOSE_EXPANDED = "decompose-expanded" -class CouplingEvolutionMethod(enum.Enum): - """Beta functions solution method.""" - - EXACT = "exact" - EXPANDED = "expanded" - - class ScaleVariationsMethod(enum.Enum): """Method used to account for factorization scale variation.""" diff --git a/src/eko/msbar_masses.py b/src/eko/msbar_masses.py index 0fe377489..d615111a7 100644 --- a/src/eko/msbar_masses.py +++ b/src/eko/msbar_masses.py @@ -1,25 +1,17 @@ r"""|RGE| for the |MSbar| masses.""" -from math import nan from typing import List import numba as nb import numpy as np from scipy import integrate, optimize -from eko.io.types import ( - CouplingEvolutionMethod, - CouplingsRef, - HeavyQuarkMasses, - MatchingScales, - Order, - QuarkMassRef, - QuarkMassSchemes, -) - from .basis_rotation import quark_names from .beta import b_qcd, beta_qcd from .couplings import Couplings, invert_matching_coeffs from .gamma import gamma +from .io.types import FlavorsNumber, Order +from .quantities.couplings import CouplingEvolutionMethod, CouplingsInfo +from .quantities.heavy_quarks import HeavyQuarkMasses, QuarkMassRef, QuarkMassScheme from .thresholds import ThresholdsAtlas, flavor_shift, is_downward_path @@ -348,7 +340,7 @@ def evolve(m2_ref, q2m_ref, strong_coupling, xif2, q2_to, nf_ref=None, nf_to=Non def compute( masses_ref: HeavyQuarkMasses, - couplings: CouplingsRef, + couplings: CouplingsInfo, order: Order, evmeth: CouplingEvolutionMethod, matching: List[float], @@ -381,8 +373,8 @@ def compute( """ # TODO: sketch in the docs how the MSbar computation works with a figure. 
- mu2_ref = couplings.alphas.scale**2 - nf_ref: int = couplings.num_flavs_ref + mu2_ref = couplings.scale**2 + nf_ref: FlavorsNumber = couplings.num_flavs_ref masses = np.concatenate((np.zeros(nf_ref - 3), np.full(6 - nf_ref, np.inf))) def sc(thr_masses): @@ -392,7 +384,7 @@ def sc(thr_masses): method=evmeth, masses=thr_masses, thresholds_ratios=matching, - hqm_scheme=QuarkMassSchemes.MSBAR, + hqm_scheme=QuarkMassScheme.MSBAR, ) # First you need to look for the thr around the given as_ref @@ -476,6 +468,6 @@ def sc(thr_masses): ) # Check the msbar ordering - if not (masses == np.sort(masses)).all(): + if not np.allclose(masses, np.sort(masses)): raise ValueError("MSbar masses are not to be sorted") - return masses + return np.sort(masses) diff --git a/src/eko/quantities/__init__.py b/src/eko/quantities/__init__.py new file mode 100644 index 000000000..3193b125b --- /dev/null +++ b/src/eko/quantities/__init__.py @@ -0,0 +1,8 @@ +"""Physical and internal objects. + +This subpackage contains the definitions of many objects, with the purpose of +using them as type hints, but not only (at least for some of them). + +Few related functions are also defined here. + +""" diff --git a/src/eko/quantities/couplings.py b/src/eko/quantities/couplings.py new file mode 100644 index 000000000..12511d960 --- /dev/null +++ b/src/eko/quantities/couplings.py @@ -0,0 +1,61 @@ +"""Types and quantities related to theory couplings.""" +import dataclasses +import enum +from typing import Optional + +from ..io.dictlike import DictLike +from ..io.types import FlavorsNumber, LinearScale, ReferenceRunning, Scalar + +Coupling = Scalar +CouplingRef = ReferenceRunning[Coupling] + + +@dataclasses.dataclass +class CouplingsInfo(DictLike): + """Reference values for coupling constants. + + Also includes further information, defining the run of the couplings. + + """ + + alphas: Coupling + alphaem: Coupling + scale: LinearScale + max_num_flavs: FlavorsNumber + num_flavs_ref: Optional[FlavorsNumber] + r"""Number of active flavors at strong coupling reference scale. + + I.e. :math:`n_{f,\text{ref}}(\mu^2_{\text{ref}})`, formerly called + ``nfref``. 
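# Illustration only: a sketch of filling the flat couplings container above;
# the values are made up and `from_dict` is the generic `DictLike` constructor.
from eko.quantities.couplings import CouplingsInfo

couplings = CouplingsInfo.from_dict(
    dict(alphas=0.118, alphaem=0.007496252, scale=91.2, max_num_flavs=6, num_flavs_ref=None)
)
assert couplings.alphas == 0.118 and couplings.scale == 91.2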
+ + """ + em_running: bool = False + + @property + def alphas_ref(self): + r""":math:`\alpha_s` reference point.""" + return CouplingRef.typed(self.alphas, self.scale) + + @property + def alphaem_ref(self): + r""":math:`\alpha` reference point.""" + if self.em_running: + raise ValueError("Non-running electromagnetic coupling.") + + return CouplingRef.typed(self.alphaem, self.scale) + + @property + def values(self): + """Collect only couplings values.""" + return (self.alphas, self.alphaem) + + +# TODO: upgrade the following to StrEnum (requires py>=3.11) with that, it is +# possible to replace all non-alias right sides with calls to enum.auto() + + +class CouplingEvolutionMethod(enum.Enum): + """Beta functions solution method.""" + + EXACT = "exact" + EXPANDED = "expanded" diff --git a/src/eko/quantities/heavy_quarks.py b/src/eko/quantities/heavy_quarks.py new file mode 100644 index 000000000..3f061d977 --- /dev/null +++ b/src/eko/quantities/heavy_quarks.py @@ -0,0 +1,121 @@ +"""Heavy quarks related quantities.""" +import enum +from dataclasses import dataclass +from typing import Generic, Optional, Sequence, TypeVar + +import numpy as np + +from ..io.dictlike import DictLike +from ..io.types import ( + FlavorsNumber, + IntrinsicFlavors, + LinearScale, + ReferenceRunning, + SquaredScale, +) + +FLAVORS = "cbt" + +T = TypeVar("T") + + +class HeavyQuarks(list, Generic[T]): + """Access heavy quarks attributes by name.""" + + def __init__(self, args: Sequence[T]): + if len(args) != 3: + raise ValueError("Pass values for exactly three quarks.") + + self.extend(args) + + @property + def c(self) -> T: + """Charm quark.""" + return self[0] + + @c.setter + def c(self, value: T): + self[0] = value + + @property + def b(self) -> T: + """Bottom quark.""" + return self[1] + + @b.setter + def b(self, value: T): + self[1] = value + + @property + def t(self) -> T: + """Top quark.""" + return self[2] + + @t.setter + def t(self, value: T): + self[2] = value + + +QuarkMass = LinearScale +QuarkMassRef = ReferenceRunning[QuarkMass] +HeavyQuarkMasses = HeavyQuarks[QuarkMassRef] +MatchingRatio = float +MatchingRatios = HeavyQuarks[MatchingRatio] +MatchingScale = SquaredScale +MatchingScales = HeavyQuarks[MatchingScale] + + +def scales_from_ratios( + ratios: MatchingRatios, masses: HeavyQuarkMasses +) -> MatchingScales: + """Convert ratios to squared scales. + + .. todo:: + + make this a method + + """ + return MatchingScales(*(np.power(ratios, 2.0) * np.power(masses, 2.0)).tolist()) + + +# TODO: upgrade the following to StrEnum (requires py>=3.11) with that, it is +# possible to replace all non-alias right sides with calls to enum.auto() + + +class QuarkMassScheme(enum.Enum): + """Scheme to define heavy quark masses.""" + + MSBAR = "msbar" + POLE = "pole" + + +@dataclass +class HeavyInfo(DictLike): + """Collect information about heavy quarks. + + This is meant to be used mainly as a theory card section, and to be passed + around when all or a large part of this information is required. + + """ + + num_flavs_init: Optional[FlavorsNumber] + r"""Number of active flavors at fitting scale. + + I.e. :math:`n_{f,\text{ref}}(\mu^2_0)`, formerly called ``nf0``. 
+ + """ + num_flavs_max_pdf: FlavorsNumber + """Maximum number of quark PDFs.""" + intrinsic_flavors: IntrinsicFlavors + """List of intrinsic quark PDFs.""" + masses: HeavyQuarkMasses + """List of heavy quark masses.""" + masses_scheme: QuarkMassScheme + """Scheme used to specify heavy quark masses.""" + matching_ratios: MatchingRatios + """Matching scale of heavy quark masses""" + + @property + def matching_scales(self) -> MatchingScales: + """Compute matching scales.""" + return scales_from_ratios(self.matching_ratios, self.masses) diff --git a/src/eko/runner/compute.py b/src/eko/runner/compute.py new file mode 100644 index 000000000..acaabe605 --- /dev/null +++ b/src/eko/runner/compute.py @@ -0,0 +1,8 @@ +"""Compute parts from recipes.""" +from .parts import Part +from .recipes import Recipe + + +def compute(recipe: Recipe) -> Part: + """Compute EKO component in isolation.""" + return Part("ciao") diff --git a/src/eko/runner/legacy.py b/src/eko/runner/legacy.py index b9567eb70..94ae2281d 100644 --- a/src/eko/runner/legacy.py +++ b/src/eko/runner/legacy.py @@ -5,11 +5,11 @@ import numpy as np -from .. import interpolation, msbar_masses +from .. import interpolation from ..couplings import Couplings, couplings_mod_ev from ..evolution_operator.grid import OperatorGrid from ..io import EKO, Operator, runcards -from ..io.types import QuarkMassSchemes, RawCard, ScaleVariationsMethod +from ..io.types import RawCard, ScaleVariationsMethod from ..thresholds import ThresholdsAtlas from . import commons @@ -56,35 +56,22 @@ def __init__( # setup basis grid bfd = interpolation.InterpolatorDispatcher( - xgrid=new_operator.rotations.xgrid, + xgrid=new_operator.xgrid, polynomial_degree=new_operator.configs.interpolation_polynomial_degree, ) # setup the Threshold path, compute masses if necessary - masses = None - if new_theory.quark_masses_scheme is QuarkMassSchemes.MSBAR: - masses = msbar_masses.compute( - new_theory.quark_masses, - new_theory.couplings, - new_theory.order, - couplings_mod_ev(new_operator.configs.evolution_method), - np.power(list(iter(new_theory.matching)), 2.0), - xif2=new_theory.xif**2, - ).tolist() - elif new_theory.quark_masses_scheme is QuarkMassSchemes.POLE: - masses = [mq.value**2 for mq in new_theory.quark_masses] - else: - raise ValueError(f"Unknown mass scheme '{new_theory.quark_masses_scheme}'") + masses = runcards.masses(new_theory, new_operator.configs.evolution_method) # call explicitly iter to explain the static analyzer that is an # iterable - thresholds_ratios = np.power(list(iter(new_theory.matching)), 2.0) + thresholds_ratios = np.power(list(iter(new_theory.heavy.matching_ratios)), 2.0) tc = ThresholdsAtlas( masses=masses, q2_ref=new_operator.mu20, - nf_ref=new_theory.num_flavs_init, + nf_ref=new_theory.heavy.num_flavs_init, thresholds_ratios=thresholds_ratios, - max_nf=new_theory.num_flavs_max_pdf, + max_nf=new_theory.heavy.num_flavs_max_pdf, ) # strong coupling @@ -93,7 +80,7 @@ def __init__( order=new_theory.order, method=couplings_mod_ev(new_operator.configs.evolution_method), masses=masses, - hqm_scheme=new_theory.quark_masses_scheme, + hqm_scheme=new_theory.heavy.masses_scheme, thresholds_ratios=thresholds_ratios * ( new_theory.xif**2 @@ -107,8 +94,8 @@ def __init__( mu2grid=new_operator.mu2grid, order=new_theory.order, masses=masses, - mass_scheme=new_theory.quark_masses_scheme.value, - intrinsic_flavors=new_theory.intrinsic_flavors, + mass_scheme=new_theory.heavy.masses_scheme.value, + intrinsic_flavors=new_theory.heavy.intrinsic_flavors, 
xif=new_theory.xif, configs=new_operator.configs, debug=new_operator.debug, diff --git a/src/eko/runner/parts.py b/src/eko/runner/parts.py new file mode 100644 index 000000000..aed1067aa --- /dev/null +++ b/src/eko/runner/parts.py @@ -0,0 +1,38 @@ +"""Operator components.""" +from abc import ABC +from dataclasses import dataclass + +import numpy.typing as npt + +from ..io.dictlike import DictLike +from ..io.types import SquaredScale + + +@dataclass +class Part(DictLike, ABC): + """An atomic operator ingredient. + + The operator is always in flavor basis, and on the x-grid specified for + computation, i.e. it is a rank-4 tensor of shape:: + + (flavor basis) x (x-grid) x (flavor basis) x (x-grid) + + """ + + operator: npt.NDArray + + +@dataclass +class Evolution(Part): + """Evolution in a fixed number of flavors.""" + + final: bool + mu20: SquaredScale + mu2: SquaredScale + + +@dataclass +class Matching(Part): + """Matching conditions between two different flavor schemes, at a given scale.""" + + mu2: SquaredScale diff --git a/src/eko/runner/recipes.py b/src/eko/runner/recipes.py new file mode 100644 index 000000000..0e6163b66 --- /dev/null +++ b/src/eko/runner/recipes.py @@ -0,0 +1,54 @@ +"""Recipes containing instructions for atomic computations.""" +from abc import ABC +from dataclasses import dataclass + +from .. import EKO +from .. import scale_variations as sv +from ..io import runcards +from ..io.dictlike import DictLike +from ..io.types import SquaredScale +from ..thresholds import ThresholdsAtlas + + +@dataclass +class Recipe(DictLike, ABC): + """Base recipe structure.""" + + name: str + + +@dataclass +class EvolutionRecipe(Recipe): + """Recipe compute evolution with a fixed number of light flavors.""" + + final: bool + mu20: SquaredScale + mu2: SquaredScale + + +@dataclass +class MatchingRecipe(Recipe): + """Recipe to compute the matching between two different flavor number schemes.""" + + mu2: SquaredScale + + +def create(eko: EKO): + """Create all associated recipes.""" + _ = eko.theory_card.matching + + masses = runcards.masses( + eko.theory_card, eko.operator_card.configs.evolution_method + ) + + tc = ThresholdsAtlas( + masses=masses, + q2_ref=eko.operator_card.mu20, + nf_ref=eko.theory_card.num_flavs_init, + thresholds_ratios=None, + max_nf=eko.theory_card.num_flavs_max_pdf, + ) + + for mu2 in eko.mu2grid: + expanded = eko.operator_card.configs.scvar_method is sv.Modes.expanded + mu2f = mu2 * eko.theory_card.xif**2 if expanded else mu2 diff --git a/src/eko/thresholds.py b/src/eko/thresholds.py index b8c2781ed..eb0bd7fa2 100644 --- a/src/eko/thresholds.py +++ b/src/eko/thresholds.py @@ -1,50 +1,36 @@ r"""Holds the classes that define the |FNS|.""" import logging +from dataclasses import astuple, dataclass +from typing import List, Optional import numpy as np logger = logging.getLogger(__name__) +@dataclass class PathSegment: - """Oriented path in the threshold landscape. 
+ """Oriented path in the threshold landscape.""" - Attributes - ---------- - q2_from : float - starting point - q2_to : float - final point - nf : int - number of active flavors - - Parameters - ---------- - q2_from : float - starting point - q2_to : float - final point - nf : int - number of active flavors - """ - - def __init__(self, q2_from, q2_to, nf): - self.q2_from = q2_from - self.q2_to = q2_to - self.nf = nf + q2_from: float + """Starting point.""" + q2_to: float + """Final point.""" + nf: int + """Number of active flavors.""" @property - def is_downward_q2(self): + def is_downward_q2(self) -> bool: """Return True if ``q2_from`` is bigger than ``q2_to``.""" return self.q2_from > self.q2_to @property def tuple(self): - """Return tuple representation suitable for hashing.""" - return (self.q2_from, self.q2_to, self.nf) + """Deprecated: use directly `dataclasses.astuple`.""" + return astuple(self) - def __repr__(self): - """Return string representation.""" + def __str__(self): + """Textual representation, mainly for logging purpose.""" return f"PathSegment({self.q2_from} -> {self.q2_to}, nf={self.nf})" @@ -58,32 +44,33 @@ class ThresholdsAtlas: def __init__( self, - masses, - q2_ref=None, - nf_ref=None, - thresholds_ratios=None, - max_nf=None, + masses: List[float], + q2_ref: Optional[float] = None, + nf_ref: Optional[int] = None, + thresholds_ratios: Optional[List[float]] = None, + max_nf: Optional[int] = None, ): """Create basic atlas. Parameters ---------- - masses : list(float) + masses : list of quark masses squared - q2_ref : float + q2_ref : reference scale - nf_ref : int + nf_ref : number of active flavors at the reference scale - thresholds_ratios : list(float) + thresholds_ratios : list of ratios between matching scales and masses squared - max_nf : int - maximum number of active flavors + max_nf : + maximum number of active flavors, if `None` no maximum is set + """ - masses = list(masses) - if masses != sorted(masses): + sorted_masses = sorted(masses) + if not np.allclose(masses, sorted_masses): raise ValueError("masses need to be sorted") # combine them - thresholds = self.build_area_walls(masses, thresholds_ratios, max_nf) + thresholds = self.build_area_walls(sorted_masses, thresholds_ratios, max_nf) self.area_walls = [0] + thresholds + [np.inf] # check nf_ref @@ -113,13 +100,13 @@ def __init__( self.thresholds_ratios = thresholds_ratios logger.info(str(self)) - def __repr__(self): - """Return string representation.""" + def __str__(self): + """Textual representation, mainly for logging purpose.""" walls = " - ".join([f"{w:.2e}" for w in self.area_walls]) return f"ThresholdsAtlas [{walls}], ref={self.q2_ref} @ {self.nf_ref}" @classmethod - def ffns(cls, nf, q2_ref=None): + def ffns(cls, nf: int, q2_ref: Optional[float] = None): """Create a |FFNS| setup. The function creates simply sufficient thresholds at ``0`` (in the @@ -128,37 +115,43 @@ def ffns(cls, nf, q2_ref=None): Parameters ---------- - nf : int + nf : number of light flavors - q2_ref : float + q2_ref : reference scale + """ return cls([0] * (nf - 3) + [np.inf] * (6 - nf), q2_ref) @staticmethod - def build_area_walls(masses, thresholds_ratios=None, max_nf=None): + def build_area_walls( + masses: List[float], + thresholds_ratios: Optional[List[float]] = None, + max_nf: Optional[int] = None, + ): r"""Create the object from the informations on the run card. The thresholds are computed by :math:`(m_q \cdot k_q^{Thr})`. 
Parameters ---------- - masses : list(float) + masses : heavy quark masses squared - thresholds_ratios : list(float) + thresholds_ratios : list of ratios between matching scales and masses squared - max_nf : int + max_nf : maximum number of flavors Returns ------- list threshold list + """ if len(masses) != 3: raise ValueError("There have to be 3 quark masses") if thresholds_ratios is None: - thresholds_ratios = (1.0, 1.0, 1.0) + thresholds_ratios = [1.0, 1.0, 1.0] if len(thresholds_ratios) != 3: raise ValueError("There have to be 3 quark threshold ratios") if max_nf is None: @@ -171,21 +164,28 @@ def build_area_walls(masses, thresholds_ratios=None, max_nf=None): thresholds = thresholds[: max_nf - 3] return thresholds - def path(self, q2_to, nf_to=None, q2_from=None, nf_from=None): + def path( + self, + q2_to: float, + nf_to: Optional[int] = None, + q2_from: Optional[float] = None, + nf_from: Optional[int] = None, + ): """Get path from ``q2_from`` to ``q2_to``. Parameters ---------- - q2_to: float + q2_to: target value of q2 - q2_from: float + q2_from: starting value of q2 Returns ------- list(PathSegment) List of :class:`PathSegment` to go through in order to get from ``q2_from`` - to ``q2_to``. + to ``q2_to`` + """ # fallback to init config if q2_from is None: @@ -228,12 +228,13 @@ def nf(self, q2): ------- int number of active flavors + """ ref_idx = np.digitize(q2, self.area_walls) return 2 + ref_idx -def is_downward_path(path): +def is_downward_path(path: List[PathSegment]) -> bool: """Determine if a path is downward. Criterias are: @@ -245,20 +246,21 @@ def is_downward_path(path): Parameters ---------- - path : list(PathSegment) + path : path Returns ------- bool True for a downward path + """ if len(path) == 1: return path[0].is_downward_q2 return path[1].nf < path[0].nf -def flavor_shift(is_downward): +def flavor_shift(is_downward: bool) -> int: """Determine the shift to number of light flavors. 
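# Illustration only: a sketch of how these path helpers combine; the segments
# are built by hand purely to show the convention.
from eko.thresholds import PathSegment, flavor_shift, is_downward_path

path = [PathSegment(100.0, 25.0, 5), PathSegment(25.0, 4.0, 4)]
assert is_downward_path(path)  # nf decreases along the path
assert flavor_shift(is_downward_path(path)) == 4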
Parameters @@ -270,5 +272,6 @@ def flavor_shift(is_downward): ------- int shift to number of light flavors which can be 3 or 4 + """ return 4 if is_downward else 3 diff --git a/src/ekobox/apply.py b/src/ekobox/apply.py index 3c1f2cbe6..fb657edaa 100644 --- a/src/ekobox/apply.py +++ b/src/ekobox/apply.py @@ -32,7 +32,7 @@ def apply_pdf( Returns ------- out_grid : dict - output PDFs and their associated errors for the computed Q2grid + output PDFs and their associated errors for the computed mu2grid """ if rotate_to_evolution_basis: if not qed: @@ -71,7 +71,7 @@ def apply_pdf_flavor( Returns ------- out_grid : dict - output PDFs and their associated errors for the computed Q2grid + output PDFs and their associated errors for the computed mu2grid """ # create pdfs pdfs = np.zeros((len(eko.rotations.inputpids), len(eko.rotations.inputgrid))) diff --git a/src/ekobox/cards.py b/src/ekobox/cards.py index 231dc7266..077e7f8da 100644 --- a/src/ekobox/cards.py +++ b/src/ekobox/cards.py @@ -5,30 +5,33 @@ import numpy as np import yaml -from eko import basis_rotation as br from eko.io import runcards -from eko.io.types import RawCard +from eko.io.types import RawCard, ReferenceRunning _theory = dict( order=[1, 0], couplings=dict( - alphas=[0.118, 91.2], - alphaem=[0.007496252, nan], + alphas=0.118, + alphaem=0.007496252, + scale=91.2, num_flavs_ref=None, max_num_flavs=6, ), - num_flavs_init=None, - num_flavs_max_pdf=6, - intrinsic_flavors=[4], - quark_masses={q: [mq, nan] for mq, q in zip((2.0, 4.5, 173.07), "cbt")}, - quark_masses_scheme="POLE", - matching=[1.0, 1.0, 1.0], + heavy=dict( + num_flavs_init=None, + num_flavs_max_pdf=6, + intrinsic_flavors=[4], + masses=[ReferenceRunning([mq, nan]) for mq in (2.0, 4.5, 173.07)], + masses_scheme="POLE", + matching_ratios=[1.0, 1.0, 1.0], + ), xif=1.0, ) _operator = dict( mu0=1.65, - _mugrid=[100.0], + mugrid=[(100.0, 5)], + xgrid=np.geomspace(1e-7, 1.0, 50).tolist(), configs=dict( evolution_method="iterate-exact", ev_op_max_order=[10, 0], @@ -45,10 +48,6 @@ skip_singlet=False, skip_non_singlet=False, ), - rotations=dict( - xgrid=np.geomspace(1e-7, 1.0, 50).tolist(), - pids=list(br.flavor_basis_pids), - ), ) @@ -74,7 +73,7 @@ def raw_theory(cls): @classmethod def raw_operator(cls): """Provide example operator card unstructured.""" - return _theory.copy() + return _operator.copy() def dump(card: RawCard, path: os.PathLike): diff --git a/src/ekobox/cli/inspect.py b/src/ekobox/cli/inspect.py index 1e923a7d8..f37f09cce 100644 --- a/src/ekobox/cli/inspect.py +++ b/src/ekobox/cli/inspect.py @@ -1,4 +1,4 @@ -"""Launch EKO calculations, with legacy Q2grid mode.""" +"""Launch EKO calculations, with legacy mu2grid mode.""" import pathlib import click diff --git a/src/ekobox/cli/run.py b/src/ekobox/cli/run.py index 62e763f0c..16e11c5fd 100644 --- a/src/ekobox/cli/run.py +++ b/src/ekobox/cli/run.py @@ -1,4 +1,4 @@ -"""Launch EKO calculations, with legacy Q2grid mode.""" +"""Launch EKO calculations, with legacy mu2grid mode.""" import pathlib from typing import Sequence diff --git a/src/ekobox/evol_pdf.py b/src/ekobox/evol_pdf.py index 6e03b92d8..3be7201de 100644 --- a/src/ekobox/evol_pdf.py +++ b/src/ekobox/evol_pdf.py @@ -65,7 +65,7 @@ def evolve_pdfs( ) if targetgrid is None: - targetgrid = operators_card.rotations.xgrid + targetgrid = operators_card.xgrid if info_update is None: info_update = {} info_update["XMin"] = targetgrid.raw[0] @@ -84,7 +84,7 @@ def evolve_pdfs( lambda pid, x, Q2, evolved_PDF=evolved_PDF: targetlist[targetlist.index(x)] * 
evolved_PDF[Q2]["pdfs"][pid][targetlist.index(x)], xgrid=targetlist, - Q2grid=operators_card.mu2grid, + mu2grid=operators_card.mu2grid, pids=np.array(br.flavor_basis_pids), ) # all_blocks will be useful in case there will be necessity to dump many blocks diff --git a/src/ekobox/genpdf/__init__.py b/src/ekobox/genpdf/__init__.py index aa468951e..a5ed41865 100644 --- a/src/ekobox/genpdf/__init__.py +++ b/src/ekobox/genpdf/__init__.py @@ -13,7 +13,7 @@ logger = logging.getLogger(__name__) -def take_data(parent_pdf_set=None, members=False, xgrid=None, Q2grid=None): +def take_data(parent_pdf_set=None, members=False, xgrid=None, mu2grid=None): """ Auxiliary function for `generate_pdf`. @@ -28,7 +28,7 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, Q2grid=None): if true every member of the parent is loaded xgrid : list(float) produced x grid if given - Q2grid : list(float) + mu2grid : list(float) produced Q2 grid if given Returns @@ -42,8 +42,8 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, Q2grid=None): """ if xgrid is None: xgrid = np.geomspace(1e-9, 1, 240) - if Q2grid is None: - Q2grid = np.geomspace(1.3, 1e5, 35) + if mu2grid is None: + mu2grid = np.geomspace(1.3, 1e5, 35) # collect blocks all_blocks = [] info = None @@ -63,7 +63,7 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, Q2grid=None): else: toylh = toy.mkPDF("", 0) all_blocks.append( - [generate_block(toylh.xfxQ2, xgrid, Q2grid, br.flavor_basis_pids)] + [generate_block(toylh.xfxQ2, xgrid, mu2grid, br.flavor_basis_pids)] ) else: @@ -84,7 +84,7 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, Q2grid=None): if pid not in parent_pdf_set else parent_pdf_set[pid](x, Q2), xgrid, - Q2grid, + mu2grid, br.flavor_basis_pids, ) ] @@ -102,7 +102,7 @@ def generate_pdf( info_update=None, install=False, xgrid=None, - Q2grid=None, + mu2grid=None, ): """ Generate a new PDF from a parent PDF with a set of flavors. @@ -147,7 +147,7 @@ def generate_pdf( install on LHAPDF path xgrid : list(float) produced x grid if given - Q2grid : list(float) + mu2grid : list(float) produced Q2 grid if given Examples @@ -187,7 +187,7 @@ def generate_pdf( # labels = verify_labels(args.labels) info, heads, all_blocks = take_data( - parent_pdf_set, members, xgrid=xgrid, Q2grid=Q2grid + parent_pdf_set, members, xgrid=xgrid, mu2grid=mu2grid ) # filter the PDF @@ -236,14 +236,14 @@ def install_pdf(name): shutil.move(str(src), str(target)) -def generate_block(xfxQ2, xgrid, Q2grid, pids): +def generate_block(xfxQ2, xgrid, mu2grid, pids): """Generate an LHAPDF data block from a callable. Parameters ---------- xfxQ2 : callable LHAPDF like callable - Q2grid : list(float) + mu2grid : list(float) Q grid pids : list(int) Flavors list @@ -256,10 +256,10 @@ def generate_block(xfxQ2, xgrid, Q2grid, pids): PDF block """ - block = dict(Q2grid=Q2grid, pids=pids, xgrid=xgrid) + block = dict(mu2grid=mu2grid, pids=pids, xgrid=xgrid) data = [] for x in xgrid: - for Q2 in Q2grid: + for Q2 in mu2grid: data.append(np.array([xfxQ2(pid, x, Q2) for pid in pids])) block["data"] = np.array(data) return block diff --git a/src/ekobox/genpdf/export.py b/src/ekobox/genpdf/export.py index 1304c8595..98bab23b0 100644 --- a/src/ekobox/genpdf/export.py +++ b/src/ekobox/genpdf/export.py @@ -1,3 +1,4 @@ +"""PDF set writer.""" import io import pathlib import re @@ -7,37 +8,37 @@ def list_to_str(ls, fmt="%.6e"): - """ - Convert a list of numbers to a string + """Convert a list of numbers to a string. 
Parameters ---------- - ls : list(float) - list - fmt : str - format string + ls : list(float) + list + fmt : str + format string Returns ------- - str : - final string + str : + final string + """ return " ".join([fmt % x for x in ls]) def array_to_str(ar): - """ - Convert an array of numbers to a string + """Convert an array of numbers to a string. Parameters ---------- - ar : list(list(float)) - array + ar : list(list(float)) + array Returns ------- - str : - final string + str : + final string + """ table = "" for line in ar: @@ -46,19 +47,19 @@ def array_to_str(ar): def dump_blocks(name, member, blocks, pdf_type=None): - """ - Write LHAPDF data file. + """Write LHAPDF data file. Parameters ---------- - name : str or os.PathLike - target name or path - member : int - PDF member - blocks : list(dict) - pdf blocks of data - inherit : str - str to be copied in the head of member files + name : str or os.PathLike + target name or path + member : int + PDF member + blocks : list(dict) + pdf blocks of data + inherit : str + str to be copied in the head of member files + """ path_name = pathlib.Path(name) target = path_name / f"{path_name.stem}_{member:04d}.dat" @@ -74,15 +75,14 @@ def dump_blocks(name, member, blocks, pdf_type=None): o.write("Format: lhagrid1\n---\n") for b in blocks: o.write(list_to_str(b["xgrid"]) + "\n") - o.write(list_to_str(list(np.sqrt(b["Q2grid"]))) + "\n") + o.write(list_to_str(list(np.sqrt(b["mu2grid"]))) + "\n") o.write(list_to_str(b["pids"], "%d") + "\n") o.write(array_to_str(b["data"])) o.write("---\n") def dump_info(name, info): - """ - Write LHAPDF info file. + """Write LHAPDF info file. NOTE: Since LHAPDF info files are not truely yaml files, we have to use a slightly more complicated function to @@ -90,10 +90,11 @@ def dump_info(name, info): Parameters ---------- - name : str or os.Pathlike - target name or path - info : dict - info dictionary + name : str or os.Pathlike + target name or path + info : dict + info dictionary + """ path_name = pathlib.Path(name) target = path_name / f"{path_name.stem}.info" @@ -109,19 +110,19 @@ def dump_info(name, info): def dump_set(name, info, member_blocks, pdf_type_list=None): - """ - Dump a whole set. + """Dump a whole set. Parameters ---------- - name : str - target name - info : dict - info dictionary - member_blocks : list(list(dict)) - blocks for all members - pdf_type : list(str) - list of strings to be copied in the head of member files + name : str + target name + info : dict + info dictionary + member_blocks : list(list(dict)) + blocks for all members + pdf_type : list(str) + list of strings to be copied in the head of member files + """ dump_info(name, info) for mem, blocks in enumerate(member_blocks): diff --git a/src/ekobox/genpdf/load.py b/src/ekobox/genpdf/load.py index dbb23960d..f4ecb9f74 100644 --- a/src/ekobox/genpdf/load.py +++ b/src/ekobox/genpdf/load.py @@ -1,3 +1,4 @@ +"""PDF set elements loaders.""" import pathlib import numpy as np @@ -12,18 +13,18 @@ def load_info_from_file(pdfset_name): - """ - Load the info file from a parent pdf. + """Load the info file from a parent pdf. Parameters ---------- - pdfset_name : str - parent pdf name + pdfset_name : str + parent pdf name Returns ------- - dict - info dictionary + dict + info dictionary + """ import lhapdf # pylint: disable=import-error, import-outside-toplevel @@ -34,22 +35,21 @@ def load_info_from_file(pdfset_name): def load_blocks_from_file(pdfset_name, member): - """ - Load a pdf from a parent pdf. + """Load a pdf from a parent pdf. 
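# Illustration only: a usage sketch of the loaders above; it assumes an LHAPDF
# installation with, e.g., the CT18NNLO set already available locally.
from ekobox.genpdf import load

info = load.load_info_from_file("CT18NNLO")
head, blocks = load.load_blocks_from_file("CT18NNLO", 0)
print(info["NumMembers"], len(blocks), blocks[0]["mu2grid"][:3])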
Parameters ---------- - pdfset_name : str - parent pdf name - member : int - pdf member + pdfset_name : str + parent pdf name + member : int + pdf member Returns ------- - str : - head of member file - list(dict) : - pdf blocks of data + str : + head of member file + list(dict) : + pdf blocks of data """ import lhapdf # pylint: disable=import-error, import-outside-toplevel @@ -75,8 +75,10 @@ def load_blocks_from_file(pdfset_name, member): for line in cnt[head_section + 4 : next_head_section]: elems = np.fromstring(line.strip(), sep=" ") data.append(elems) - Q2grid = [el * el for el in Qgrid] - blocks.append(dict(xgrid=xgrid, Q2grid=Q2grid, pids=pids, data=np.array(data))) + mu2grid = [el * el for el in Qgrid] + blocks.append( + dict(xgrid=xgrid, mu2grid=mu2grid, pids=pids, data=np.array(data)) + ) # iterate head_section = next_head_section return head, blocks diff --git a/src/ekobox/info_file.py b/src/ekobox/info_file.py index 2d9ceb214..8f48cd56e 100644 --- a/src/ekobox/info_file.py +++ b/src/ekobox/info_file.py @@ -42,30 +42,30 @@ def build( template_info["NumFlavors"] = 14 template_info["Flavors"] = [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 21, 22] # TODO actually point to input grid - template_info["XMin"] = float(operators_card.rotations.xgrid.raw[0]) - template_info["XMax"] = float(operators_card.rotations.xgrid.raw[-1]) + template_info["XMin"] = float(operators_card.xgrid.raw[0]) + template_info["XMax"] = float(operators_card.xgrid.raw[-1]) template_info["NumMembers"] = num_members template_info["OrderQCD"] = theory_card.order[0] - 1 template_info["QMin"] = round(math.sqrt(operators_card.mu2grid[0]), 4) template_info["QMax"] = round(math.sqrt(operators_card.mu2grid[-1]), 4) - template_info["MZ"] = theory_card.couplings.alphas.scale + template_info["MZ"] = theory_card.couplings.scale template_info["MUp"] = 0.0 template_info["MDown"] = 0.0 template_info["MStrange"] = 0.0 - template_info["MCharm"] = theory_card.quark_masses.c.value - template_info["MBottom"] = theory_card.quark_masses.b.value - template_info["MTop"] = theory_card.quark_masses.t.value - template_info["AlphaS_MZ"] = theory_card.couplings.alphas.value + template_info["MCharm"] = theory_card.heavy.masses.c.value + template_info["MBottom"] = theory_card.heavy.masses.b.value + template_info["MTop"] = theory_card.heavy.masses.t.value + template_info["AlphaS_MZ"] = theory_card.couplings.alphas template_info["AlphaS_OrderQCD"] = theory_card.order[0] - 1 evmod = couplings.couplings_mod_ev(operators_card.configs.evolution_method) - quark_masses = [(x.value) ** 2 for x in theory_card.quark_masses] + quark_masses = [(x.value) ** 2 for x in theory_card.heavy.masses] sc = couplings.Couplings( theory_card.couplings, theory_card.order, evmod, quark_masses, - hqm_scheme=theory_card.quark_masses_scheme, - thresholds_ratios=np.power(list(iter(theory_card.matching)), 2.0), + hqm_scheme=theory_card.heavy.masses_scheme, + thresholds_ratios=np.power(list(iter(theory_card.heavy.matching_ratios)), 2.0), ) alphas_values = np.array( [ diff --git a/src/ekomark/benchmark/external/LHA_utils.py b/src/ekomark/benchmark/external/LHA_utils.py index 718c76ed1..4ee88c70e 100644 --- a/src/ekomark/benchmark/external/LHA_utils.py +++ b/src/ekomark/benchmark/external/LHA_utils.py @@ -126,9 +126,9 @@ def compute_LHA_data(theory, operators, rotate_to_evolution_basis=False): """ polarized = operators["polarized"] - Q2grid = operators["Q2grid"] - if not np.allclose(Q2grid, [1e4]): - raise ValueError("Q2grid has to be [1e4]") + mu2grid = operators["mu2grid"] + if 
not np.allclose(mu2grid, [1e4]): + raise ValueError("mu2grid has to be [1e4]") order = theory["PTO"] # select which data if polarized and order <= 1: diff --git a/src/ekomark/benchmark/external/apfel_utils.py b/src/ekomark/benchmark/external/apfel_utils.py index d2010a5a2..5419b2663 100644 --- a/src/ekomark/benchmark/external/apfel_utils.py +++ b/src/ekomark/benchmark/external/apfel_utils.py @@ -72,7 +72,7 @@ def compute_apfel_data( # Run apf_tabs = {} - for q2 in operators["Q2grid"]: + for q2 in operators["mu2grid"]: apfel.EvolveAPFEL(theory["Q0"], np.sqrt(q2)) print(f"Executing APFEL took {(time.perf_counter() - apf_start)} s") diff --git a/src/ekomark/benchmark/external/lhapdf_utils.py b/src/ekomark/benchmark/external/lhapdf_utils.py index e337d8476..5b0beb3dd 100644 --- a/src/ekomark/benchmark/external/lhapdf_utils.py +++ b/src/ekomark/benchmark/external/lhapdf_utils.py @@ -30,7 +30,7 @@ def compute_LHAPDF_data( target_xgrid = operators["interpolation_xgrid"] out_tabs = {} - for q2 in operators["Q2grid"]: + for mu2 in operators["mu2grid"]: tab = {} for pid in br.flavor_basis_pids: if pid in skip_pdfs: @@ -39,7 +39,7 @@ def compute_LHAPDF_data( # collect lhapdf me = [] for x in target_xgrid: - xf = pdf.xfxQ2(pid, x, q2) + xf = pdf.xfxQ2(pid, x, mu2) me.append(xf) tab[pid] = np.array(me) @@ -61,7 +61,7 @@ def compute_LHAPDF_data( evol_pdf = rotate_flavor_to_evolution @ pdfs tab = dict(zip(evol_basis, evol_pdf)) - out_tabs[q2] = tab + out_tabs[mu2] = tab ref = { "target_xgrid": target_xgrid, diff --git a/src/ekomark/benchmark/external/pegasus_utils.py b/src/ekomark/benchmark/external/pegasus_utils.py index df84a899e..7655e3564 100644 --- a/src/ekomark/benchmark/external/pegasus_utils.py +++ b/src/ekomark/benchmark/external/pegasus_utils.py @@ -82,12 +82,12 @@ def compute_pegasus_data(theory, operators, skip_pdfs, rotate_to_evolution_basis # run pegaus out_tabs = {} - for q2 in operators["Q2grid"]: + for mu2 in operators["mu2grid"]: tab = {} for x in target_xgrid: # last two numbers are the min and max pid to calculate, # keep everthing for simplicity. 
- xf, _ = pegasus.xparton(x, q2, -6, 6) + xf, _ = pegasus.xparton(x, mu2, -6, 6) temp = dict(zip(labels, xf)) for pid in labels: if pid in skip_pdfs: @@ -114,7 +114,7 @@ def compute_pegasus_data(theory, operators, skip_pdfs, rotate_to_evolution_basis evol_pdf = rotate_flavor_to_evolution @ pdfs tab = dict(zip(evol_basis, evol_pdf)) - out_tabs[q2] = tab + out_tabs[mu2] = tab ref = {"target_xgrid": target_xgrid, "values": out_tabs} diff --git a/src/ekomark/data/db.py b/src/ekomark/data/db.py index 6aa3beab6..74bf71a16 100644 --- a/src/ekomark/data/db.py +++ b/src/ekomark/data/db.py @@ -16,7 +16,7 @@ class Operator(Base): # pylint: disable=too-few-public-methods debug_skip_singlet = Column(Boolean) ev_op_max_order = Column(Integer) ev_op_iterations = Column(Integer) - Q2grid = Column(Text) + mugrid = Column(Text) backward_inversion = Column(Text) polarized = Column(Boolean) time_like = Column(Boolean) diff --git a/src/ekomark/data/operators.py b/src/ekomark/data/operators.py index bf19cb465..dfbbc6ad9 100644 --- a/src/ekomark/data/operators.py +++ b/src/ekomark/data/operators.py @@ -17,7 +17,7 @@ n_integration_cores=0, debug_skip_non_singlet=False, debug_skip_singlet=False, - Q2grid=[100], + mugrid=[10], inputgrid=None, targetgrid=None, inputpids=None, @@ -32,25 +32,25 @@ lhapdf_config = { # "ev_op_max_order": [10], # "ev_op_iterations": [2, 10, 30], - "Q2grid": [[20, 1.0e2, 1.0e3, 1.0e4]], + "mugrid": [[4.4, 10, 31, 100]], } apfel_config = { # "ev_op_max_order": [10], # "ev_op_iterations": [2, 10, 30], - "Q2grid": [[1.0e3, 1.0e4]], + "mugrid": [[31, 100]], } pegasus_config = { "ev_op_max_order": [10], "ev_op_iterations": [10], - "Q2grid": [[1.0e3]], + "mugrid": [[31]], } pegasus_exact_config = { "ev_op_max_order": [15], "ev_op_iterations": [20], - "Q2grid": [[1.0e3]], + "mugrid": [[31]], } diff --git a/tests/conftest.py b/tests/conftest.py index 7f2e1b7c6..ce91ee8cb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,7 +33,7 @@ class FakePDF: def hasFlavor(self, pid): return pid == 1 - def xfxQ2(self, _pid, x, _q2): + def xfxQ2(self, _pid, x, _mu2): return x @@ -52,7 +52,7 @@ def theory_ffns(theory_card): def set_(flavors: int) -> TheoryCard: i = flavors - 3 for q in "cbt"[i:]: - setattr(theory_card.matching, q, np.inf) + setattr(theory_card.heavy.matching_ratios, q, np.inf) return theory_card return set_ @@ -61,7 +61,7 @@ def set_(flavors: int) -> TheoryCard: @pytest.fixture def operator_card(): card = cards.example.operator() - card.rotations.xgrid = interpolation.XGrid([0.1, 0.3, 0.5, 1.0]) + card.xgrid = interpolation.XGrid([0.1, 0.3, 0.5, 1.0]) card.configs.interpolation_polynomial_degree = 2 return card @@ -91,12 +91,12 @@ def _create(self): self.cache = ( EKO.create(self.path).load_cards(self.theory, self.operator).build() ) - lx = len(self.operator.rotations.xgrid) - lpids = len(self.operator.rotations.pids) - for q2, op in self._operators( + lx = len(self.operator.xgrid) + lpids = len(self.operator.pids) + for mu2, op in self._operators( mugrid=self.operator.mu2grid, shape=(lpids, lx) ).items(): - self.cache[q2] = op + self.cache[mu2] = op return self.cache diff --git a/tests/eko/evolution_operator/test_grid.py b/tests/eko/evolution_operator/test_grid.py index 9a7f6c377..a57dffefe 100644 --- a/tests/eko/evolution_operator/test_grid.py +++ b/tests/eko/evolution_operator/test_grid.py @@ -6,7 +6,6 @@ """ import enum -import logging import pathlib import numpy as np @@ -14,6 +13,7 @@ import eko.io.types from eko.runner import legacy +from eko.quantities.couplings import 
CouplingEvolutionMethod def test_init_errors(monkeypatch, theory_ffns, operator_card, tmp_path, caplog): @@ -24,7 +24,7 @@ class FakeEM(enum.Enum): monkeypatch.setattr( legacy, "couplings_mod_ev", - lambda *args: eko.io.types.CouplingEvolutionMethod.EXACT, + lambda *args: CouplingEvolutionMethod.EXACT, ) operator_card.configs.evolution_method = FakeEM.BLUB with pytest.raises(ValueError, match="blub"): @@ -37,10 +37,9 @@ class FakeEM(enum.Enum): assert "exact" in caplog.text -def test_compute_q2grid(theory_ffns, operator_card, tmp_path): - mugrid = np.array([10.0, 100.0]) - operator_card._mugrid = mugrid - operator_card._mu2grid = None +def test_compute_mu2grid(theory_ffns, operator_card, tmp_path): + mugrid = [(10.0, 5), (100.0, 5)] + operator_card.mugrid = mugrid opgrid = legacy.Runner( theory_ffns(3), operator_card, path=tmp_path / "eko.tar" ).op_grid @@ -79,8 +78,7 @@ def test_mod_expanded(theory_card, theory_ffns, operator_card, tmp_path: pathlib else: theory = theory_card theory.order = (1, 0) - theory.num_flavs_init = nf0 - theory.matching + theory.heavy.num_flavs_init = nf0 path.unlink(missing_ok=True) opgrid = legacy.Runner(theory, operator_card, path=path).op_grid opg = opgrid.compute(3) diff --git a/tests/eko/evolution_operator/test_init.py b/tests/eko/evolution_operator/test_init.py index e4a70ca86..a55ebe035 100644 --- a/tests/eko/evolution_operator/test_init.py +++ b/tests/eko/evolution_operator/test_init.py @@ -340,7 +340,7 @@ def test_compute_no_skip_sv( scipy.integrate, "quad", lambda *args, v=v, **kwargs: (v, 0.56) ) o.compute() - lx = len(ocard.rotations.xgrid.raw) + lx = len(ocard.xgrid.raw) res = np.full((lx, lx), v) res[-1, -1] = 1.0 # ns are all diagonal, so they start from an identity matrix @@ -382,7 +382,7 @@ def test_compute(self, monkeypatch, theory_ffns, operator_card, tmp_path): assert k in o1.op_members np.testing.assert_allclose( o1.op_members[k].value, - np.eye(len(ocard.rotations.xgrid.raw)), + np.eye(len(ocard.xgrid.raw)), err_msg=k, ) diff --git a/tests/eko/evolution_operator/test_ome.py b/tests/eko/evolution_operator/test_ome.py index db7f9d728..2c0fdf644 100644 --- a/tests/eko/evolution_operator/test_ome.py +++ b/tests/eko/evolution_operator/test_ome.py @@ -298,7 +298,7 @@ def test_quad_ker(monkeypatch): # } # operators_card = { -# "Q2grid": [1, 10], +# "mugrid": [(1, 3), (10, 5)], # "interpolation_xgrid": [0.1, 1.0], # "interpolation_polynomial_degree": 1, # "interpolation_is_log": True, @@ -386,8 +386,8 @@ def test_labels(self, theory_ffns, operator_card, tmp_path: pathlib.Path): def test_compute_n3lo(self, theory_ffns, operator_card, tmp_path): theory_card: TheoryCard = theory_ffns(5) - theory_card.matching.c = 1.0 - theory_card.matching.b = 1.0 + theory_card.heavy.matching_ratios.c = 1.0 + theory_card.heavy.matching_ratios.b = 1.0 theory_card.order = (4, 0) operator_card.debug.skip_singlet = True g = legacy.Runner(theory_card, operator_card, path=tmp_path / "eko.tar").op_grid @@ -395,7 +395,7 @@ def test_compute_n3lo(self, theory_ffns, operator_card, tmp_path): g.config, g.managers, is_backward=True, - q2=theory_card.quark_masses.b.value**2, + q2=theory_card.heavy.masses.b.value**2, nf=4, L=0, is_msbar=False, @@ -417,8 +417,8 @@ def test_compute_n3lo(self, theory_ffns, operator_card, tmp_path): def test_compute_lo(self, theory_ffns, operator_card, tmp_path): theory_card = theory_ffns(5) - theory_card.matching.c = 1.0 - theory_card.matching.b = 1.0 + theory_card.heavy.matching_ratios.c = 1.0 + theory_card.heavy.matching_ratios.b = 1.0 
theory_card.order = (1, 0) operator_card.debug.skip_singlet = False operator_card.debug.skip_non_singlet = False @@ -427,7 +427,7 @@ def test_compute_lo(self, theory_ffns, operator_card, tmp_path): g.config, g.managers, is_backward=False, - q2=theory_card.quark_masses.b.value**2, + q2=theory_card.heavy.masses.b.value**2, nf=4, L=0, is_msbar=False, @@ -460,12 +460,12 @@ def test_compute_lo(self, theory_ffns, operator_card, tmp_path): ) def test_compute_nlo(self, theory_ffns, operator_card: OperatorCard, tmp_path): - theory_card = theory_ffns(5) - theory_card.matching.c = 1.0 - theory_card.matching.b = 1.0 + theory_card: TheoryCard = theory_ffns(5) + theory_card.heavy.matching_ratios.c = 1.0 + theory_card.heavy.matching_ratios.b = 1.0 theory_card.order = (2, 0) - operator_card.mu2grid = np.array([20.0]) - operator_card.rotations.xgrid = interpolation.XGrid([0.001, 0.01, 0.1, 1.0]) + operator_card.mugrid = [(20.0, 5)] + operator_card.xgrid = interpolation.XGrid([0.001, 0.01, 0.1, 1.0]) operator_card.configs.interpolation_polynomial_degree = 1 operator_card.configs.interpolation_is_log = True operator_card.configs.ev_op_max_order = (2, 0) @@ -479,14 +479,14 @@ def test_compute_nlo(self, theory_ffns, operator_card: OperatorCard, tmp_path): g.config, g.managers, is_backward=False, - q2=theory_card.quark_masses.b.value**2, + q2=theory_card.heavy.masses.b.value**2, nf=4, L=0, is_msbar=False, ) o.compute() - dim = len(operator_card.rotations.xgrid) + dim = len(operator_card.xgrid) shape = (dim, dim) for indices in [(100, br.matching_hplus_pid), (200, br.matching_hminus_pid)]: assert o.op_members[(indices[0], indices[0])].value.shape == shape diff --git a/tests/eko/io/test_dictlike.py b/tests/eko/io/test_dictlike.py index 4085b9ec5..908aad364 100644 --- a/tests/eko/io/test_dictlike.py +++ b/tests/eko/io/test_dictlike.py @@ -1,6 +1,6 @@ import io from dataclasses import dataclass -from typing import Sequence, Union +from typing import NewType, Sequence, Union import numpy as np import numpy.typing as npt @@ -79,3 +79,34 @@ def test_unsupported(): UnsupportedDictLike.from_dict(dict(sf=[3.14])) with pytest.raises(TypeError): UnsupportedUnionDictLike.from_dict(dict(uisf=[3.14])) + + +@dataclass +class ListLike(dictlike.DictLike): + a: int + b: int + c: int + + +def test_dictlike_list(): + ls = [1, 2, 3] + lslike = ListLike.from_dict(ls) + + assert isinstance(lslike, ListLike) + assert lslike.a == ls[0] + assert lslike.c == ls[2] + + +newint = NewType("newint", int) + + +@dataclass +class WithNewType(dictlike.DictLike): + nt: newint + + +def test_newtype(): + val = 42 + wnt = WithNewType.from_dict(dict(nt=val)) + + assert wnt.nt == val diff --git a/tests/eko/io/test_init.py b/tests/eko/io/test_init.py index 0e7706d88..d73d7d97e 100644 --- a/tests/eko/io/test_init.py +++ b/tests/eko/io/test_init.py @@ -32,13 +32,15 @@ def test_load_tar(self, out_v0, tmp_path: pathlib.Path): class TestManipulate: def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): # create object - mu2out = 10.0 + muout = 10.0 + mu2out = muout**2 xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) - eko_factory.operator.mu2grid = [mu2out] - eko_factory.operator.rotations.xgrid = xg + eko_factory.operator.mugrid = [(muout, 5)] + eko_factory.operator.xgrid = xg o1 = eko_factory.get() + lpids = 2 o1[mu2out] = eko.io.Operator( - operator=eko_identity([1, 2, len(xg), 2, len(xg)])[0] + operator=eko_identity([1, lpids, len(xg), lpids, len(xg)])[0] ) xgp = interpolation.XGrid(np.geomspace(1e-5, 1.0, 11)) # only target 
@@ -47,21 +49,21 @@ def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): with EKO.edit(otpath) as ot: manipulate.xgrid_reshape(ot, xgp) chk_keys(o1.raw, ot.raw) - assert ot[10].operator.shape == (2, len(xgp), 2, len(xg)) + assert ot[mu2out].operator.shape == (lpids, len(xgp), lpids, len(xg)) ottpath = tmp_path / "ott.tar" o1.deepcopy(ottpath) with EKO.edit(ottpath) as ott: with pytest.warns(Warning): manipulate.xgrid_reshape(ott, xg) chk_keys(o1.raw, ott.raw) - np.testing.assert_allclose(ott[10].operator, o1[10].operator) + np.testing.assert_allclose(ott[mu2out].operator, o1[mu2out].operator) # only input oipath = tmp_path / "oi.tar" o1.deepcopy(oipath) with EKO.edit(oipath) as oi: manipulate.xgrid_reshape(oi, inputgrid=xgp) - assert oi[10].operator.shape == (2, len(xg), 2, len(xgp)) + assert oi[mu2out].operator.shape == (lpids, len(xg), lpids, len(xgp)) chk_keys(o1.raw, oi.raw) oiipath = tmp_path / "oii.tar" o1.deepcopy(oiipath) @@ -69,7 +71,7 @@ def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): with pytest.warns(Warning): manipulate.xgrid_reshape(oii, inputgrid=xg) chk_keys(o1.raw, oii.raw) - np.testing.assert_allclose(oii[10].operator, o1[10].operator) + np.testing.assert_allclose(oii[mu2out].operator, o1[mu2out].operator) # both oitpath = tmp_path / "oit.tar" @@ -78,7 +80,7 @@ def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): manipulate.xgrid_reshape(oit, xgp, xgp) chk_keys(o1.raw, oit.raw) op = eko_identity([1, 2, len(xgp), 2, len(xgp)]) - np.testing.assert_allclose(oit[10].operator, op[0], atol=1e-10) + np.testing.assert_allclose(oit[mu2out].operator, op[0], atol=1e-10) # error with pytest.raises(ValueError): @@ -86,14 +88,15 @@ def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): def test_reshape_io(self, eko_factory: EKOFactory, tmp_path): eko_factory.path = tmp_path / "eko.tar" - eko_factory.operator.rotations.pids = np.array([0, 1]) eko_factory.operator.configs.interpolation_polynomial_degree = 1 # create object o1 = eko_factory.get() + lpids = len(o1.rotations.pids) path_copy = tmp_path / "eko_copy.tar" o1.deepcopy(path_copy) newxgrid = interpolation.XGrid([0.1, 1.0]) - inputpids = np.array([[1, -1], [1, 1]]) + inputpids = np.eye(lpids) + inputpids[:2, :2] = np.array([[1, -1], [1, 1]]) with EKO.edit(path_copy) as o2: manipulate.xgrid_reshape(o2, newxgrid, newxgrid) manipulate.flavor_reshape(o2, inputpids=inputpids) @@ -114,51 +117,52 @@ def test_reshape_io(self, eko_factory: EKOFactory, tmp_path): def test_flavor_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): # create object xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) - pids = np.array([0, 1]) - mu2out = 10.0 - eko_factory.operator.rotations.xgrid = xg - eko_factory.operator.rotations.pids = pids - eko_factory.operator.mu2grid = np.array([mu2out]) + muout = 10.0 + mu2out = muout**2 + eko_factory.operator.xgrid = xg + eko_factory.operator.mugrid = [(muout, 5)] o1 = eko_factory.get() - lpids = len(pids) + lpids = len(o1.rotations.pids) lx = len(xg) o1[mu2out] = eko.io.Operator( operator=eko_identity([1, lpids, lx, lpids, lx])[0], error=None, ) # only target - target_r = np.array([[1, -1], [1, 1]]) + target_r = np.eye(lpids) + target_r[:2, :2] = np.array([[1, -1], [1, 1]]) tpath = tmp_path / "ot.tar" ttpath = tmp_path / "ott.tar" o1.deepcopy(tpath) with EKO.edit(tpath) as ot: manipulate.flavor_reshape(ot, target_r) chk_keys(o1.raw, ot.raw) - assert ot[mu2out].operator.shape == (2, len(xg), 2, 
len(xg)) + assert ot[mu2out].operator.shape == (lpids, len(xg), lpids, len(xg)) ot.deepcopy(ttpath) with EKO.edit(ttpath) as ott: manipulate.flavor_reshape(ott, np.linalg.inv(target_r)) np.testing.assert_allclose(ott[mu2out].operator, o1[mu2out].operator) with pytest.warns(Warning): - manipulate.flavor_reshape(ott, np.eye(2)) + manipulate.flavor_reshape(ott, np.eye(lpids)) chk_keys(o1.raw, ott.raw) np.testing.assert_allclose(ott[mu2out].operator, o1[mu2out].operator) # only input - input_r = np.array([[1, -1], [1, 1]]) + input_r = np.eye(lpids) + input_r[:2, :2] = np.array([[1, -1], [1, 1]]) ipath = tmp_path / "oi.tar" iipath = tmp_path / "oii.tar" o1.deepcopy(ipath) with EKO.edit(ipath) as oi: manipulate.flavor_reshape(oi, inputpids=input_r) chk_keys(o1.raw, oi.raw) - assert oi[mu2out].operator.shape == (2, len(xg), 2, len(xg)) + assert oi[mu2out].operator.shape == (lpids, len(xg), lpids, len(xg)) oi.deepcopy(iipath) with EKO.edit(iipath) as oii: manipulate.flavor_reshape(oii, inputpids=np.linalg.inv(input_r)) np.testing.assert_allclose(oii[mu2out].operator, o1[mu2out].operator) with pytest.warns(Warning): - manipulate.flavor_reshape(oii, inputpids=np.eye(2)) + manipulate.flavor_reshape(oii, inputpids=np.eye(lpids)) chk_keys(o1.raw, oii.raw) np.testing.assert_allclose(oii[mu2out].operator, o1[mu2out].operator) @@ -166,11 +170,9 @@ def test_flavor_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): itpath = tmp_path / "oit.tar" o1.deepcopy(itpath) with EKO.edit(itpath) as oit: - manipulate.flavor_reshape( - oit, np.array([[1, -1], [1, 1]]), np.array([[1, -1], [1, 1]]) - ) + manipulate.flavor_reshape(oit, target_r, input_r) chk_keys(o1.raw, oit.raw) - op = eko_identity([1, 2, len(xg), 2, len(xg)]).copy() + op = eko_identity([1, lpids, len(xg), lpids, len(xg)]).copy() np.testing.assert_allclose(oit[mu2out].operator, op[0], atol=1e-10) # error fpath = tmp_path / "fail.tar" @@ -181,11 +183,11 @@ def test_flavor_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): def test_to_evol(self, eko_factory: EKOFactory, tmp_path): xgrid = interpolation.XGrid([0.5, 1.0]) - mu2_out = 2.0 + mu_out = 2.0 + mu2_out = mu_out**2 eko_factory.operator.mu0 = float(np.sqrt(1.0)) - eko_factory.operator.mu2grid = np.array([mu2_out]) - eko_factory.operator.rotations.xgrid = xgrid - eko_factory.operator.rotations.pids = np.array(br.flavor_basis_pids) + eko_factory.operator.mugrid = [(mu_out, 4)] + eko_factory.operator.xgrid = xgrid eko_factory.operator.configs.interpolation_polynomial_degree = 1 eko_factory.operator.configs.interpolation_is_log = False eko_factory.operator.configs.ev_op_max_order = (2, 0) diff --git a/tests/eko/io/test_runcards.py b/tests/eko/io/test_runcards.py index 49573c42f..22352604b 100644 --- a/tests/eko/io/test_runcards.py +++ b/tests/eko/io/test_runcards.py @@ -1 +1,52 @@ +from dataclasses import fields + +import numpy as np +import pytest + +from eko import interpolation from eko.io import runcards as rc + + +class TestRotations: + XGRID_TEST = [1e-3, 1e-2, 1e-1, 1.0] + + def test_serialization(self): + rot = rc.Rotations(interpolation.XGrid(self.XGRID_TEST)) + + d = rot.raw + rot1 = rot.from_dict(d) + + for f in fields(rc.Rotations): + assert getattr(rot, f.name) == getattr(rot1, f.name) + + assert d["targetgrid"] is None + assert "_targetgrid" not in d + + def test_pids(self): + rot = rc.Rotations(interpolation.XGrid(self.XGRID_TEST)) + + # no check on correctness of value set + rot.inputpids = [0, 1] + # but the internal grid is unmodified + assert len(rot.pids) == 
14 + # and fallback implemented for unset external bases + assert np.all(rot.targetpids == rot.pids) + + def test_grids(self): + rot = rc.Rotations(interpolation.XGrid(self.XGRID_TEST)) + + # no check on correctness of value set + rot.inputgrid = interpolation.XGrid([0.1, 1]) + # but the internal grid is unmodified + assert len(rot.xgrid) == len(self.XGRID_TEST) + # and fallback implemented for unset external grids + assert np.all(rot.targetgrid == rot.xgrid) + + +def test_flavored_mu2grid(): + mugrid = list(range(0, 40, 5)) + masses = [10, 20, 30] + ratios = [1, 1, 1] + + flavored = rc.flavored_mugrid(mugrid, masses, ratios) + assert pytest.approx([flav for _, flav in flavored]) == [3, 3, 4, 4, 5, 5, 6, 6] diff --git a/tests/eko/io/test_struct.py b/tests/eko/io/test_struct.py index 6324af1e2..67b4d3347 100644 --- a/tests/eko/io/test_struct.py +++ b/tests/eko/io/test_struct.py @@ -6,7 +6,9 @@ import pytest import yaml -from eko import EKO, interpolation +from eko import EKO +from eko import basis_rotation as br +from eko import interpolation from eko.io import struct from tests.conftest import EKOFactory @@ -49,32 +51,27 @@ def test_load_error(self, monkeypatch): class TestRotations: def test_fallback(self): - pids = np.array([1, 2]) xg = interpolation.XGrid([0.1, 1.0]) - r = struct.Rotations(xgrid=xg, pids=pids) - np.testing.assert_allclose(r.pids, pids) - np.testing.assert_allclose(r.targetpids, pids) - np.testing.assert_allclose(r.inputpids, pids) + r = struct.Rotations(xgrid=xg) + np.testing.assert_allclose(r.targetpids, r.pids) + np.testing.assert_allclose(r.inputpids, r.pids) assert r.xgrid == xg assert r.targetgrid == xg assert r.inputgrid == xg def test_overwrite(self): - pids = np.array([1, 2]) - tpids = np.array([3, 4]) - ipids = np.array([5, 6]) + tpids = np.array([3, 4] + list(br.flavor_basis_pids[2:])) + ipids = np.array([5, 6] + list(br.flavor_basis_pids[2:])) xg = interpolation.XGrid([0.1, 1.0]) txg = interpolation.XGrid([0.2, 1.0]) ixg = interpolation.XGrid([0.3, 1.0]) r = struct.Rotations( xgrid=xg, - pids=pids, _targetgrid=txg, _inputgrid=ixg, _targetpids=tpids, _inputpids=ipids, ) - np.testing.assert_allclose(r.pids, pids) np.testing.assert_allclose(r.targetpids, tpids) np.testing.assert_allclose(r.inputpids, ipids) assert r.xgrid == xg @@ -82,11 +79,10 @@ def test_overwrite(self): assert r.inputgrid == ixg def test_init(self): - pids = np.array([1, 2]) xg = interpolation.XGrid([0.1, 1.0]) txg = np.array([0.2, 1.0]) ixg = {"grid": [0.3, 1.0], "log": True} - r = struct.Rotations(xgrid=xg, pids=pids, _targetgrid=txg, _inputgrid=ixg) + r = struct.Rotations(xgrid=xg, _targetgrid=txg, _inputgrid=ixg) assert isinstance(r.xgrid, interpolation.XGrid) assert isinstance(r.targetgrid, interpolation.XGrid) assert isinstance(r.inputgrid, interpolation.XGrid) @@ -117,14 +113,15 @@ def test_load_error(self, tmp_path): struct.EKO.read(no_tar_path) def test_properties(self, eko_factory: EKOFactory): - mugrid = np.array([10.0]) + mu = 10.0 + mugrid = [(mu, 5)] eko_factory.operator.mugrid = mugrid eko = eko_factory.get() - assert hasattr(eko.theory_card, "quark_masses") + assert hasattr(eko.theory_card.heavy, "masses") assert hasattr(eko.operator_card, "debug") - np.testing.assert_allclose(eko.mu2grid, mugrid**2) - assert mugrid[0] ** 2 in eko - default_grid = eko.operator_card.rotations.xgrid + np.testing.assert_allclose(eko.mu2grid, [mu**2]) + assert mu**2 in eko + default_grid = eko.operator_card.xgrid assert eko.xgrid == default_grid xg = interpolation.XGrid([0.1, 1.0]) eko.xgrid = xg 
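The expected flavor labels in the test_runcards.py hunk above ([3, 3, 4, 4, 5, 5, 6, 6]) follow from the heavy-quark thresholds: with unit matching ratios, each scale in range(0, 40, 5) gets the number of active flavors implied by the masses [10, 20, 30]. A back-of-envelope re-derivation of those labels is sketched below; the helper expected_nf and the mu >= mass * ratio matching condition are illustrative assumptions, not eko's rc.flavored_mugrid implementation.

    # Hypothetical re-derivation of the nf labels asserted in the test above.
    def expected_nf(mu, masses=(10, 20, 30), ratios=(1, 1, 1)):
        nf = 3  # start below the charm threshold
        for mass, ratio in zip(masses, ratios):
            if mu >= mass * ratio:  # assumption: a new flavor opens at mass * ratio
                nf += 1
        return nf

    labels = [expected_nf(mu) for mu in range(0, 40, 5)]
    assert labels == [3, 3, 4, 4, 5, 5, 6, 6]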
@@ -140,7 +137,7 @@ def test_properties(self, eko_factory: EKOFactory): def test_ops(self, eko_factory: EKOFactory): mu = 10.0 mu2 = mu**2 - mugrid = np.array([mu]) + mugrid = [(mu, 5)] eko_factory.operator.mugrid = mugrid eko = eko_factory.get() v = np.random.rand(2, 2) @@ -176,7 +173,7 @@ def test_ops(self, eko_factory: EKOFactory): def test_copy(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): mu = 10.0 mu2 = mu**2 - mugrid = np.array([mu]) + mugrid = [(mu, 5)] eko_factory.operator.mugrid = mugrid eko1 = eko_factory.get() v = np.random.rand(2, 2) @@ -225,7 +222,7 @@ def test_items(self, eko_factory: EKOFactory): def test_iter(self, eko_factory): """Test managed iteration.""" - eko_factory.operator.mugrid = np.array([5.0, 20.0, 100.0]) + eko_factory.operator.mugrid = [(3.0, 4), (20.0, 5), (300.0, 6)] eko = eko_factory.get() mu2prev = None diff --git a/tests/eko/io/test_types.py b/tests/eko/io/test_types.py new file mode 100644 index 000000000..e8661084e --- /dev/null +++ b/tests/eko/io/test_types.py @@ -0,0 +1,18 @@ +from eko.io.types import ReferenceRunning + + +def test_reference_typed(): + Ref = ReferenceRunning[str] + + val = "ciao" + scale = 30.0 + r1 = Ref.typed(val, scale) + r2 = Ref([val, scale]) + + assert r1 == r2 + assert r1.value == r2.value == val + assert r1.scale == r2.scale == scale + + new_scale = 134.4323 + r2.scale = new_scale + assert r2.scale == new_scale == r2[1] diff --git a/tests/eko/kernels/test_kernels_QEDns.py b/tests/eko/kernels/test_kernels_QEDns.py index bb6208363..5fa48e12f 100644 --- a/tests/eko/kernels/test_kernels_QEDns.py +++ b/tests/eko/kernels/test_kernels_QEDns.py @@ -1,11 +1,11 @@ -import warnings - import numpy as np import pytest from eko import basis_rotation as br -from eko import beta +from eko.couplings import Couplings from eko.kernels import non_singlet_qed as ns +from eko.quantities.couplings import CouplingEvolutionMethod, CouplingsInfo +from eko.quantities.heavy_quarks import QuarkMassScheme from ekore.anomalous_dimensions.unpolarized import space_like as ad methods = [ @@ -112,24 +112,14 @@ def test_zero_true_gamma(): ) -from math import nan - -from eko.couplings import Couplings, couplings_mod_ev -from eko.io.types import ( - CouplingEvolutionMethod, - CouplingsRef, - EvolutionMethod, - MatchingScales, - QuarkMassSchemes, -) - alpharef = (0.118, 0.00781) masses = [m**2 for m in (2.0, 4.5, 175.0)] muref = 91.0 -couplings = CouplingsRef.from_dict( +couplings = CouplingsInfo.from_dict( dict( - alphas=[alpharef[0], muref], - alphaem=[alpharef[1], nan], + alphas=alpharef[0], + alphaem=alpharef[1], + scale=muref, num_flavs_ref=5, max_num_flavs=6, ) @@ -154,7 +144,7 @@ def test_ode(): order, evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) a0 = sc.a_s(mu2_0) @@ -244,7 +234,7 @@ def test_ode_true_gamma(): order, evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) a0 = sc.a_s(mu2_0) diff --git a/tests/eko/quantities/test_couplings.py b/tests/eko/quantities/test_couplings.py new file mode 100644 index 000000000..0a3b97415 --- /dev/null +++ b/tests/eko/quantities/test_couplings.py @@ -0,0 +1,9 @@ +from eko.quantities.couplings import CouplingsInfo + + +def test_couplings_ref(): + scale = 90.0 + d = dict(alphas=0.1, alphaem=0.01, scale=scale, max_num_flavs=6, num_flavs_ref=None) + couplings = CouplingsInfo.from_dict(d) + assert couplings.scale == scale + assert not couplings.em_running diff --git 
a/tests/eko/quantities/test_heavy_quarks.py b/tests/eko/quantities/test_heavy_quarks.py new file mode 100644 index 000000000..e536e7524 --- /dev/null +++ b/tests/eko/quantities/test_heavy_quarks.py @@ -0,0 +1,31 @@ +from eko.quantities.heavy_quarks import ( + HeavyQuarkMasses, + HeavyQuarks, + MatchingRatios, + QuarkMassRef, +) + + +def test_heavy_quarks(): + Labels = HeavyQuarks[str] + + c = "charm" + b = "bottom" + t = "top" + + labels = Labels([c, b, t]) + assert labels.c == c + assert labels.b == b + assert labels.t == t + + new_quark = "fantastique" + labels.b = new_quark + assert labels.b == new_quark + + +def test_concrete_types(): + masses = HeavyQuarkMasses([QuarkMassRef([m, m]) for m in [1.0, 5.0, 100.0]]) + assert all(hqm.value == hqm.scale for hqm in masses) + + ratios = MatchingRatios([1.0, 2.0, 3.0]) + assert ratios.b == 2.0 diff --git a/tests/eko/runner/test_compute.py b/tests/eko/runner/test_compute.py new file mode 100644 index 000000000..b00a3e362 --- /dev/null +++ b/tests/eko/runner/test_compute.py @@ -0,0 +1,9 @@ +from eko.runner.compute import compute +from eko.runner.recipes import Recipe + + +def test_compute(): + recipe = Recipe("ciao") + part = compute(recipe) + + assert hasattr(part, "operator") diff --git a/tests/eko/runner/test_legacy.py b/tests/eko/runner/test_legacy.py index 82bf38cfc..6d4f0f88a 100644 --- a/tests/eko/runner/test_legacy.py +++ b/tests/eko/runner/test_legacy.py @@ -6,7 +6,8 @@ import eko from eko import EKO -from eko.io.types import QuarkMassSchemes +from eko.io.runcards import TheoryCard +from eko.quantities.heavy_quarks import QuarkMassScheme def test_raw(theory_card, operator_card, tmp_path): @@ -28,15 +29,15 @@ class FakeEM(enum.Enum): BLUB = "blub" path = tmp_path / "eko.tar" - theory_card.quark_masses_scheme = FakeEM.BLUB + theory_card.heavy.masses_scheme = FakeEM.BLUB with pytest.raises(ValueError, match="BLUB"): eko.runner.legacy.Runner(theory_card, operator_card, path=path) # MSbar scheme - theory_card.quark_masses_scheme = QuarkMassSchemes.MSBAR + theory_card.heavy.masses_scheme = QuarkMassScheme.MSBAR theory_card.couplings.num_flavs_ref = 5 - theory_card.quark_masses.c.scale = 2 - theory_card.quark_masses.b.scale = 4.5 - theory_card.quark_masses.t.scale = 173.07 + theory_card.heavy.masses.c.scale = 2 + theory_card.heavy.masses.b.scale = 4.5 + theory_card.heavy.masses.t.scale = 173.07 r = eko.runner.legacy.Runner(theory_card, operator_card, path=path) r.compute() with EKO.read(path) as eko_: @@ -49,7 +50,7 @@ def check_shapes(o, txs, ixs, theory_card, operators_card): op_shape = (tpids, len(txs), ipids, len(ixs)) # check output = input - np.testing.assert_allclose(o.xgrid.raw, operators_card.rotations.xgrid.raw) + np.testing.assert_allclose(o.xgrid.raw, operators_card.xgrid.raw) # targetgrid and inputgrid in the opcard are now ignored, we are testing this np.testing.assert_allclose( o.rotations.targetgrid.raw, @@ -68,10 +69,10 @@ def check_shapes(o, txs, ixs, theory_card, operators_card): def test_vfns(theory_ffns, operator_card, tmp_path): # change targetpids path = tmp_path / "eko.tar" - tc = theory_ffns(3) + tc: TheoryCard = theory_ffns(3) oc = copy.deepcopy(operator_card) - tc.matching.c = 1.0 - tc.matching.b = 1.0 + tc.heavy.matching_ratios.c = 1.0 + tc.heavy.matching_ratios.b = 1.0 tc.order = (2, 0) oc.debug.skip_non_singlet = False r = eko.runner.legacy.Runner(tc, oc, path=path) diff --git a/tests/eko/runner/test_recipes.py b/tests/eko/runner/test_recipes.py new file mode 100644 index 000000000..581ea3a8d --- /dev/null +++ 
b/tests/eko/runner/test_recipes.py @@ -0,0 +1,19 @@ +from pathlib import Path + +import pytest + +from eko import EKO +from eko.io.runcards import OperatorCard, TheoryCard + + +@pytest.fixture +def neweko(theory_card: TheoryCard, operator_card: OperatorCard, tmp_path: Path): + path = tmp_path / "eko.tar" + with EKO.create(path) as builder: + yield builder.load_cards(theory_card, operator_card).build() + + path.unlink(missing_ok=True) + + +def test_create(neweko: EKO): + pass diff --git a/tests/eko/test_couplings.py b/tests/eko/test_couplings.py index 01ece7682..14f543dfe 100644 --- a/tests/eko/test_couplings.py +++ b/tests/eko/test_couplings.py @@ -10,13 +10,9 @@ import pytest from eko.couplings import Couplings, compute_matching_coeffs_up, couplings_mod_ev -from eko.io.types import ( - CouplingEvolutionMethod, - CouplingsRef, - EvolutionMethod, - MatchingScales, - QuarkMassSchemes, -) +from eko.io.types import EvolutionMethod +from eko.quantities.couplings import CouplingEvolutionMethod, CouplingsInfo +from eko.quantities.heavy_quarks import MatchingScales, QuarkMassScheme masses = [m**2 for m in (2.0, 4.5, 175.0)] @@ -54,10 +50,11 @@ class TestCouplings: def test_init(self): alpharef = (0.118, 0.00781) muref = 91.0 - couplings = CouplingsRef.from_dict( + couplings = CouplingsInfo.from_dict( dict( - alphas=[alpharef[0], muref], - alphaem=[alpharef[1], nan], + alphas=alpharef[0], + alphaem=alpharef[1], + scale=muref, num_flavs_ref=None, max_num_flavs=6, ) @@ -70,7 +67,7 @@ def test_init(self): order, evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) assert sc.q2_ref == muref**2 @@ -81,13 +78,13 @@ def test_init(self): # errors with pytest.raises(ValueError): coup1 = copy.deepcopy(couplings) - coup1.alphas.value = 0 + coup1.alphas = 0 Couplings( coup1, order, evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) with pytest.raises(NotImplementedError): @@ -96,29 +93,29 @@ def test_init(self): (0, 2), evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, - thresholds_ratios=MatchingScales(c=1.0, b=1.0, t=1.0), + hqm_scheme=QuarkMassScheme.POLE, + thresholds_ratios=MatchingScales([1.0, 1.0, 1.0]), ) with pytest.raises(ValueError): coup2 = copy.deepcopy(couplings) - coup2.alphaem.value = 0 + coup2.alphaem = 0 Couplings( coup2, order, evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) with pytest.raises(ValueError): coup3 = copy.deepcopy(couplings) - coup3.alphas.scale = 0 + coup3.scale = 0 Couplings( coup3, order, evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) with pytest.raises(NotImplementedError): @@ -127,7 +124,7 @@ def test_init(self): (6, 0), evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) with pytest.raises(NotImplementedError): @@ -136,7 +133,7 @@ def test_init(self): (1, 3), evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) with pytest.raises(ValueError): @@ -145,7 +142,7 @@ def test_init(self): (2, 0), FakeEM.BLUB, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) @@ -158,10 +155,11 @@ def test_ref(self): ] alpharef = (0.118, 0.00781) muref = 91.0 - couplings = CouplingsRef.from_dict( + 
couplings = CouplingsInfo.from_dict( dict( - alphas=[alpharef[0], muref], - alphaem=[alpharef[1], nan], + alphas=alpharef[0], + alphaem=alpharef[1], + scale=muref, num_flavs_ref=None, max_num_flavs=6, ) @@ -178,7 +176,7 @@ def test_ref(self): (order_s, order_em), evmod, masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=thresh_setup, ) np.testing.assert_approx_equal( @@ -193,10 +191,11 @@ def test_ref_copy_e841b0dfdee2f31d9ccc1ecee4d9d1a6f6624313(self): thresh_setup = (1.51, 4.75, 175.0) alpharef = np.array([0.35, 0.00781]) muref = 2.0 - couplings = CouplingsRef.from_dict( + couplings = CouplingsInfo.from_dict( dict( - alphas=[alpharef[0], muref], - alphaem=[alpharef[1], nan], + alphas=alpharef[0], + alphaem=alpharef[1], + scale=muref, num_flavs_ref=3, # reference nf is needed to force the matching max_num_flavs=6, ) @@ -206,7 +205,7 @@ def test_ref_copy_e841b0dfdee2f31d9ccc1ecee4d9d1a6f6624313(self): (4, 0), method=CouplingEvolutionMethod.EXACT, masses=masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=thresh_setup, ) np.testing.assert_allclose(sc.a_ref, np.array(alpharef) / (4.0 * np.pi)) @@ -229,10 +228,11 @@ def test_exact(self): for qed in range(2 + 1): for qedref in [muref, nan]: # testing both running and fixed alpha pto = (qcd, qed) - couplings = CouplingsRef.from_dict( + couplings = CouplingsInfo.from_dict( dict( - alphas=[alpharef[0], muref], - alphaem=[alpharef[1], qedref], + alphas=alpharef[0], + alphaem=alpharef[1], + scale=muref, num_flavs_ref=None, max_num_flavs=6, ) @@ -242,7 +242,7 @@ def test_exact(self): pto, method=CouplingEvolutionMethod.EXPANDED, masses=masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=thresh_setup, ) sc_exact = Couplings( @@ -250,7 +250,7 @@ def test_exact(self): pto, method=CouplingEvolutionMethod.EXACT, masses=masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=thresh_setup, ) if pto in [(1, 0), (1, 1), (1, 2)]: @@ -261,7 +261,7 @@ def test_exact(self): # At LO (either QCD or QED LO) the exact and expanded # solutions coincide, while beyond LO they don't. # Therefore if the path is too long they start being different. 
- if q2 in [1, 1e1] and pto not in [(1, 0)]: + if q2 in [1, 1e1] and pto not in [(1, 0), (0, 1)]: continue np.testing.assert_allclose( sc_expanded.a(q2)[0], @@ -291,13 +291,12 @@ def benchmark_expanded_n3lo(self): # use a big alpha_s to enlarge the difference alpharef = np.array([0.9, 0.00781]) muref = 90.0 - couplings = CouplingsRef.from_dict( - dict( - alphas=[alpharef[0], muref], - alphaem=[alpharef[1], nan], - num_flavs_ref=None, - max_num_flavs=6, - ) + couplings = CouplingsInfo( + alphas=alpharef[0], + alphaem=alpharef[1], + scale=muref, + num_flavs_ref=None, + max_num_flavs=6, ) m2c = 2 m2b = 25 @@ -310,7 +309,7 @@ def benchmark_expanded_n3lo(self): order=(3, 0), method=CouplingEvolutionMethod.EXPANDED, masses=masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=threshold_list, ) as_N3LO = Couplings( @@ -318,7 +317,7 @@ def benchmark_expanded_n3lo(self): order=(4, 0), method=CouplingEvolutionMethod.EXPANDED, masses=masses, - hqm_scheme=QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=threshold_list, ) np.testing.assert_allclose( diff --git a/tests/eko/test_msbar_masses.py b/tests/eko/test_msbar_masses.py index a7857eda7..c35f4359c 100644 --- a/tests/eko/test_msbar_masses.py +++ b/tests/eko/test_msbar_masses.py @@ -1,6 +1,4 @@ -""" - Tests for the threshold class -""" +"""Tests for the threshold class.""" import numpy as np import pytest @@ -8,20 +6,23 @@ from eko.couplings import Couplings from eko.io import types from eko.io.runcards import TheoryCard +from eko.quantities.couplings import CouplingEvolutionMethod +from eko.quantities.heavy_quarks import HeavyQuarkMasses, QuarkMassRef, QuarkMassScheme @pytest.fixture() def theory_card(theory_card: TheoryCard): th = theory_card th.order = (3, 0) - th.couplings.alphas.value = 0.1180 - th.couplings.alphas.scale = 91.0 - th.couplings.alphaem.value = 0.00781 + th.couplings.alphas = 0.1180 + th.couplings.scale = 91.0 + th.couplings.alphaem = 0.00781 th.couplings.num_flavs_ref = 5 - for qname, qmass in zip("cbt", [(2.0, 2.1), (4.0, 4.1), (175.0, 174.9)]): - q = getattr(th.quark_masses, qname) - q.value, q.scale = qmass - th.quark_masses_scheme = types.QuarkMassSchemes.MSBAR + th.heavy.masses = HeavyQuarkMasses( + [QuarkMassRef(val) for val in [(2.0, 2.1), (4.0, 4.1), (175.0, 174.9)]] + ) + + th.heavy.masses_scheme = QuarkMassScheme.MSBAR return th @@ -31,30 +32,30 @@ def test_compute_msbar_mass(self, theory_card: TheoryCard): th = theory_card # Test solution of msbar(m) = m - for method in list(types.CouplingEvolutionMethod): + for method in list(CouplingEvolutionMethod): for order in [2, 3, 4]: theory_card.order = (order, 0) # compute the scale such msbar(m) = m m2_computed = msbar_masses.compute( - th.quark_masses, + th.heavy.masses, th.couplings, th.order, method, - np.power(list(iter(th.matching)), 2.0), + np.power(th.heavy.matching_ratios, 2.0), ) strong_coupling = Couplings( th.couplings, th.order, method, masses=m2_computed.tolist(), - hqm_scheme=types.QuarkMassSchemes.POLE, + hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) m2_test = [] for nf in [3, 4, 5]: hq = "cbt"[nf - 3] - mass = getattr(th.quark_masses, hq) + mass = getattr(th.heavy.masses, hq) # compute msbar( m ) m2_ref = mass.value**2 Q2m_ref = mass.scale**2 @@ -74,30 +75,30 @@ def test_compute_msbar_mass_VFNS(self, theory_card: TheoryCard): # not given in the target patch (Qmc, mc are in NF=5) th = theory_card th.order = (4, 0) - th.quark_masses.c.value = 2.0 - 
th.quark_masses.c.scale = 80.0 - th.quark_masses.b.value = 4.0 - th.quark_masses.b.scale = 85.0 + th.heavy.masses.c.value = 2.0 + th.heavy.masses.c.scale = 80.0 + th.heavy.masses.b.value = 4.0 + th.heavy.masses.b.scale = 85.0 # compute the scale such msbar(m) = m m2_computed = msbar_masses.compute( - th.quark_masses, + th.heavy.masses, th.couplings, th.order, - types.CouplingEvolutionMethod.EXPANDED, - np.power(list(iter(th.matching)), 2.0), + CouplingEvolutionMethod.EXPANDED, + np.power(th.heavy.matching_ratios, 2.0), ) strong_coupling = Couplings( th.couplings, th.order, - types.CouplingEvolutionMethod.EXPANDED, + CouplingEvolutionMethod.EXPANDED, m2_computed.tolist(), - hqm_scheme=types.QuarkMassSchemes.MSBAR, + hqm_scheme=QuarkMassScheme.MSBAR, thresholds_ratios=[1.0, 1.0, 1.0], ) m2_test = [] for nf in [3, 4, 5]: hq = "cbt"[nf - 3] - mass = getattr(th.quark_masses, hq) + mass = getattr(th.heavy.masses, hq) # compute msbar( m ) m2_ref = mass.value**2 Q2m_ref = mass.scale**2 @@ -117,41 +118,41 @@ def test_errors(self, theory_card: TheoryCard): def compute(theory: TheoryCard): msbar_masses.compute( - theory.quark_masses, + theory.heavy.masses, theory.couplings, theory.order, - types.CouplingEvolutionMethod.EXPANDED, - np.power(list(iter(theory.matching)), 2.0), + CouplingEvolutionMethod.EXPANDED, + np.power(theory.heavy.matching_ratios, 2.0), ) # test mass ordering with pytest.raises(ValueError, match="MSbar masses are not to be sorted"): - th.quark_masses.c.value = 1.1 - th.quark_masses.c.scale = 1.2 - th.quark_masses.b.value = 1.0 - th.quark_masses.b.scale = 1.0 + th.heavy.masses.c.value = 1.1 + th.heavy.masses.c.scale = 1.2 + th.heavy.masses.b.value = 1.0 + th.heavy.masses.b.scale = 1.0 compute(th) # test forward conditions on alphas_ref with pytest.raises(ValueError, match="should be lower than"): - th.quark_masses.b.scale = 91.0001 + th.heavy.masses.b.scale = 91.0001 compute(th) # test backward conditions on alphas_ref with pytest.raises(ValueError, match="should be greater than"): - th.quark_masses.t.scale = 89.9999 + th.heavy.masses.t.scale = 89.9999 compute(th) - th.quark_masses.b.scale = 4.0 - th.quark_masses.t.scale = 175.0 + th.heavy.masses.b.scale = 4.0 + th.heavy.masses.t.scale = 175.0 # test forward conditions on masses with pytest.raises(ValueError, match="should be lower than m"): - th.quark_masses.t.value = 174.0 + th.heavy.masses.t.value = 174.0 compute(th) # test backward conditions on masses with pytest.raises(ValueError, match="should be greater than m"): - th.quark_masses.b.value = 4.1 - th.quark_masses.t.value = 176.0 + th.heavy.masses.b.value = 4.1 + th.heavy.masses.t.value = 176.0 compute(th) diff --git a/tests/eko/test_thresholds.py b/tests/eko/test_thresholds.py index ea3a631e0..0e3d9f699 100644 --- a/tests/eko/test_thresholds.py +++ b/tests/eko/test_thresholds.py @@ -16,7 +16,7 @@ def test_tuple(self): d[p.tuple] = 1 assert d[p.tuple] == 1 - def test_repr(self): + def test_str(self): p = PathSegment(0, 1, 3) s = str(p) assert s.index("0") > 0 @@ -56,7 +56,7 @@ def test_nfref(self): assert p[1].q2_to == 1.5 assert p[1].nf == 4 - def test_repr(self): + def test_str(self): walls = [1.23, 9.87, 14.54] stc3 = str(ThresholdsAtlas(walls)) diff --git a/tests/ekobox/test_apply.py b/tests/ekobox/test_apply.py index 8f5ab48ef..8e980818f 100644 --- a/tests/ekobox/test_apply.py +++ b/tests/ekobox/test_apply.py @@ -6,7 +6,6 @@ class TestApply: def test_apply(self, eko_factory: EKOFactory, fake_pdf): - eko_factory.operator.rotations.pids = np.array([0, 1]) eko = 
eko_factory.get() mu2_out = eko.mu2grid[0] # fake pdfs @@ -14,10 +13,6 @@ def test_apply(self, eko_factory: EKOFactory, fake_pdf): assert len(pdf_grid) == len(eko.mu2grid) pdfs = pdf_grid[mu2_out]["pdfs"] assert list(pdfs.keys()) == list(eko.rotations.targetpids) - ref_pid1 = eko[mu2_out].operator[0, :, 1, :] @ np.ones(len(eko.xgrid)) - np.testing.assert_allclose(pdfs[0], ref_pid1) - ref_pid2 = eko[mu2_out].operator[1, :, 1, :] @ np.ones(len(eko.xgrid)) - np.testing.assert_allclose(pdfs[1], ref_pid2) # rotate to target_grid target_grid = [0.75] pdf_grid = apply.apply_pdf(eko, fake_pdf, target_grid) @@ -26,24 +21,21 @@ def test_apply(self, eko_factory: EKOFactory, fake_pdf): assert list(pdfs.keys()) == list(eko.rotations.targetpids) def test_apply_flavor(self, eko_factory: EKOFactory, fake_pdf, monkeypatch): - eko_factory.operator.rotations.pids = np.array([0, 1]) eko = eko_factory.get() - q2_out = eko.mu2grid[0] + mu2_out = eko.mu2grid[0] # fake pdfs monkeypatch.setattr( - "eko.basis_rotation.rotate_flavor_to_evolution", np.ones((2, 2)) + "eko.basis_rotation.rotate_flavor_to_evolution", np.ones((14, 14)) ) monkeypatch.setattr( "eko.basis_rotation.flavor_basis_pids", eko.rotations.targetpids, ) - fake_evol_basis = ("a", "b") + fake_evol_basis = tuple( + ["a", "b"] + [str(x) for x in range(len(eko.rotations.pids) - 2)] + ) monkeypatch.setattr("eko.basis_rotation.evol_basis", fake_evol_basis) pdf_grid = apply.apply_pdf(eko, fake_pdf, rotate_to_evolution_basis=True) assert len(pdf_grid) == len(eko.mu2grid) - pdfs = pdf_grid[q2_out]["pdfs"] + pdfs = pdf_grid[mu2_out]["pdfs"] assert list(pdfs.keys()) == list(fake_evol_basis) - ref_a = ( - eko[q2_out].operator[0, :, 1, :] + eko[q2_out].operator[1, :, 1, :] - ) @ np.ones(len(eko.rotations.xgrid)) - np.testing.assert_allclose(pdfs["a"], ref_a) diff --git a/tests/ekobox/test_cards.py b/tests/ekobox/test_cards.py index ac28c5f66..1a35e137e 100644 --- a/tests/ekobox/test_cards.py +++ b/tests/ekobox/test_cards.py @@ -1,5 +1,6 @@ import math +import numpy as np import pytest from ekobox import cards @@ -7,21 +8,25 @@ def test_generate_ocard(): mu0 = 1.65 - mu2grid = [10.0, 100.0] + mugrid = [(10.0, 6), (100.0, 5)] op = cards.example.operator() op.mu0 = mu0 - op.mu2grid = mu2grid - assert pytest.approx(op.mu2grid) == mu2grid + op.mugrid = mugrid + assert pytest.approx(op.mugrid) == mugrid + assert pytest.approx(op.mu2grid) == np.array([mu**2 for mu, _ in mugrid]) assert op.configs.interpolation_polynomial_degree == 4 - mu2grid1 = [100.0] + mugrid1 = [100.0, 5] op = cards.example.operator() op.mu0 = mu0 - op.mu2grid = mu2grid1 + op.mugrid = mugrid1 op.configs.interpolation_polynomial_degree = 2 op.configs.interpolation_is_log = False - assert op.mu2grid == mu2grid1 + assert op.mugrid == mugrid1 assert op.configs.interpolation_polynomial_degree == 2 assert op.configs.interpolation_is_log is False + rawo = cards.example.raw_operator() + assert isinstance(rawo, dict) + assert op.debug.skip_non_singlet == rawo["debug"]["skip_non_singlet"] def test_dump_load_op_card(tmp_path, cd): @@ -42,9 +47,12 @@ def test_dump_load_op_card(tmp_path, cd): def test_generate_theory_card(): theory = cards.example.theory() assert hasattr(theory, "order") - assert theory.quark_masses.t.value == 173.07 + assert theory.heavy.masses.t.value == 173.07 theory.order = (2, 0) assert theory.order[0] == 2 + rawt = cards.example.raw_theory() + assert isinstance(rawt, dict) + assert theory.heavy.num_flavs_init == rawt["heavy"]["num_flavs_init"] def containsnan(obj) -> bool: diff --git 
a/tests/ekobox/test_evol_pdf.py b/tests/ekobox/test_evol_pdf.py index 3bc4df82f..1c51dfdad 100644 --- a/tests/ekobox/test_evol_pdf.py +++ b/tests/ekobox/test_evol_pdf.py @@ -10,7 +10,7 @@ def init_cards(): op = cards.example.operator() op.mu0 = 1.65 - op.rotations.xgrid = XGrid([0.1, 0.5, 1.0]) + op.xgrid = XGrid([0.1, 0.5, 1.0]) op.configs.interpolation_polynomial_degree = 1 theory = cards.example.theory() theory.order = (1, 0) diff --git a/tests/ekobox/test_genpdf.py b/tests/ekobox/test_genpdf.py index 77488f0b7..d14c5dfe6 100644 --- a/tests/ekobox/test_genpdf.py +++ b/tests/ekobox/test_genpdf.py @@ -37,7 +37,7 @@ def test_generate_block(): pids = np.arange(3) b = genpdf.generate_block(lambda pid, x, q2: pid * x * q2, xg, q2s, pids) assert isinstance(b, dict) - assert sorted(b.keys()) == sorted(["data", "Q2grid", "xgrid", "pids"]) + assert sorted(b.keys()) == sorted(["data", "mu2grid", "xgrid", "pids"]) assert isinstance(b["data"], np.ndarray) assert b["data"].shape == (len(xg) * len(q2s), len(pids)) @@ -79,7 +79,7 @@ def test_generate_pdf_debug_pid(fake_lhapdf, cd): info_update={"Debug": "debug"}, install=True, xgrid=xg, - Q2grid=q2s, + mu2grid=q2s, ) pp = fake_lhapdf / n assert not p.exists() @@ -105,8 +105,8 @@ def test_generate_pdf_debug_pid(fake_lhapdf, cd): # the gluon is non-zero in the bulk - x=0 is included here if ( pid == 21 - and k > len(b["Q2grid"]) - 1 - and k < len(b["data"]) - len(b["Q2grid"]) + and k > len(b["mu2grid"]) - 1 + and k < len(b["data"]) - len(b["mu2grid"]) ): assert not f == 0.0 else: @@ -146,14 +146,14 @@ def test_generate_pdf_pdf_evol(fake_lhapdf, fake_nn31, fake_mstw, fake_ct14, cd) for pid, f in zip(b["pids"], line): # the singlet is non-zero in the bulk - x >= 0 if abs(pid) in range(1, 6 + 1) and k < len(b["data"]) - len( - b["Q2grid"] + b["mu2grid"] ): assert not f == 0.0 assert not np.isclose(f, 0.0, atol=1e-15) else: # MSTW is not 0 at the end, but only almost if err_type == "error" and k >= len(b["data"]) - len( - b["Q2grid"] + b["mu2grid"] ): np.testing.assert_allclose(f, 0.0, atol=1e-15) else: @@ -179,7 +179,7 @@ def test_generate_pdf_toy_antiqed(fake_lhapdf, cd): [anti_qed_singlet], "toy", xgrid=xg, - Q2grid=q2s, + mu2grid=q2s, ) assert p.exists() # check info file @@ -195,7 +195,7 @@ def test_generate_pdf_toy_antiqed(fake_lhapdf, cd): for k, line in enumerate(b["data"]): for pid, f in zip(b["pids"], line): # the u and d are non-zero in the bulk - x=0 is not included here - if abs(pid) in [1, 2] and k < len(b["data"]) - len(b["Q2grid"]): + if abs(pid) in [1, 2] and k < len(b["data"]) - len(b["mu2grid"]): assert not f == 0.0 else: assert f == 0.0 diff --git a/tests/ekobox/test_genpdf_export.py b/tests/ekobox/test_genpdf_export.py index 381080d88..facfe0179 100644 --- a/tests/ekobox/test_genpdf_export.py +++ b/tests/ekobox/test_genpdf_export.py @@ -44,7 +44,7 @@ def fake_blocks(n_blocks, n_x, n_q2, n_pids): bs.append( { "xgrid": np.linspace(0.0, 1.0, n_x), - "Q2grid": np.geomspace(1.0, 1e3, n_q2), + "mu2grid": np.geomspace(1.0, 1e3, n_q2), "pids": np.arange(n_pids), "data": np.random.rand(n_x * n_q2, n_pids), } diff --git a/tests/ekobox/test_genpdf_flavors.py b/tests/ekobox/test_genpdf_flavors.py index 39860b98e..e1f3d695e 100644 --- a/tests/ekobox/test_genpdf_flavors.py +++ b/tests/ekobox/test_genpdf_flavors.py @@ -36,7 +36,7 @@ def test_flavors_evol_to_flavor(): def test_flavors_evol_raw(): blocks = [ { - "Q2grid": np.array([1, 2]), + "mu2grid": np.array([1, 2]), "xgrid": np.array([0.1, 1.0]), "pids": np.array([-1, 21, 1]), "data": np.array([[0.1, 
0.2, 0.1]] * 4), @@ -65,13 +65,13 @@ def test_flavors_evol_nodata(): # try with a block without data blocks = [ { - "Q2grid": np.array([1, 2]), + "mu2grid": np.array([1, 2]), "xgrid": np.array([0.1, 1.0]), "pids": np.array([-1, 21, 1]), "data": np.array([]), }, { - "Q2grid": np.array([1, 2]), + "mu2grid": np.array([1, 2]), "xgrid": np.array([0.1, 1.0]), "pids": np.array([-1, 21, 1]), "data": np.array([[0.1, 0.2, 0.1]] * 4), diff --git a/tests/ekobox/test_genpdf_load.py b/tests/ekobox/test_genpdf_load.py index ead1f4135..aab51cc17 100644 --- a/tests/ekobox/test_genpdf_load.py +++ b/tests/ekobox/test_genpdf_load.py @@ -15,7 +15,7 @@ def test_load_data_ct14(fake_ct14): assert len(blocks) == 1 b0 = blocks[0] assert isinstance(b0, dict) - assert sorted(b0.keys()) == sorted(["pids", "xgrid", "Q2grid", "data"]) + assert sorted(b0.keys()) == sorted(["pids", "xgrid", "mu2grid", "data"]) assert sorted(b0["pids"]) == sorted([-3, -2, -1, 21, 1, 2, 3]) assert len(b0["data"].T) == 7 np.testing.assert_allclose(b0["xgrid"][0], 1e-9) diff --git a/tests/ekobox/test_info_file.py b/tests/ekobox/test_info_file.py index fa2dee7c7..64af8eb93 100644 --- a/tests/ekobox/test_info_file.py +++ b/tests/ekobox/test_info_file.py @@ -8,10 +8,10 @@ def test_build(): theory = cards.example.theory() theory.order = (2, 0) - theory.couplings.alphas.value = 0.2 + theory.couplings.alphas = 0.2 op = cards.example.operator() op.mu0 = 1.0 - op.mu2grid = [10.0, 100.0] + op.mugrid = [(10.0, 5), (100.0, 5)] info = info_file.build( theory, op, 4, info_update={"SetDesc": "Prova", "NewArg": 15.3, "MTop": 1.0} ) @@ -19,7 +19,7 @@ def test_build(): assert info["SetDesc"] == "Prova" assert info["NewArg"] == 15.3 assert info["NumMembers"] == 4 - assert info["MTop"] == theory.quark_masses.t.value + assert info["MTop"] == theory.heavy.masses.t.value np.testing.assert_allclose(info["QMin"], math.sqrt(op.mu2grid[0]), rtol=1e-5) - assert info["XMin"] == op.rotations.xgrid.raw[0] - assert info["XMax"] == op.rotations.xgrid.raw[-1] == 1.0 + assert info["XMin"] == op.xgrid.raw[0] + assert info["XMax"] == op.xgrid.raw[-1] == 1.0 diff --git a/tests/ekobox/test_utils.py b/tests/ekobox/test_utils.py index 971ff2eb8..d4b896927 100644 --- a/tests/ekobox/test_utils.py +++ b/tests/ekobox/test_utils.py @@ -11,7 +11,7 @@ def test_ekos_product(tmp_path): # Generating two ekos mu01 = 5.0 - mu2grid1 = np.array([60.0, 80.0, 100.0]) + mugrid1 = [(1.0, 3), (8.0, 5), (10.0, 5)] xgrid = interpolation.XGrid([0.1, 0.5, 1.0]) theory = cards.example.theory() @@ -19,22 +19,25 @@ def test_ekos_product(tmp_path): op1 = cards.example.operator() op1.mu0 = mu01 - op1.mu2grid = mu2grid1 - op1.rotations.xgrid = xgrid + op1.mugrid = mugrid1 + op1.xgrid = xgrid op1.configs.interpolation_polynomial_degree = 1 - mu02 = 10.0 - mu2grid2 = np.array([80.0, 100.0, 120.0]) + mu0 = 10.0 + mugrid2 = [(8.0, 5), (10.0, 5), (12.0, 5)] op2 = cards.example.operator() - op2.mu0 = mu02 - op2.mu2grid = mu2grid2 - op2.rotations.xgrid = xgrid + op2.mu0 = mu0 + op2.mugrid = mugrid2 + op2.xgrid = xgrid op2.configs.interpolation_polynomial_degree = 1 op_err = copy.deepcopy(op2) op_err.mu0 = mu01 + mu2first = mugrid2[0][0] ** 2 + mu2last = mugrid2[-1][0] ** 2 + ini_path = tmp_path / "ini.tar" eko.solve(theory, op1, path=ini_path) fin_path = tmp_path / "fin.tar" @@ -49,14 +52,14 @@ def test_ekos_product(tmp_path): _ = utils.ekos_product(eko_ini, eko_fin_err) # product is copied res_path = tmp_path / "eko_res.tar" - eko_fin[120.0].error = None # drop one set of errors + eko_fin[mu2last].error = None # 
drop one set of errors utils.ekos_product(eko_ini, eko_fin, path=res_path) with EKO.read(res_path) as eko_res: assert eko_res.operator_card.mu20 == eko_ini.operator_card.mu20 np.testing.assert_allclose(eko_res.mu2grid[1:], eko_fin.mu2grid) np.testing.assert_allclose( - eko_ini[80.0].operator, eko_res[80.0].operator + eko_ini[mu2first].operator, eko_res[mu2first].operator ) # product overwrites initial @@ -64,8 +67,8 @@ def test_ekos_product(tmp_path): np.testing.assert_allclose(eko_ini.mu2grid[1:], eko_fin.mu2grid) np.testing.assert_allclose( - eko_res[80.0].operator, eko_ini[80.0].operator + eko_res[mu2first].operator, eko_ini[mu2first].operator ) utils.ekos_product(eko_ini, eko_fin) - assert eko_res[120.0].error is None + assert eko_res[mu2last].error is None
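Taken together, these hunks move the tests from the old Q2grid/rotations interface to the new runcards: flat coupling fields, heavy-quark settings under theory.heavy, evolution points as (mu, nf) tuples, and xgrid set directly on the operator card. A minimal end-to-end sketch of that interface follows; it only uses names that appear in this diff, the numerical values are illustrative, and the output path eko.tar is an arbitrary choice.

    # Sketch of the migrated runcard interface exercised by these tests.
    import pathlib

    import eko
    from eko import EKO, interpolation
    from ekobox import cards

    theory = cards.example.theory()
    theory.order = (1, 0)
    theory.couplings.alphas = 0.118              # plain value, no [value, scale] pair
    theory.couplings.scale = 91.0                # reference scale is its own field
    theory.heavy.matching_ratios.c = 1.0         # matching ratios live under theory.heavy

    op = cards.example.operator()
    op.mu0 = 1.65
    op.mugrid = [(10.0, 5), (100.0, 5)]          # evolution points are (mu, nf) tuples
    op.xgrid = interpolation.XGrid([0.1, 0.5, 1.0])  # xgrid sits on the card, not on rotations
    op.configs.interpolation_polynomial_degree = 1

    path = pathlib.Path("eko.tar")
    eko.solve(theory, op, path=path)
    with EKO.read(path) as evolved:
        for mu2 in evolved.mu2grid:              # operators are still keyed by mu^2
            print(mu2, evolved[mu2].operator.shape)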