From 15f12cecced8b8a118eaff4a2424078969682fa6 Mon Sep 17 00:00:00 2001 From: Enrico Garaldi Date: Tue, 3 Sep 2024 12:46:52 +0200 Subject: [PATCH 1/7] update python version in tests --- .github/workflows/python-package.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 88891c7..c3faca4 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v3 From 0d085af1088c873c3aebce6f06f0df5f689a8b84 Mon Sep 17 00:00:00 2001 From: Enrico Garaldi Date: Tue, 3 Sep 2024 15:23:09 +0200 Subject: [PATCH 2/7] added tests + improvements --- .github/workflows/python-package.yml | 2 +- corecon/DataEntryClass.py | 4 +- corecon/FieldClass.py | 13 +- .../Livermore_et_al_2017.py | 2 +- corecon/loaders.py | 7 +- test_import.py | 3 - tests/init.py | 2 + tests/test_DataEntryClass.py | 72 ++++++++ tests/test_FieldClass.py | 162 ++++++++++++++++++ tests/test_corecon.py | 28 +++ 10 files changed, 283 insertions(+), 12 deletions(-) delete mode 100644 test_import.py create mode 100644 tests/init.py create mode 100644 tests/test_DataEntryClass.py create mode 100644 tests/test_FieldClass.py create mode 100644 tests/test_corecon.py diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index c3faca4..957362f 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -37,4 +37,4 @@ jobs: # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - name: Test with pytest run: | - pytest --disable-pytest-warnings + pytest --disable-pytest-warnings tests diff --git a/corecon/DataEntryClass.py b/corecon/DataEntryClass.py index 6af751a..675cba9 100644 --- a/corecon/DataEntryClass.py +++ b/corecon/DataEntryClass.py @@ -50,8 +50,9 @@ def __init__(self, self.extra_data.append(k) self.extra_data = np.array(self.extra_data) - #create named entries + #create named entries and dimensions_descriptors_internal setattr(self, parent_field, values[:]) + self.dimensions_descriptors_internal = [] for k, descr in enumerate(dimensions_descriptors): descr = descr.replace(" ", "_") descr = ''.join(ch if ch.isalnum() or ch=="_" else '' for ch in descr) @@ -59,6 +60,7 @@ def __init__(self, setattr(self, descr, axes) else: setattr(self, descr, axes[:,k]) + self.dimensions_descriptors_internal.append(descr) def __repr__(self): """string describing the class diff --git a/corecon/FieldClass.py b/corecon/FieldClass.py index a9d7efa..732df10 100644 --- a/corecon/FieldClass.py +++ b/corecon/FieldClass.py @@ -17,6 +17,10 @@ def __init__(self, *arg, **kw): self.field_description = None self.field_units = None self.field_remarks = None + + # return a copy of the item, to preserve the originally-loaded item + #def __getitem__(self, key): + # return copy.deepcopy(super().__getitem__(key)) def __str__(self): return super().__str__() @@ -92,7 +96,7 @@ def filter_by_redshift_range(self, zmin, zmax): dict_zslice.field_description = self.field_description for k in self.keys(): - if not 'redshift' in self[k].dimensions_descriptors: + if not 'redshift' in self[k].dimensions_descriptors_internal: print("WARNING: missing redshift dimension for entry %s. 
Skipping it."%(k)) continue @@ -106,7 +110,7 @@ def filter_by_redshift_range(self, zmin, zmax): dict_zslice[k].upper_lim = dict_zslice[k].upper_lim[w] dict_zslice[k].lower_lim = dict_zslice[k].lower_lim[w] #variables auto-created from "axes" - for dd in self[k].dimensions_descriptors: + for dd in self[k].dimensions_descriptors_internal: _temp = getattr(dict_zslice[k], dd) setattr(dict_zslice[k], dd, _temp[w]) #variables auto-created from"values" @@ -160,7 +164,7 @@ def get_lower_limits(self): dict_lls[k].upper_lim = dict_lls[k].upper_lim[self[k].lower_lim] dict_lls[k].lower_lim = dict_lls[k].lower_lim[self[k].lower_lim] #variables auto-created from "axes" - for dd in self[k].dimensions_descriptors: + for dd in self[k].dimensions_descriptors_internal: _temp = getattr(dict_lls[k], dd) setattr(dict_lls[k], dd, _temp[self[k].lower_lim]) #variables auto-created from"values" @@ -195,7 +199,7 @@ def get_upper_limits(self): dict_uls[k].upper_lim = dict_uls[k].upper_lim[self[k].upper_lim] dict_uls[k].lower_lim = dict_uls[k].lower_lim[self[k].upper_lim] #variables auto-created from "axes" - for dd in self[k].dimensions_descriptors: + for dd in self[k].dimensions_descriptors_internal: _temp = getattr(dict_uls[k], dd) setattr(dict_uls[k], dd, _temp[self[k].upper_lim]) #variables auto-created from"values" @@ -207,4 +211,3 @@ def get_upper_limits(self): setattr(dict_uls[k], e, _temp[self[k].upper_lim]) return dict_uls - diff --git a/corecon/data/UV_luminosity_function/Livermore_et_al_2017.py b/corecon/data/UV_luminosity_function/Livermore_et_al_2017.py index db72857..c09f9c8 100644 --- a/corecon/data/UV_luminosity_function/Livermore_et_al_2017.py +++ b/corecon/data/UV_luminosity_function/Livermore_et_al_2017.py @@ -1,4 +1,4 @@ -dictionary_tag = "Livermore et a. 2017" +dictionary_tag = "Livermore et al. 2017" reference = "Livermore, Finkelstein, Lotz; ApJ. 835, 113 (2017)" diff --git a/corecon/loaders.py b/corecon/loaders.py index b8da74c..b9647bf 100644 --- a/corecon/loaders.py +++ b/corecon/loaders.py @@ -50,7 +50,12 @@ def _expand_field(field, shape): extra_data = {} for k in local_var_dict.keys(): extra_data[k] = np.array(local_var_dict[k], dtype=object) - + #convert to float whenever possible + try: + extra_data[k] = extra_data[k].astype(np.float64) + except ValueError: + pass + #expand None's, True's, and False's (this will also convert them to array) err_up = _expand_field(err_up , values.shape) err_down = _expand_field(err_down , values.shape) diff --git a/test_import.py b/test_import.py deleted file mode 100644 index 5cecd72..0000000 --- a/test_import.py +++ /dev/null @@ -1,3 +0,0 @@ -def test_import(): - import corecon as crc - return True diff --git a/tests/init.py b/tests/init.py new file mode 100644 index 0000000..efce621 --- /dev/null +++ b/tests/init.py @@ -0,0 +1,2 @@ +import corecon as crc +import numpy as np diff --git a/tests/test_DataEntryClass.py b/tests/test_DataEntryClass.py new file mode 100644 index 0000000..8820b34 --- /dev/null +++ b/tests/test_DataEntryClass.py @@ -0,0 +1,72 @@ +def test_swap_limits(): + uvlf = crc.get("UVLF") + h23 = uvlf["Harikane et al. 2023"] + + h23.swap_limits() + assert np.all( h23.lower_lim == [True, True, False, False, False, False, True, True, False, False, False, False, True, False]), + "Problem detected in DataEntry.swap_limit" + +def test_swap_errors(): + uvlf = crc.get("UVLF") + b17 = uvlf["Bouwens et al. 
2017"] + + b17.swap_errors() + assert np.all(b17.err_down == [0.30103 , 0.15970084, 0.19629465, 0.12493874, 0.09691001, 0.07918125, 0.28546222, 0.03817964, + 0.29459589, 0.34325599, 0.26787824, 0.35483287, 0.09089366, 0.78861996, 1.14378603, 1.58134989, 2.10047281] ), + "Problem detected in DataEntry.swap_errors" + +def test_nan_to_vals_all_fields(): + uvlf = crc.get("UVLF") + k22 = uvlf["Kauffmann et al. 2022"] + + k22.nan_to_value("all", 1e-10) + assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]), + "Problem detected in DataEntry.nan_to_vals using 'all' as first argument" + assert np.all(k22.err_up == [1.0000000e-10, 1.9165904e-01, 1.9005692e-01, 2.0676008e-01, 2.3357745e-01, 3.4987856e-01]), + "Problem detected in DataEntry.nan_to_vals using 'all' as first argument" + +def test_nan_to_vals_one_field(): + uvlf = crc.get("UVLF") + k22 = uvlf["Kauffmann et al. 2022"] + + k22.nan_to_value("err_down", 1e-10) + assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]), + "Problem detected in DataEntry.nan_to_vals using a single field as first argument" + +def test_nan_to_vals_list_fields(): + uvlf = crc.get("UVLF") + k22 = uvlf["Kauffmann et al. 2022"] + + k22.nan_to_value(["err_down", "err_up"], 1e-10) + assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]), + "Problem detected in DataEntry.nan_to_vals using a list of fields as first argument" + assert np.all(k22.err_up == [1.0000000e-10, 1.9165904e-01, 1.9005692e-01, 2.0676008e-01, 2.3357745e-01, 3.4987856e-01]), + "Problem detected in DataEntry.nan_to_vals using a list of fields as first argument" + +def test_set_lim_errors(): + uvlf = crc.get("UVLF") + h23 = uvlf["Harikane et al. 2023"] + + h23.set_lim_errors(5, frac_of_values=False) + assert np.all(h23.err_up == [5.0000000e+00, 5.0000000e+00, 5.2569252e-01, 5.2542593e-01, 3.4821760e-02, 3.9757000e-03, + 5.0000000e+00, 5.0000000e+00, 5.2009033e-01, 3.6845013e-01, 2.9921665e-01, 3.7791135e-01, + 5.0000000e+00, 3.6835150e-01]), + "Problem detected in DataEntry.set_lim_errors using frac_of_values=False" + +def test_set_lim_errors_frac(): + uvlf = crc.get("UVLF") + h23 = uvlf["Harikane et al. 2023"] + + h23.set_lim_errors(0.5, frac_of_values=True) + assert np.all(h23.err_up == [-2.0790076 , -2.05760232, 0.52569252, 0.52542593, 0.03482176, 0.0039757 , -2.616422065, + -2.596910015, 0.52009033, 0.36845013, 0.29921665, 0.37791135, -2.808092315, 0.3683515 ]), + "Problem detected in DataEntry.set_lim_errors using frac_of_values=True" + +def test_list_attributes() + uvlf = crc.get("UVLF") + h23 = uvlf["Harikane et al. 2023"] + + assert np.all(h23.list_attributes() == ['ndim', 'description','reference','parent_field','url','dimensions_descriptors', + 'extracted','axes','values','err_up','err_down','upper_lim','lower_lim','extra_data', + 'err_right','err_left','UV_luminosity_function','redshift','M_UV']), + "Problem detected in DataEntry.list_attributes" diff --git a/tests/test_FieldClass.py b/tests/test_FieldClass.py new file mode 100644 index 0000000..d7889a6 --- /dev/null +++ b/tests/test_FieldClass.py @@ -0,0 +1,162 @@ +def test_get_all_references(): + uvlf = crc.get("UVLF") + + #We test that *at least* the following references are returned. 
In this way, we do not have to update this test every time a + # new constraint is added (or a temporary one updated) + references = ['Ishigaki, Kawamata, Ouchi, Oguri, Shimasaku, Ono; ApJ. 854, 73 (2018)', + 'McLeod, Donnan, McLure, Dunlop, Magee, Begley, Carnall, et al.; MNRAS 527, 5004 (2024)', + 'Atek, Richard, Kneib, Schaerer; MNRAS 479, 5184 (2018)', + 'Oesch, Bouwens, Illingworth, Labbe, Smit, Franx, et al.; ApJ. 786, 108 (2014)', + 'Morishita, Trenti, Stiavelli, Bradley, Coe, et al.; ApJ. 867, 150 (2018)', + 'Perez-Gonzalez, Costantin, Langeroodi, Rinaldi, Annunziatella, et al.; ApJL 951, L1 (2023)', + 'Bouwens, Oesch, Labbe, Illingworth, Fazio, et al.; ApJ. 830, 67 (2016)', + 'Livermore, Finkelstein, Lotz; ApJ. 835, 113 (2017)', + 'Harikane, Ouchi, Oguri, Ono, Nakajima, Isobe, Umeda, Mawatari, Zhang; ApJS 265, 5 (2023)', + 'Atek, Richard, Kneib, Jauzac, Schaerer, Clement, et al.; ApJ 800, 18 (2015)', + 'Bouwens, Illingworth, Oesch, Naidu, van Leeuwen, Magee, MNRAS 523, 1009 (2023)', + 'McLeod, McLure, Dunlop; MNRAS 459, 3812 (2016)', + 'Donnan, McLeod, McLure, Dunlop, Carnall, Cullen, Magee; MNRAS 520, 4554 (2023)', + 'Finkelstein, Ryan, Papovich, Dickinson, Song, et al.; ApJ. 810, 71 (2015)', + 'Stefanon, Labbe, Bouwens, Oesch, Ashby, Caputi, et al.; ApJ. 883, 99 (2019)', + 'Bouwens, Illingworth, Oesch, Trenti, Labbe, et al.; ApJ. 803, 34 (2015)', + 'Rojas-Ruiz S., Finkelstein S. L., Bagley M. B., Stevans M., Finkelstein K. D., et al., 2020, ApJ, 891, 146', + 'McLure, Dunlop, Bowler, Curtis-Lake, Schenker, et al.; MNRAS 432, 2696 (2013)', + 'Bouwens, Oesch, Illingworth, Ellis, Stefanon; ApJ. 843, 129 (2017)', + 'Bowler, Jarvis, Dunlop, McLure, McLeod, et al.; MNRAS 493, 2059 (2020)', + 'Leung, Bagley, Finkelstein, Ferguson, Koekemoer, Perez-Gonzalez, et al.; ApJL 954, L46 (2023)', + 'Donnan, McLeod, Dunlop, McLure, Carnall, Begley, Cullen, et al.; MNRAS 518, 6011 (2023)', + 'Bouwens, Stefanon, Brammer, Oesch, Herard-Demanche, Illingworth, et al., MNRAS 523, 1936 (2023)', + 'Castellano, Dayal, Pentericci, Fontana, Hutter, et al.; ApJL. 818, L3 (2016)', + 'Oesch, Bouwens, Illingworth, Labbe, Stefanon; ApJ. 855, 105 (2018)', + 'Bouwens, Illingworth, Ellis, Oesch, Stefanon; ApJ 940, 55 (2022)', + 'Bowler, Dunlop, McLure, McCracken, Milvang-Jensen, et al.; MNRAS 452, 1817 (2015)' + ] + + uvlf_ref = uvlf.get_all_references() + + for r in references: + assert r in uvlf_ref, "Problem detected in Field.get_all_references" + + + + + + +def test_get_all_urls(): + uvlf = crc.get("UVLF") + + #We test that *at least* the following URLs are returned. 
In this way, we do not have to update this test every time a + # new constraint is added (or a temporary one updated) + urls = ['https://iopscience.iop.org/article/10.3847/1538-4357/aaa544', + 'https://academic.oup.com/mnras/article/527/3/5004/7408621', + 'https://academic.oup.com/mnras/article/479/4/5184/5050078', + 'https://iopscience.iop.org/article/10.1088/0004-637X/786/2/108', + 'https://iopscience.iop.org/article/10.3847/1538-4357/aae68c', + 'https://iopscience.iop.org/article/10.3847/2041-8213/acd9d0/pdf', + 'https://iopscience.iop.org/article/10.3847/0004-637X/830/2/67', + 'https://iopscience.iop.org/article/10.3847/1538-4357/835/2/113', + 'https://iopscience.iop.org/article/10.3847/1538-4365/acaaa9', + 'https://ui.adsabs.harvard.edu/abs/2015ApJ...800...18A/abstract', + 'https://ui.adsabs.harvard.edu/abs/2023MNRAS.523.1009B/', + 'https://academic.oup.com/mnras/article/459/4/3812/2624050', + 'https://ui.adsabs.harvard.edu/abs/2023MNRAS.520.4554D/abstract', + 'https://iopscience.iop.org/article/10.1088/0004-637X/810/1/71', + 'https://iopscience.iop.org/article/10.3847/1538-4357/ab3792', + 'https://iopscience.iop.org/article/10.1088/0004-637X/803/1/34', + 'https://iopscience.iop.org/article/10.3847/1538-4357/ab7659', + 'https://academic.oup.com/mnras/article/432/4/2696/2907730', + 'https://iopscience.iop.org/article/10.3847/1538-4357/aa70a4', + 'https://academic.oup.com/mnras/article/493/2/2059/5721544', + 'https://iopscience.iop.org/article/10.3847/2041-8213/acf365/pdf', + 'https://academic.oup.com/mnras/article/518/4/6011/6849970', + 'https://ui.adsabs.harvard.edu/abs/2023MNRAS.523.1036B/abstract', + 'https://iopscience.iop.org/article/10.3847/2041-8205/818/1/L3', + 'https://iopscience.iop.org/article/10.3847/1538-4357/aab03f', + 'https://iopscience.iop.org/article/10.3847/1538-4357/ac86d1', + 'https://academic.oup.com/mnras/article/452/2/1817/1068199'] + + uvlf_urls = uvlf.get_all_urls() + + for u in urls: + assert u in uvlf_urls, "Problem detected in Field.get_all_urls" + + + + + + +def test_filter_by_redshift_range(): + uvlf = crc.get("UVLF") + + #We test that *at least* the following entries are returned. In this way, we do not have to update this test every time a + # new constraint is added (or a temporary one updated) + entries = ['Ishigaki et al. 2018', 'McLeod et al. 2024', 'Oesch et al. 2014', + 'Morishita et al. 2018', 'Perez-Gonzalez et al. 2023', + 'Bouwens et al. 2016', 'Harikane et al. 2023', + 'Bouwens et al. 2023a', 'McLeod et al. 2016', + 'Donnan et al. 2023b', 'Stefanon et al. 2019', + 'Bouwens et al. 2015', 'Rojas-Ruiz et al. 2020', + 'Bowler et al. 2020', 'Leung et al. 2023', 'Donnan et al. 2023a', + 'Bouwens et al. 2023b', 'Oesch et al. 2018', 'Bouwens et al. 2022'] + + uvlf_zrange = uvlf.filter_by_redshift_range(9,13) + + for e in entries: + assert e in list(uvlf_zrange.keys()), "Problem detected in Field.filter_by_redshift_range (not all entries are returned)" + + #then test the slicing is correct + assert np.all(uvlf_zrange['Ishigaki et al. 2018'] == [9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]), + "Problem detected in Field.filter_by_redshift_range (slicing of constraints is wrong)" + + + + + +def test_filter_by_extracted(): + + uvlf = crc.get("UVLF") + + #We test that *at least* the following entries are returned. In this way, we do not have to update this test every time a + # new constraint is added (or a temporary one updated) + entries = ['Ishigaki et al. 2018', 'Atek et al. 2018', 'Livermore et al. 
2017', 'Atek et al. 2015', 'McLeod et al. 2016', 'Castellano et al. 2015'] + + uvlf_ex = uvlf.filter_by_extracted(True) + + for e in entries: + assert e in list(uvlf_ex.keys()), "Problem detected in Field.filter_by_extracted (not all entries are returned)" + assert uvlf_ex[e].extracted == True, "Problem detected in Field.filter_by_extracted (returned wrong entry/slice)" + + + + +def test_get_upper_limits(): + xHII = crc.get("x_HII") + + #We test that *at least* the following entries are returned. In this way, we do not have to update this test every time a + # new constraint is added (or a temporary one updated) + entries = ['Schenker et al. 2014','Sobacchi & Mesinger 2015', 'Schroeder et al. 2013', 'Mortlock et al. 2011', 'Mesinger et al. 2015', 'Mason et al. 2019', + 'Tilvi et al. 2014', 'Pentericci et al. 2014', 'Bosman et al. 2022', 'Yang et al. 2020b', 'Robertson et al. 2013', 'Hoag et al. 2019'] + + xHII_ul = xHII.get_upper_limits() + + for e in entries: + assert e in list(xHII_ul.keys()), "Problem detected in Field.get_upper_limits (not all entries are returned)" + assert np.all(xHII_ul[e].upper_lim), "Problem detected in Field.get_upper_limits (returned wrong entry/slice)" + + + + +def test_get_lower_limits(): + xHII = crc.get("x_HII") + + #We test that *at least* the following entries are returned. In this way, we do not have to update this test every time a + # new constraint is added (or a temporary one updated) + entries = ['Zhu et al. 2022', 'Ouchi et al. 2010', 'Chornock et al. 2013', 'Totani et al. 2006', 'Nakane et al. 2024', + 'Lu et al. 2020', 'McGreer et al. 2011','McGreer et al. 2015'] + + xHII_ll = xHII.get_lower_limits() + + for e in entries: + assert e in list(xHII_ll.keys()), "Problem detected in Field.get_lower_limits (not all entries are returned)" + assert np.all(xHII_ll[e].lower_lim), "Problem detected in Field.get_lower_limits (returned wrong entry/slice)" + diff --git a/tests/test_corecon.py b/tests/test_corecon.py new file mode 100644 index 0000000..4c7ee08 --- /dev/null +++ b/tests/test_corecon.py @@ -0,0 +1,28 @@ +def test_get_fields(): + + #We test that *at least* the following fields are returned. In this way, we do not have to update this test every time a + # new constraint field is added + fields = ['HII_fraction', 'HeIII_fraction', 'Lya_flux_power_spectrum', 'mean_free_path', 'effective_optical_depth_HI_Lya', + 'effective_optical_depth_HeII_Lya', 'HeII_to_HI_column_density_ratio',a'quasar_luminosity_function', + 'UV_luminosity_function','IGM_temperature_mean_density','optical_depth_CMB','sfrd', + 'Lya_spike_galaxy_correlation','mass_stellar_metallicity_relation','mass_gas_metallicity_relation', + 'galaxy_main_sequence','UV_slope','ionizing_photons_production_efficiency','HI_photoionization_rate', + 'ionizing_photons_emission_rate','reionization_midpoint','UV_luminosity_density'] + + + crc_fields = crc.get_fields() + + for f in fields: + assert f in crc_fields, "Problem detected in CoReCon.get_fields" + + + +def test_get_field_synonyms(): + #We test that *at least* the following synonyms are returned. 
In this way, we do not have to update this test every time a + # new one is added + synonyms = ['ionized_fraction', 'x_HII', 'f_HII', 'x_ion', 'f_ion', 'ion_frac'] + + crc_syn = crc.get_field_synonyms("HII_fraction") + for s in synonyms: + assert s in crc_syn, "Problem detected in CoReCon.get_field_synonym" + From c3d757946313e08202051ee007d5efdef1739e8f Mon Sep 17 00:00:00 2001 From: Enrico Garaldi Date: Tue, 3 Sep 2024 15:44:21 +0200 Subject: [PATCH 3/7] fix in tests --- .gitignore | 2 +- tests/init.py | 2 -- tests/test_DataEntryClass.py | 38 ++++++++++++++++++++---------------- tests/test_FieldClass.py | 28 ++++++++++++++------------ tests/test_corecon.py | 12 +++++++----- 5 files changed, 44 insertions(+), 38 deletions(-) diff --git a/.gitignore b/.gitignore index 01f4c93..dcd1f74 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,4 @@ HOWTO MANIFEST corecon/__init__.py.bkp time_of_last_update.dat - +tests/__pycache__ diff --git a/tests/init.py b/tests/init.py index efce621..e69de29 100644 --- a/tests/init.py +++ b/tests/init.py @@ -1,2 +0,0 @@ -import corecon as crc -import numpy as np diff --git a/tests/test_DataEntryClass.py b/tests/test_DataEntryClass.py index 8820b34..5befbd2 100644 --- a/tests/test_DataEntryClass.py +++ b/tests/test_DataEntryClass.py @@ -1,9 +1,13 @@ +import corecon as crc +import numpy as np + + def test_swap_limits(): uvlf = crc.get("UVLF") h23 = uvlf["Harikane et al. 2023"] h23.swap_limits() - assert np.all( h23.lower_lim == [True, True, False, False, False, False, True, True, False, False, False, False, True, False]), + assert np.all( h23.lower_lim == [True, True, False, False, False, False, True, True, False, False, False, False, True, False]),\ "Problem detected in DataEntry.swap_limit" def test_swap_errors(): @@ -12,35 +16,35 @@ def test_swap_errors(): b17.swap_errors() assert np.all(b17.err_down == [0.30103 , 0.15970084, 0.19629465, 0.12493874, 0.09691001, 0.07918125, 0.28546222, 0.03817964, - 0.29459589, 0.34325599, 0.26787824, 0.35483287, 0.09089366, 0.78861996, 1.14378603, 1.58134989, 2.10047281] ), + 0.29459589, 0.34325599, 0.26787824, 0.35483287, 0.09089366, 0.78861996, 1.14378603, 1.58134989, 2.10047281] ), \ "Problem detected in DataEntry.swap_errors" -def test_nan_to_vals_all_fields(): +def test_nan_to_values_all_fields(): uvlf = crc.get("UVLF") k22 = uvlf["Kauffmann et al. 2022"] - k22.nan_to_value("all", 1e-10) - assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]), + k22.nan_to_values("all", 1e-10) + assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]),\ "Problem detected in DataEntry.nan_to_vals using 'all' as first argument" - assert np.all(k22.err_up == [1.0000000e-10, 1.9165904e-01, 1.9005692e-01, 2.0676008e-01, 2.3357745e-01, 3.4987856e-01]), + assert np.all(k22.err_up == [1.0000000e-10, 1.9165904e-01, 1.9005692e-01, 2.0676008e-01, 2.3357745e-01, 3.4987856e-01]),\ "Problem detected in DataEntry.nan_to_vals using 'all' as first argument" -def test_nan_to_vals_one_field(): +def test_nan_to_values_one_field(): uvlf = crc.get("UVLF") k22 = uvlf["Kauffmann et al. 
2022"] - k22.nan_to_value("err_down", 1e-10) - assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]), + k22.nan_to_values("err_down", 1e-10) + assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]),\ "Problem detected in DataEntry.nan_to_vals using a single field as first argument" -def test_nan_to_vals_list_fields(): +def test_nan_to_values_list_fields(): uvlf = crc.get("UVLF") k22 = uvlf["Kauffmann et al. 2022"] - k22.nan_to_value(["err_down", "err_up"], 1e-10) - assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]), + k22.nan_to_values(["err_down", "err_up"], 1e-10) + assert np.all(k22.err_down == [1.0000000e-10, 3.5139073e-01, 3.4584234e-01, 4.0866387e-01, 5.4104580e-01, 1.0000000e+02]),\ "Problem detected in DataEntry.nan_to_vals using a list of fields as first argument" - assert np.all(k22.err_up == [1.0000000e-10, 1.9165904e-01, 1.9005692e-01, 2.0676008e-01, 2.3357745e-01, 3.4987856e-01]), + assert np.all(k22.err_up == [1.0000000e-10, 1.9165904e-01, 1.9005692e-01, 2.0676008e-01, 2.3357745e-01, 3.4987856e-01]),\ "Problem detected in DataEntry.nan_to_vals using a list of fields as first argument" def test_set_lim_errors(): @@ -50,7 +54,7 @@ def test_set_lim_errors(): h23.set_lim_errors(5, frac_of_values=False) assert np.all(h23.err_up == [5.0000000e+00, 5.0000000e+00, 5.2569252e-01, 5.2542593e-01, 3.4821760e-02, 3.9757000e-03, 5.0000000e+00, 5.0000000e+00, 5.2009033e-01, 3.6845013e-01, 2.9921665e-01, 3.7791135e-01, - 5.0000000e+00, 3.6835150e-01]), + 5.0000000e+00, 3.6835150e-01]),\ "Problem detected in DataEntry.set_lim_errors using frac_of_values=False" def test_set_lim_errors_frac(): @@ -59,14 +63,14 @@ def test_set_lim_errors_frac(): h23.set_lim_errors(0.5, frac_of_values=True) assert np.all(h23.err_up == [-2.0790076 , -2.05760232, 0.52569252, 0.52542593, 0.03482176, 0.0039757 , -2.616422065, - -2.596910015, 0.52009033, 0.36845013, 0.29921665, 0.37791135, -2.808092315, 0.3683515 ]), + -2.596910015, 0.52009033, 0.36845013, 0.29921665, 0.37791135, -2.808092315, 0.3683515 ]),\ "Problem detected in DataEntry.set_lim_errors using frac_of_values=True" -def test_list_attributes() +def test_list_attributes(): uvlf = crc.get("UVLF") h23 = uvlf["Harikane et al. 
2023"] assert np.all(h23.list_attributes() == ['ndim', 'description','reference','parent_field','url','dimensions_descriptors', 'extracted','axes','values','err_up','err_down','upper_lim','lower_lim','extra_data', - 'err_right','err_left','UV_luminosity_function','redshift','M_UV']), + 'err_right','err_left','UV_luminosity_function', 'dimensions_descriptors_internal','redshift','M_UV']),\ "Problem detected in DataEntry.list_attributes" diff --git a/tests/test_FieldClass.py b/tests/test_FieldClass.py index d7889a6..fa455ee 100644 --- a/tests/test_FieldClass.py +++ b/tests/test_FieldClass.py @@ -1,3 +1,7 @@ +import corecon as crc +import numpy as np + + def test_get_all_references(): uvlf = crc.get("UVLF") @@ -34,9 +38,7 @@ def test_get_all_references(): uvlf_ref = uvlf.get_all_references() - for r in references: - assert r in uvlf_ref, "Problem detected in Field.get_all_references" - + assert len(set(references).difference(uvlf_ref))==0, "Problem detected in Field.get_all_references" @@ -76,10 +78,8 @@ def test_get_all_urls(): 'https://academic.oup.com/mnras/article/452/2/1817/1068199'] uvlf_urls = uvlf.get_all_urls() - - for u in urls: - assert u in uvlf_urls, "Problem detected in Field.get_all_urls" - + + assert len(set(urls).difference(uvlf_urls))==0, "Problem detected in Field.get_all_urls" @@ -101,11 +101,10 @@ def test_filter_by_redshift_range(): uvlf_zrange = uvlf.filter_by_redshift_range(9,13) - for e in entries: - assert e in list(uvlf_zrange.keys()), "Problem detected in Field.filter_by_redshift_range (not all entries are returned)" + assert len(set(entries).difference(uvlf_zrange))==0, "Problem detected in Field.filter_by_redshift_range (not all entries are returned)" #then test the slicing is correct - assert np.all(uvlf_zrange['Ishigaki et al. 2018'] == [9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]), + assert np.all(uvlf_zrange['Ishigaki et al. 2018'].redshift == [9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]), \ "Problem detected in Field.filter_by_redshift_range (slicing of constraints is wrong)" @@ -121,9 +120,10 @@ def test_filter_by_extracted(): entries = ['Ishigaki et al. 2018', 'Atek et al. 2018', 'Livermore et al. 2017', 'Atek et al. 2015', 'McLeod et al. 2016', 'Castellano et al. 
2015'] uvlf_ex = uvlf.filter_by_extracted(True) + + assert len(set(entries).difference(uvlf_ex))==0, "Problem detected in Field.filter_by_extracted (not all entries are returned)" for e in entries: - assert e in list(uvlf_ex.keys()), "Problem detected in Field.filter_by_extracted (not all entries are returned)" assert uvlf_ex[e].extracted == True, "Problem detected in Field.filter_by_extracted (returned wrong entry/slice)" @@ -139,8 +139,9 @@ def test_get_upper_limits(): xHII_ul = xHII.get_upper_limits() + assert len(set(entries).difference(xHII_ul))==0, "Problem detected in Field.get_upper_limits (not all entries are returned)" + for e in entries: - assert e in list(xHII_ul.keys()), "Problem detected in Field.get_upper_limits (not all entries are returned)" assert np.all(xHII_ul[e].upper_lim), "Problem detected in Field.get_upper_limits (returned wrong entry/slice)" @@ -156,7 +157,8 @@ def test_get_lower_limits(): xHII_ll = xHII.get_lower_limits() + assert len(set(entries).difference(xHII_ll))==0, "Problem detected in Field.get_lower_limits (not all entries are returned)" + for e in entries: - assert e in list(xHII_ll.keys()), "Problem detected in Field.get_lower_limits (not all entries are returned)" assert np.all(xHII_ll[e].lower_lim), "Problem detected in Field.get_lower_limits (returned wrong entry/slice)" diff --git a/tests/test_corecon.py b/tests/test_corecon.py index 4c7ee08..54c902a 100644 --- a/tests/test_corecon.py +++ b/tests/test_corecon.py @@ -1,9 +1,12 @@ +import corecon as crc +import numpy as np + def test_get_fields(): #We test that *at least* the following fields are returned. In this way, we do not have to update this test every time a # new constraint field is added fields = ['HII_fraction', 'HeIII_fraction', 'Lya_flux_power_spectrum', 'mean_free_path', 'effective_optical_depth_HI_Lya', - 'effective_optical_depth_HeII_Lya', 'HeII_to_HI_column_density_ratio',a'quasar_luminosity_function', + 'effective_optical_depth_HeII_Lya', 'HeII_to_HI_column_density_ratio', 'quasar_luminosity_function', 'UV_luminosity_function','IGM_temperature_mean_density','optical_depth_CMB','sfrd', 'Lya_spike_galaxy_correlation','mass_stellar_metallicity_relation','mass_gas_metallicity_relation', 'galaxy_main_sequence','UV_slope','ionizing_photons_production_efficiency','HI_photoionization_rate', @@ -12,8 +15,7 @@ def test_get_fields(): crc_fields = crc.get_fields() - for f in fields: - assert f in crc_fields, "Problem detected in CoReCon.get_fields" + assert len(set(fields).difference(crc_fields))==0, "Problem detected in CoReCon.get_fields" @@ -23,6 +25,6 @@ def test_get_field_synonyms(): synonyms = ['ionized_fraction', 'x_HII', 'f_HII', 'x_ion', 'f_ion', 'ion_frac'] crc_syn = crc.get_field_synonyms("HII_fraction") - for s in synonyms: - assert s in crc_syn, "Problem detected in CoReCon.get_field_synonym" + + assert len(set(synonyms).difference(crc_syn))==0, "Problem detected in CoReCon.get_field_synonym" From 5bc309af9685c1427c9b460910101bf01a72814c Mon Sep 17 00:00:00 2001 From: Enrico Garaldi Date: Tue, 3 Sep 2024 17:37:25 +0200 Subject: [PATCH 4/7] include better-tests in workflows --- .github/workflows/python-package.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 957362f..977ecc5 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -5,7 +5,7 @@ name: Python package on: push: - branches: [ "master" ] + branches: [ "master", "better-tests" ] 
   pull_request:
     branches: [ "master" ]

From 834e3783f1f39c3579edc71201f5920c778fbf22 Mon Sep 17 00:00:00 2001
From: Enrico Garaldi
Date: Tue, 3 Sep 2024 17:54:49 +0200
Subject: [PATCH 5/7] fix?

---
 tests/{init.py => __init__.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename tests/{init.py => __init__.py} (100%)

diff --git a/tests/init.py b/tests/__init__.py
similarity index 100%
rename from tests/init.py
rename to tests/__init__.py

From 1551a46e6a77045867619b017c08ae8a85971f0a Mon Sep 17 00:00:00 2001
From: Enrico Garaldi
Date: Tue, 3 Sep 2024 18:03:26 +0200
Subject: [PATCH 6/7] "[automated] updated data.zip"

---
 corecon/data/data.zip | Bin 870231 -> 870232 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/corecon/data/data.zip b/corecon/data/data.zip
index 0f4500da5eed1b544ec5368a7a184af399031d2b..e73dfe5ea8544acd1e630787e4c4f137b268d66e 100644
GIT binary patch
delta 6299
[base85-encoded binary delta not reproduced]

delta 6180
[base85-encoded binary delta not reproduced]

From 9082c05da8e3285fe431faa56689a275f3ca3a7e Mon Sep 17 00:00:00 2001
From: Enrico Garaldi
Date: Tue, 3 Sep 2024 18:05:52 +0200
Subject: [PATCH 7/7] updated test

---
 tests/test_FieldClass.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_FieldClass.py b/tests/test_FieldClass.py
index fa455ee..f198fae 100644
--- a/tests/test_FieldClass.py
+++ b/tests/test_FieldClass.py
@@ -117,7 +117,7 @@ def test_filter_by_extracted():
 
     #We test that *at least* the following entries are returned. In this way, we do not have to update this test every time a
     # new constraint is added (or a temporary one updated)
-    entries = ['Ishigaki et al. 2018', 'Atek et al. 2018', 'Livermore et al. 2017', 'Atek et al. 2015', 'McLeod et al. 2016', 'Castellano et al. 2015']
+    entries = ['Ishigaki et al. 2018', 'Atek et al. 2018', 'Atek et al. 2015', 'McLeod et al. 2016', 'Castellano et al. 2015']
 
     uvlf_ex = uvlf.filter_by_extracted(True)