From 59f93c4f080f38d7d930bc6d70a8f992cc3598cc Mon Sep 17 00:00:00 2001 From: sbradnam Date: Thu, 31 Oct 2024 14:33:52 +0000 Subject: [PATCH 01/53] Placholder _get_tally_data function --- jade/openmc.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/jade/openmc.py b/jade/openmc.py index 14647833..1dfef529 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -215,6 +215,44 @@ def read_openmc_version(self) -> str: version = ".".join(map(str, self.statepoint.version)) return version + def _get_tally_data(self, rows: list, name: str): + """Extract tally data from statepoint file + + Parameters + ---------- + rows : list + list of rows to append tally data to + name : str + tally name + + Returns + ------- + list + list of rows with tally data appended + """ + tally = self.statepoint.get_tally(name=name) + tally_n = tally.id + tally_description = tally.name.title() + energy_bins = tally.find_filter(openmc.EnergyFilter).values[1:] + fluxes = tally.mean.flatten() + errors = tally.std_dev.flatten() + for energy, flux, error in zip(energy_bins, fluxes, errors): + rows.append([tally_n, tally_description, energy, flux, error]) + return rows + + def tally_to_rows(self): + """Call to extract tally data from statepoint file + + Returns + ------- + list + list of rows with all sphere case tally data + """ + rows = [] + for t in self.statepoint.tallies.keys(): + rows = self._get_tally_data(rows, t.name) + return rows + class OpenMCSphereSimOutput(OpenMCSimOutput): def __init__(self, spfile_path: str) -> None: From 9b2f89f3ca402ca6986d1f522b97f6022781a79c Mon Sep 17 00:00:00 2001 From: sbradnam Date: Thu, 31 Oct 2024 16:29:53 +0000 Subject: [PATCH 02/53] First pass omc single library --- jade/openmc.py | 49 ++++++++++++++++++------- jade/output.py | 98 +++++++++++++++++++++++++++----------------------- 2 files changed, 90 insertions(+), 57 deletions(-) diff --git a/jade/openmc.py b/jade/openmc.py index 069b70a8..6b79e75e 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -274,30 +274,55 @@ def read_openmc_version(self) -> str: version = ".".join(map(str, self.statepoint.version)) return version - def _get_tally_data(self, rows: list, name: str): + def _get_filters(self, tally: openmc.Tally) -> list: + """ + + Args: + tally (openmc.Tally): openmc tally object + + Returns: + filters (dict): list of contained openmc filters + """ + filters = [] + if tally.contains_filter(openmc.CellFilter): + filters.append(openmc.CellFilter) + if tally.contains_filter(openmc.SurfaceFilter): + filters.append(openmc.SurfaceFilter) + if tally.contains_filter(openmc.EnergyFilter): + filters.append(openmc.EnergyFilter) + if tally.contains_filter(openmc.TimeFilter): + filters.append(openmc.TimeFilter) + return filters + + + def _get_tally_data(self, rows: list, tally: openmc.Tally): """Extract tally data from statepoint file Parameters ---------- rows : list list of rows to append tally data to - name : str - tally name + tally : openmc.Tally + openmc tally Returns ------- list list of rows with tally data appended """ - tally = self.statepoint.get_tally(name=name) + print(tally) + df = tally.get_pandas_dataframe() + print(df) tally_n = tally.id tally_description = tally.name.title() - energy_bins = tally.find_filter(openmc.EnergyFilter).values[1:] - fluxes = tally.mean.flatten() - errors = tally.std_dev.flatten() - for energy, flux, error in zip(energy_bins, fluxes, errors): - rows.append([tally_n, tally_description, energy, flux, error]) - return rows + filters = self._get_filters(tally) 
+ print(tally_n) + print(tally_description) + print('Filters:', filters) + values = tally.get_values(filters=filters, value='mean') + errors = tally.get_values(value='std_dev') + print('Values:', values) + print('Errors:', errors) def tally_to_rows(self): """Call to extract tally data from statepoint file @@ -308,8 +333,8 @@ def tally_to_rows(self): list of rows with all sphere case tally data """ rows = [] - for t in self.statepoint.tallies.keys(): - rows = self._get_tally_data(rows, t.name) + for tally_n in sorted(self.statepoint.tallies.keys()): + rows = self._get_tally_data(rows, self.statepoint.tallies[tally_n]) return rows diff --git a/jade/output.py b/jade/output.py index 60aad213..9f8ad002 100644 --- a/jade/output.py +++ b/jade/output.py @@ -652,6 +652,11 @@ def _generate_single_excel_output(self): # Open the excel file # name = "Generic_single.xlsx" # template = os.path.join(os.getcwd(), "templates", name) + + if self.openmc: + results_path = os.path.join(self.test_path, self.code) + _, outfile = self._get_output_files(results_path, "openmc") + openmc_output = OpenMCOutput(outfile) if self.mcnp or self.d1s: outpath = os.path.join( @@ -843,6 +848,7 @@ def _generate_single_excel_output(self): def _print_raw(self): for key, data in self.raw_data.items(): file = os.path.join(self.raw_path, str(key) + ".csv") + print(file) data.to_csv(file, header=True, index=False) metadata_file = os.path.join(self.raw_path, "metadata.json") @@ -1233,51 +1239,53 @@ def read(self, output_file): return output_file_data def process_tally(self): - tallydata = {} - totalbin = {} - rows = [] - for line in self.output_file_data: - if "tally" in line.lower(): - if len(rows) > 0: - tallydata[tallynum], totalbin[tallynum] = self._create_dataframe( - rows - ) - rows = [] - parts = line.split() - tallynum = int(parts[2].replace(":", "")) - cells = False - user = False - segments = False - cosine = False - energy = False - time = False - cor_c = False - cor_b = False - cor_a = False - value = False - error = False - if "incoming energy" in line.lower(): - parts = line.split() - energy = 1e-6 * float(parts[3].replace(")", "")) - if "flux" in line.lower(): - parts = line.split() - value, error = float(parts[1]), float(parts[2]) - rows.append( - [ - cells, - user, - segments, - cosine, - energy, - time, - cor_c, - cor_b, - cor_a, - value, - error, - ] - ) - tallydata[tallynum], totalbin[tallynum] = self._create_dataframe(rows) + #tallydata = {} + #totalbin = {} + #rows = [] + #for line in self.output_file_data: + # if "tally" in line.lower(): + # if len(rows) > 0: + # tallydata[tallynum], totalbin[tallynum] = self._create_dataframe( + # rows + # ) + # rows = [] + # parts = line.split() + # tallynum = int(parts[2].replace(":", "")) + # cells = False + # user = False + # segments = False + # cosine = False + # energy = False + # time = False + # cor_c = False + # cor_b = False + # cor_a = False + # value = False + # error = False + # if "incoming energy" in line.lower(): + # parts = line.split() + # energy = 1e-6 * float(parts[3].replace(")", "")) + # if "flux" in line.lower(): + # parts = line.split() + # value, error = float(parts[1]), float(parts[2]) + # rows.append( + # [ + # cells, + # user, + # segments, + # cosine, + # energy, + # time, + # cor_c, + # cor_b, + # cor_a, + # value, + # error, + # ] + # ) + # + rows = self.output.tally_to_rows() + tallydata, totalbin = self._create_dataframe(rows) return tallydata, totalbin From 4279c9ef0e99277f7ff2dae41bf6d025e43281cc Mon Sep 17 00:00:00 2001 From: sbradnam Date: 
Fri, 1 Nov 2024 11:33:16 +0000 Subject: [PATCH 03/53] Sort output from omc dataframes --- jade/openmc.py | 53 ++++--------------- jade/output.py | 138 +++++++++++++++++-------------------------------- 2 files changed, 58 insertions(+), 133 deletions(-) diff --git a/jade/openmc.py b/jade/openmc.py index 6b79e75e..09396c8c 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -274,57 +274,24 @@ def read_openmc_version(self) -> str: version = ".".join(map(str, self.statepoint.version)) return version - def _get_filters(self, tally: openmc.Tally) -> list: - """ - - Args: - tally (openmc.Tally): openmc tally object - - Returns: - filters (dict): list of contained openmc filters - """ - filters = [] - if tally.contains_filter(openmc.CellFilter): - filters.append(openmc.CellFilter) - if tally.contains_filter(openmc.SurfaceFilter): - filters.append(openmc.SurfaceFilter) - if tally.contains_filter(openmc.EnergyFilter): - filters.append(openmc.EnergyFilter) - if tally.contains_filter(openmc.TimeFilter): - filters.append(openmc.TimeFilter) - return filters - - - def _get_tally_data(self, rows: list, tally: openmc.Tally): + def _get_tally_data(self, tally: openmc.Tally): """Extract tally data from statepoint file Parameters ---------- - rows : list - list of rows to append tally data to tally : openmc.Tally openmc tally Returns ------- - list - list of rows with tally data appended + df : pd.DataFrame + pandas dataframe containing tally data """ - print(tally) df = tally.get_pandas_dataframe() - print(df) - tally_n = tally.id - tally_description = tally.name.title() - filters = self._get_filters(tally) - print(tally_n) - print(tally_description) - print('Filters:', filters) - values = tally.get_values(filters=filters, value='mean') - errors = tally.get_values(value='std_dev') - print('Values:', values) - print('Errors:', errors) + #df.to_csv('tally_'+str(tally.id)+'.csv') + return df - def tally_to_rows(self): + def tallies_to_dataframes(self): """Call to extract tally data from statepoint file Returns @@ -332,10 +299,10 @@ def tally_to_rows(self): list list of rows with all sphere case tally data """ - rows = [] - for tally_n in sorted(self.statepoint.tallies.keys()): - rows = self._get_tally_data(rows, self.statepoint.tallies[tally_n]) - return rows + tallies = {} + for _, tally in self.statepoint.tallies.items(): + tallies[tally.id] = self._get_tally_data(tally) + return tallies class OpenMCSphereSimOutput(OpenMCSimOutput): diff --git a/jade/output.py b/jade/output.py index 9f8ad002..9df173f7 100644 --- a/jade/output.py +++ b/jade/output.py @@ -1196,96 +1196,54 @@ def __init__(self, output_path): self.tallydata, self.totalbin = self.process_tally() self.stat_checks = None - def _create_dataframe(self, rows): - columns = [ - "Cells", - "User", - "Segments", - "Cosine", - "Energy", - "Time", - "Cor C", - "Cor B", - "Cor A", - "Value", - "Error", - ] - df = pd.DataFrame(rows, columns=columns) - cells = list(df.Cells.unique()) - total = "Energy" - for cell in cells: - value = df.loc[df["Cells"] == cell, "Values"].sum() - error = np.sqrt(sum(df.loc[df["Cells"] == cell, "Values"] ** 2)) - row = [ - cell, - False, - False, - False, - total, - False, - False, - False, - False, - value, - error, - ] - df.loc[len(df)] = row - dftotal = df[df[total] == "total"] - return df, dftotal - - def read(self, output_file): - with open(output_file, "r") as f: - output_file_data = f.readlines() - return output_file_data - - def process_tally(self): - #tallydata = {} - #totalbin = {} - #rows = [] - #for line in 
self.output_file_data: - # if "tally" in line.lower(): - # if len(rows) > 0: - # tallydata[tallynum], totalbin[tallynum] = self._create_dataframe( - # rows - # ) - # rows = [] - # parts = line.split() - # tallynum = int(parts[2].replace(":", "")) - # cells = False - # user = False - # segments = False - # cosine = False - # energy = False - # time = False - # cor_c = False - # cor_b = False - # cor_a = False - # value = False - # error = False - # if "incoming energy" in line.lower(): - # parts = line.split() - # energy = 1e-6 * float(parts[3].replace(")", "")) - # if "flux" in line.lower(): - # parts = line.split() - # value, error = float(parts[1]), float(parts[2]) - # rows.append( - # [ - # cells, - # user, - # segments, - # cosine, - # energy, - # time, - # cor_c, - # cor_b, - # cor_a, - # value, - # error, - # ] - # ) - # - rows = self.output.tally_to_rows() - tallydata, totalbin = self._create_dataframe(rows) + def _create_dataframes(self, tallies): + tallydata = {} + totalbin = {} + filter_lookup = {'cell': "Cells-Segments", + 'surface' : "Cells-Segments", + 'energy high [eV]' : 'Energy', + 'time' : 'Time', + 'mean' : 'Value', + 'std. dev.' : 'Error'} + columns = ["Cells-Segments", + "User", + "Cosine", + "Energy", + "Time", + "Cor C", + "Cor B", + "Cor A", + "Value", + "Error"] + for id, tally in tallies.items(): + filters = [] + new_columns = {} + if 'cell' in tally.columns: + filters.append('cell') + if 'surface' in tally.columns: + filters.append('surface') + if 'energy high [eV]' in tally.columns: + filters.append('energy high [eV]') + if 'time' in tally.columns: + filters.append('time') + new_columns = dict((k, filter_lookup[k]) for k in filters if k in filter_lookup) + new_columns['mean'] = filter_lookup['mean'] + new_columns['std. dev.'] = filter_lookup['std. 
dev.'] + sorted_tally = tally.sort_values(filters) + sorted_tally = sorted_tally.reset_index(drop=True) + sorted_tally = sorted_tally.rename(columns=new_columns) + for column in columns: + if column not in sorted_tally.columns: + sorted_tally[column] = np.nan + sorted_tally = sorted_tally[columns] + #sorted_tally.to_csv('tally_'+str(id)+'_sorted.csv') + tallydata[id] = sorted_tally + totalbin[id] = None + return tallydata, totalbin + + def process_tally(self): + tallies = self.output.tallies_to_dataframes() + tallydata, totalbin = self._create_dataframes(tallies) return tallydata, totalbin From 9ba13377302ae53e952ed4184c828439ae58d3d4 Mon Sep 17 00:00:00 2001 From: sbradnam Date: Fri, 1 Nov 2024 11:45:09 +0000 Subject: [PATCH 04/53] openmc tally data to self.raw_data --- jade/output.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/jade/output.py b/jade/output.py index 9df173f7..fefdc815 100644 --- a/jade/output.py +++ b/jade/output.py @@ -657,6 +657,8 @@ def _generate_single_excel_output(self): results_path = os.path.join(self.test_path, self.code) _, outfile = self._get_output_files(results_path, "openmc") openmc_output = OpenMCOutput(outfile) + self.raw_data = openmc_output.tallydata + if self.mcnp or self.d1s: outpath = os.path.join( From e858907f7d7e45bc224e40f0c1c8e4fc4e102bc3 Mon Sep 17 00:00:00 2001 From: sbradnam Date: Fri, 1 Nov 2024 12:12:15 +0000 Subject: [PATCH 05/53] Generalised tally indexing for mcnp and openmc --- jade/output.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/jade/output.py b/jade/output.py index fefdc815..5d41d21b 100644 --- a/jade/output.py +++ b/jade/output.py @@ -681,7 +681,8 @@ def _generate_single_excel_output(self): meshtalfile = os.path.join(results_path, file) # Parse output mcnp_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) - mctal = mcnp_output.mctal + tally_numbers = [tally.tallyNumber for tally in mcnp_output.mctal.tallies] + tally_comments = [tally.tallyComment[0] for tally in mcnp_output.mctal.tallies] # Adjourn raw Data self.raw_data = mcnp_output.tallydata @@ -689,9 +690,9 @@ def _generate_single_excel_output(self): for label in ["Value", "Error"]: # keys = {} - for tally in mctal.tallies: - num = tally.tallyNumber - key = tally.tallyComment[0] + for num, key in zip(tally_numbers, tally_comments): + #num = tally.tallyNumber + #key = tally.tallyComment[0] # keys[num] = key # Memorize tally descriptions tdata = mcnp_output.tallydata[num].copy() # Full tally data try: @@ -830,9 +831,9 @@ def _generate_single_excel_output(self): dic_checks = mcnp_output.out.stat_checks rows = [] - for tally in mctal.tallies: - num = tally.tallyNumber - key = tally.tallyComment[0] + for num, key in zip(tally_numbers, tally_comments): + #num = tally.tallyNumber + #key = tally.tallyComment[0] key_dic = key + " [" + str(num) + "]" try: stat = dic_checks[key_dic] From 8b05c5c1a969e834df8af75eb7e800a3155ec664 Mon Sep 17 00:00:00 2001 From: sbradnam Date: Fri, 1 Nov 2024 12:27:07 +0000 Subject: [PATCH 06/53] Extract omc tally comments and numbers --- jade/openmc.py | 5 + jade/output.py | 325 +++++++++++++++++++++++++------------------------ 2 files changed, 169 insertions(+), 161 deletions(-) diff --git a/jade/openmc.py b/jade/openmc.py index 09396c8c..7e27b662 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -256,6 +256,11 @@ def initialise(self, spfile_path: str) -> None: try: # Retrieve the version from the statepoint file (convert from tuple of integers to string) self.statepoint = 
openmc.StatePoint(spfile_path) + self.tally_numbers = [] + self.tally_comments = [] + for _, tally in self.statepoint.tallies.items(): + self.tally_numbers.append(tally.id) + self.tally_comments.append(tally.name) except (FileNotFoundError, KeyError): logging.warning( "OpenMC version not found in the statepoint file for %s", diff --git a/jade/output.py b/jade/output.py index 5d41d21b..0c907114 100644 --- a/jade/output.py +++ b/jade/output.py @@ -653,18 +653,20 @@ def _generate_single_excel_output(self): # name = "Generic_single.xlsx" # template = os.path.join(os.getcwd(), "templates", name) + outputs = {} + if self.openmc: results_path = os.path.join(self.test_path, self.code) _, outfile = self._get_output_files(results_path, "openmc") - openmc_output = OpenMCOutput(outfile) - self.raw_data = openmc_output.tallydata + sim_output = OpenMCOutput(outfile) + tally_numbers = sim_output.output.tally_numbers + tally_comments = sim_output.output.tally_comments if self.mcnp or self.d1s: outpath = os.path.join( self.excel_path, self.testname + "_" + self.lib + ".xlsx" ) - outputs = {} # ex = ExcelOutputSheet(template, outpath) # Get results # results = [] @@ -680,173 +682,174 @@ def _generate_single_excel_output(self): elif file[-4:] == "msht": meshtalfile = os.path.join(results_path, file) # Parse output - mcnp_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) - tally_numbers = [tally.tallyNumber for tally in mcnp_output.mctal.tallies] - tally_comments = [tally.tallyComment[0] for tally in mcnp_output.mctal.tallies] - # Adjourn raw Data - self.raw_data = mcnp_output.tallydata - - # res, err = output.get_single_excel_data() - - for label in ["Value", "Error"]: - # keys = {} - for num, key in zip(tally_numbers, tally_comments): - #num = tally.tallyNumber - #key = tally.tallyComment[0] - # keys[num] = key # Memorize tally descriptions - tdata = mcnp_output.tallydata[num].copy() # Full tally data - try: - tally_settings = ex_cnf.loc[num] - except KeyError: - print( - " Warning!: tally n." - + str(num) - + " is not in configuration" - ) - continue - - # Re-Elaborate tdata Dataframe - x_name = tally_settings["x"] - x_tag = tally_settings["x name"] - y_name = tally_settings["y"] - y_tag = tally_settings["y name"] - ylim = tally_settings["cut Y"] - - if label == "Value": - outputs[num] = {"title": key, "x_label": x_tag} - - # select the index format - if x_name == "Energy": - idx_format = "0.00E+00" - # TODO all possible cases should be addressed - else: - idx_format = "0" - - if y_name != "tally": - tdata.set_index(x_name, inplace=True) - x_set = list(set(tdata.index)) - y_set = list(set(tdata[y_name].values)) - rows = [] - for xval in x_set: - try: - row = tdata.loc[xval, label].values - prev_len = len(row) - except AttributeError: - # There is only one total value, fill the rest with - # nan - row = [] - for i in range(prev_len - 1): - row.append(np.nan) - row.append(tdata.loc[xval, label]) - - rows.append(row) - - try: - main_value_df = pd.DataFrame( - rows, columns=y_set, index=x_set - ) - main_value_df.index.name = x_name - except ValueError: - print( - CRED - + """ - A ValueError was triggered, a probable cause may be that more than 2 binnings - are defined in tally {}. 
This is a fatal exception, application will now - close""".format( - str(num) - ) - + CEND - ) - # Safely exit from excel and from application - # ex.save() - sys.exit() - - # reorder index (quick reset of the index) - main_value_df.reset_index(inplace=True) - main_value_df = self._reorder_df(main_value_df, x_name) - main_value_df.set_index(x_name, inplace=True) - # memorize for atlas - outputs[num][label] = main_value_df - # insert the df in pieces - # ex.insert_cutted_df( - # "B", - # main_value_df, - # label + "s", - # ylim, - # header=(key, "Tally n." + str(num)), - # index_name=x_tag, - # cols_name=y_tag, - # index_num_format=idx_format, - # ) - else: - # reorder df - try: - tdata = self._reorder_df(tdata, x_name) - except KeyError: - print( - CRED - + """ - {} is not available in tally {}. Please check the configuration file. - The application will now exit """.format( - x_name, str(num) - ) - + CEND - ) - # Safely exit from excel and from application - # ex.save() - sys.exit() - - if label == "Value": - del tdata["Error"] - elif label == "Error": - del tdata["Value"] - # memorize for atlas and set index - tdata.set_index(x_name, inplace=True) - outputs[num][label] = tdata - - # Insert DF - # ex.insert_df( - # "B", - # tdata, - # label + "s", - # print_index=True, - # header=(key, "Tally n." + str(num)), - # ) - # memorize data for atlas - self.outputs[self.code] = outputs - # print(outputs) - # Dump them for comparisons - raw_outpath = os.path.join(self.raw_path, self.lib + ".pickle") - with open(raw_outpath, "wb") as outfile: - pickle.dump(outputs, outfile) - - # Compile general infos in the sheet - # ws = ex.current_ws - # title = self.testname + " RESULTS RECAP: " + label + "s" - # ws.range("A3").value = title - # ws.range("C1").value = self.lib + sim_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) + tally_numbers = [tally.tallyNumber for tally in sim_output.mctal.tallies] + tally_comments = [tally.tallyComment[0] for tally in sim_output.mctal.tallies] + + # Adjourn raw Data + self.raw_data = sim_output.tallydata - # --- Compile statistical checks sheet --- - # ws = ex.wb.sheets["Statistical Checks"] + # res, err = output.get_single_excel_data() - dic_checks = mcnp_output.out.stat_checks - rows = [] + for label in ["Value", "Error"]: + # keys = {} for num, key in zip(tally_numbers, tally_comments): #num = tally.tallyNumber #key = tally.tallyComment[0] - key_dic = key + " [" + str(num) + "]" + # keys[num] = key # Memorize tally descriptions + tdata = sim_output.tallydata[num].copy() # Full tally data try: - stat = dic_checks[key_dic] + tally_settings = ex_cnf.loc[num] except KeyError: - stat = None - rows.append([num, key, stat]) + print( + " Warning!: tally n." 
+ + str(num) + + " is not in configuration" + ) + continue + + # Re-Elaborate tdata Dataframe + x_name = tally_settings["x"] + x_tag = tally_settings["x name"] + y_name = tally_settings["y"] + y_tag = tally_settings["y name"] + ylim = tally_settings["cut Y"] + + if label == "Value": + outputs[num] = {"title": key, "x_label": x_tag} + + # select the index format + if x_name == "Energy": + idx_format = "0.00E+00" + # TODO all possible cases should be addressed + else: + idx_format = "0" + + if y_name != "tally": + tdata.set_index(x_name, inplace=True) + x_set = list(set(tdata.index)) + y_set = list(set(tdata[y_name].values)) + rows = [] + for xval in x_set: + try: + row = tdata.loc[xval, label].values + prev_len = len(row) + except AttributeError: + # There is only one total value, fill the rest with + # nan + row = [] + for i in range(prev_len - 1): + row.append(np.nan) + row.append(tdata.loc[xval, label]) + + rows.append(row) - stats = pd.DataFrame(rows) - stats.columns = ["Tally Number", "Tally Description", "Result"] - # ws.range("A9").options(index=False, header=False).value = df + try: + main_value_df = pd.DataFrame( + rows, columns=y_set, index=x_set + ) + main_value_df.index.name = x_name + except ValueError: + print( + CRED + + """ + A ValueError was triggered, a probable cause may be that more than 2 binnings + are defined in tally {}. This is a fatal exception, application will now + close""".format( + str(num) + ) + + CEND + ) + # Safely exit from excel and from application + # ex.save() + sys.exit() + + # reorder index (quick reset of the index) + main_value_df.reset_index(inplace=True) + main_value_df = self._reorder_df(main_value_df, x_name) + main_value_df.set_index(x_name, inplace=True) + # memorize for atlas + outputs[num][label] = main_value_df + # insert the df in pieces + # ex.insert_cutted_df( + # "B", + # main_value_df, + # label + "s", + # ylim, + # header=(key, "Tally n." + str(num)), + # index_name=x_tag, + # cols_name=y_tag, + # index_num_format=idx_format, + # ) + else: + # reorder df + try: + tdata = self._reorder_df(tdata, x_name) + except KeyError: + print( + CRED + + """ +{} is not available in tally {}. Please check the configuration file. +The application will now exit """.format( + x_name, str(num) + ) + + CEND + ) + # Safely exit from excel and from application + # ex.save() + sys.exit() - # ex.save() - exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) + if label == "Value": + del tdata["Error"] + elif label == "Error": + del tdata["Value"] + # memorize for atlas and set index + tdata.set_index(x_name, inplace=True) + outputs[num][label] = tdata + + # Insert DF + # ex.insert_df( + # "B", + # tdata, + # label + "s", + # print_index=True, + # header=(key, "Tally n." 
+ str(num)), + # ) + # memorize data for atlas + self.outputs[self.code] = outputs + # print(outputs) + # Dump them for comparisons + raw_outpath = os.path.join(self.raw_path, self.lib + ".pickle") + with open(raw_outpath, "wb") as outfile: + pickle.dump(outputs, outfile) + + # Compile general infos in the sheet + # ws = ex.current_ws + # title = self.testname + " RESULTS RECAP: " + label + "s" + # ws.range("A3").value = title + # ws.range("C1").value = self.lib + + # --- Compile statistical checks sheet --- + # ws = ex.wb.sheets["Statistical Checks"] + + dic_checks = sim_output.stat_checks + rows = [] + for num, key in zip(tally_numbers, tally_comments): + #num = tally.tallyNumber + #key = tally.tallyComment[0] + key_dic = key + " [" + str(num) + "]" + try: + stat = dic_checks[key_dic] + except KeyError: + stat = None + rows.append([num, key, stat]) + + stats = pd.DataFrame(rows) + stats.columns = ["Tally Number", "Tally Description", "Result"] + # ws.range("A9").options(index=False, header=False).value = df + + # ex.save() + exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) def _print_raw(self): for key, data in self.raw_data.items(): From e88ed4706575f8ed7482f399fa846432b837057c Mon Sep 17 00:00:00 2001 From: sbradnam Date: Fri, 1 Nov 2024 14:00:43 +0000 Subject: [PATCH 07/53] omc generated excel and atlas simple tokamak --- jade/output.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/jade/output.py b/jade/output.py index 0c907114..b4e2e245 100644 --- a/jade/output.py +++ b/jade/output.py @@ -654,6 +654,9 @@ def _generate_single_excel_output(self): # template = os.path.join(os.getcwd(), "templates", name) outputs = {} + outpath = os.path.join( + self.excel_path, self.testname + "_" + self.lib + ".xlsx" + ) if self.openmc: results_path = os.path.join(self.test_path, self.code) @@ -664,9 +667,6 @@ def _generate_single_excel_output(self): if self.mcnp or self.d1s: - outpath = os.path.join( - self.excel_path, self.testname + "_" + self.lib + ".xlsx" - ) # ex = ExcelOutputSheet(template, outpath) # Get results # results = [] @@ -840,7 +840,7 @@ def _generate_single_excel_output(self): key_dic = key + " [" + str(num) + "]" try: stat = dic_checks[key_dic] - except KeyError: + except (KeyError, TypeError): stat = None rows.append([num, key, stat]) @@ -1205,14 +1205,15 @@ def __init__(self, output_path): def _create_dataframes(self, tallies): tallydata = {} totalbin = {} - filter_lookup = {'cell': "Cells-Segments", - 'surface' : "Cells-Segments", + filter_lookup = {'cell': "Cells", + 'surface' : "Segments", 'energy high [eV]' : 'Energy', 'time' : 'Time', 'mean' : 'Value', 'std. dev.' 
: 'Error'} - columns = ["Cells-Segments", + columns = ["Cells", "User", + "Segments", "Cosine", "Energy", "Time", From abd43a2fec3bf9b1d8298fdd98a6c3769d0a7d59 Mon Sep 17 00:00:00 2001 From: sbradnam Date: Fri, 1 Nov 2024 14:02:53 +0000 Subject: [PATCH 08/53] Black format new changes --- jade/openmc.py | 6 ++-- jade/output.py | 95 +++++++++++++++++++++++++------------------------- 2 files changed, 51 insertions(+), 50 deletions(-) diff --git a/jade/openmc.py b/jade/openmc.py index 7e27b662..3995bca8 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -293,9 +293,9 @@ def _get_tally_data(self, tally: openmc.Tally): pandas dataframe containing tally data """ df = tally.get_pandas_dataframe() - #df.to_csv('tally_'+str(tally.id)+'.csv') + # df.to_csv('tally_'+str(tally.id)+'.csv') return df - + def tallies_to_dataframes(self): """Call to extract tally data from statepoint file @@ -306,7 +306,7 @@ def tallies_to_dataframes(self): """ tallies = {} for _, tally in self.statepoint.tallies.items(): - tallies[tally.id] = self._get_tally_data(tally) + tallies[tally.id] = self._get_tally_data(tally) return tallies diff --git a/jade/output.py b/jade/output.py index b4e2e245..023a65f0 100644 --- a/jade/output.py +++ b/jade/output.py @@ -652,12 +652,12 @@ def _generate_single_excel_output(self): # Open the excel file # name = "Generic_single.xlsx" # template = os.path.join(os.getcwd(), "templates", name) - + outputs = {} outpath = os.path.join( self.excel_path, self.testname + "_" + self.lib + ".xlsx" ) - + if self.openmc: results_path = os.path.join(self.test_path, self.code) _, outfile = self._get_output_files(results_path, "openmc") @@ -665,7 +665,6 @@ def _generate_single_excel_output(self): tally_numbers = sim_output.output.tally_numbers tally_comments = sim_output.output.tally_comments - if self.mcnp or self.d1s: # ex = ExcelOutputSheet(template, outpath) # Get results @@ -684,8 +683,10 @@ def _generate_single_excel_output(self): # Parse output sim_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) tally_numbers = [tally.tallyNumber for tally in sim_output.mctal.tallies] - tally_comments = [tally.tallyComment[0] for tally in sim_output.mctal.tallies] - + tally_comments = [ + tally.tallyComment[0] for tally in sim_output.mctal.tallies + ] + # Adjourn raw Data self.raw_data = sim_output.tallydata @@ -694,18 +695,14 @@ def _generate_single_excel_output(self): for label in ["Value", "Error"]: # keys = {} for num, key in zip(tally_numbers, tally_comments): - #num = tally.tallyNumber - #key = tally.tallyComment[0] + # num = tally.tallyNumber + # key = tally.tallyComment[0] # keys[num] = key # Memorize tally descriptions tdata = sim_output.tallydata[num].copy() # Full tally data try: tally_settings = ex_cnf.loc[num] except KeyError: - print( - " Warning!: tally n." - + str(num) - + " is not in configuration" - ) + print(" Warning!: tally n." 
+ str(num) + " is not in configuration") continue # Re-Elaborate tdata Dataframe @@ -745,9 +742,7 @@ def _generate_single_excel_output(self): rows.append(row) try: - main_value_df = pd.DataFrame( - rows, columns=y_set, index=x_set - ) + main_value_df = pd.DataFrame(rows, columns=y_set, index=x_set) main_value_df.index.name = x_name except ValueError: print( @@ -835,8 +830,8 @@ def _generate_single_excel_output(self): dic_checks = sim_output.stat_checks rows = [] for num, key in zip(tally_numbers, tally_comments): - #num = tally.tallyNumber - #key = tally.tallyComment[0] + # num = tally.tallyNumber + # key = tally.tallyComment[0] key_dic = key + " [" + str(num) + "]" try: stat = dic_checks[key_dic] @@ -1205,37 +1200,43 @@ def __init__(self, output_path): def _create_dataframes(self, tallies): tallydata = {} totalbin = {} - filter_lookup = {'cell': "Cells", - 'surface' : "Segments", - 'energy high [eV]' : 'Energy', - 'time' : 'Time', - 'mean' : 'Value', - 'std. dev.' : 'Error'} - columns = ["Cells", - "User", - "Segments", - "Cosine", - "Energy", - "Time", - "Cor C", - "Cor B", - "Cor A", - "Value", - "Error"] + filter_lookup = { + "cell": "Cells", + "surface": "Segments", + "energy high [eV]": "Energy", + "time": "Time", + "mean": "Value", + "std. dev.": "Error", + } + columns = [ + "Cells", + "User", + "Segments", + "Cosine", + "Energy", + "Time", + "Cor C", + "Cor B", + "Cor A", + "Value", + "Error", + ] for id, tally in tallies.items(): filters = [] new_columns = {} - if 'cell' in tally.columns: - filters.append('cell') - if 'surface' in tally.columns: - filters.append('surface') - if 'energy high [eV]' in tally.columns: - filters.append('energy high [eV]') - if 'time' in tally.columns: - filters.append('time') - new_columns = dict((k, filter_lookup[k]) for k in filters if k in filter_lookup) - new_columns['mean'] = filter_lookup['mean'] - new_columns['std. dev.'] = filter_lookup['std. dev.'] + if "cell" in tally.columns: + filters.append("cell") + if "surface" in tally.columns: + filters.append("surface") + if "energy high [eV]" in tally.columns: + filters.append("energy high [eV]") + if "time" in tally.columns: + filters.append("time") + new_columns = dict( + (k, filter_lookup[k]) for k in filters if k in filter_lookup + ) + new_columns["mean"] = filter_lookup["mean"] + new_columns["std. dev."] = filter_lookup["std. 
dev."] sorted_tally = tally.sort_values(filters) sorted_tally = sorted_tally.reset_index(drop=True) sorted_tally = sorted_tally.rename(columns=new_columns) @@ -1243,12 +1244,12 @@ def _create_dataframes(self, tallies): if column not in sorted_tally.columns: sorted_tally[column] = np.nan sorted_tally = sorted_tally[columns] - #sorted_tally.to_csv('tally_'+str(id)+'_sorted.csv') + # sorted_tally.to_csv('tally_'+str(id)+'_sorted.csv') tallydata[id] = sorted_tally totalbin[id] = None return tallydata, totalbin - def process_tally(self): + def process_tally(self): tallies = self.output.tallies_to_dataframes() tallydata, totalbin = self._create_dataframes(tallies) return tallydata, totalbin From 2a9a23052e54c593d466a3e6c739698898ef4948 Mon Sep 17 00:00:00 2001 From: sbradnam Date: Fri, 1 Nov 2024 14:32:13 +0000 Subject: [PATCH 09/53] Update omc output test --- .../32c/ITER_1D/openmc/tallies.out | 0 tests/output_test.py | 37 +++++++++++++------ 2 files changed, 25 insertions(+), 12 deletions(-) create mode 100644 tests/TestFiles/output/Simulations/32c/ITER_1D/openmc/tallies.out diff --git a/tests/TestFiles/output/Simulations/32c/ITER_1D/openmc/tallies.out b/tests/TestFiles/output/Simulations/32c/ITER_1D/openmc/tallies.out new file mode 100644 index 00000000..e69de29b diff --git a/tests/output_test.py b/tests/output_test.py index 96ad8933..7bcd05b2 100644 --- a/tests/output_test.py +++ b/tests/output_test.py @@ -136,20 +136,33 @@ def test_single_excel_mcnp(self, tmpdir): @pytest.mark.skipif(not OMC_AVAIL, reason="OpenMC is not available") def test_single_excel_openmc(self, tmpdir): - spfile = os.path.join( - cp, - "TestFiles", - "output", - "Simulations", - "32c", - "ITER_1D", - "openmc", - "statepoint.50.h5", + conf = Configuration( + os.path.join(cp, "TestFiles", "output", "config_test.xlsx") ) - statepoint = omc.OpenMCSimOutput(spfile) - version = statepoint.read_openmc_version() + session = MockSession(conf, tmpdir) + out = output.BenchmarkOutput("32c", "openmc", "ITER_1D", session) + out._generate_single_excel_output() + out._print_raw() - assert version == "0.14.0" + assert os.path.exists( + os.path.join( + session.path_single, + "32c", + "ITER_1D", + "openmc", + "Excel", + "ITER_1D_32c.xlsx", + ) + ) + metadata_path = os.path.join( + session.path_single, "32c", "ITER_1D", "openmc", "Raw_Data", "metadata.json" + ) + assert os.path.exists(metadata_path) + with open(metadata_path, "r", encoding="utf-8") as f: + metadata = json.load(f) + assert metadata["jade_run_version"] == "0.0.1" + assert metadata["jade_version"] == __version__ + assert metadata["code_version"] == "0.14.0" def test_iter_cyl(self, tmpdir): conf = Configuration( From ecb6232501d9c75e3d904ab787e44094f703e409 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Mon, 4 Nov 2024 14:23:13 +0000 Subject: [PATCH 10/53] Initialised abstract _get_output_files --- jade/output.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/jade/output.py b/jade/output.py index 023a65f0..eb2390cf 100644 --- a/jade/output.py +++ b/jade/output.py @@ -67,7 +67,6 @@ def single_postprocess(self): """ To be executed when a single pp is requested """ - pass @abc.abstractmethod def compare(self): @@ -75,6 +74,13 @@ def compare(self): To be executed when a comparison is requested """ + @abc.abstractmethod + def _get_output_files(results_path): + """ + To be executed when a comparison is requested + """ + + ''' @staticmethod def _get_output_files(results_path, code): """ @@ -130,7 +136,8 @@ def _get_output_files(results_path, 
code): file2 = os.path.join(results_path, file2) if file2 else None return file1, file2 - + ''' + class BenchmarkOutput(AbstractOutput): def __init__(self, lib: str, code: str, testname: str, session: Session): From 7e54213f07cb3ee0059ad4f959fda0fefc45fcf3 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Mon, 4 Nov 2024 14:41:09 +0000 Subject: [PATCH 11/53] Moved some functions to AbstractOutput --- jade/output.py | 453 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 451 insertions(+), 2 deletions(-) diff --git a/jade/output.py b/jade/output.py index eb2390cf..46dfaef4 100644 --- a/jade/output.py +++ b/jade/output.py @@ -62,6 +62,130 @@ class AbstractOutput(abc.ABC): + def __init__(self, lib: str, code: str, testname: str, session: Session): + """ + General class for a Benchmark output + + Parameters + ---------- + lib : str + library to post-process + code : str + code being post processed + testname : str + name of the benchmark being postprocessed + session : Session + Jade Session + + Returns + ------- + None. + + """ + self.raw_data = {} # Raw data + self.outputs = {} # outputs linked to the benchmark + self.testname = testname # test name + self.code_path = os.getcwd() # path to code + self.state = session.state + self.session = session + self.path_templates = session.path_templates + + # Read specific configuration + cnf_path = os.path.join(session.path_cnf, self.testname + ".xlsx") + if os.path.isfile(cnf_path): + self.cnf_path = cnf_path + # It can be assumed that there is a folder containing multiple files + else: + self.cnf_path = os.path.join(session.path_cnf, self.testname) + + # Updated to handle multiple codes + # initialize them so that intellisense knows they are available + self.mcnp = False + self.openmc = False + self.serpent = False + self.d1s = False + for available_code in CODES.values(): + if code == available_code: + setattr(self, available_code, True) + self.raw_data[code] = {} + self.outputs[code] = {} + else: + setattr(self, available_code, False) + + self.code = code # this can be handy in a lot of places to avoid if else + + # COMPARISON + if isinstance(lib, list) and len(lib) > 1: + self.single = False # Indicator for single or comparison + self.lib = lib + couples = [] + tp = os.path.join(session.path_run, lib[0], self.testname) + self.test_path = {lib[0]: tp} + refname = session.conf.get_lib_name(lib[0]) + name = refname + dirname = lib[0] + for library in lib[1:]: + libname = session.conf.get_lib_name(library) + # name_couple = lib[0]+'_Vs_'+library + name_couple = lib[0] + "_Vs_" + library + name = name + "_Vs_" + libname + dirname = dirname + "_Vs_" + library + couples.append((lib[0], library, name_couple)) + tp = os.path.join(session.path_run, library, self.testname) + self.test_path[library] = tp + + self.name = name + # Generate library output path + out = os.path.join(session.path_comparison, dirname) + if not os.path.exists(out): + os.mkdir(out) + + out = os.path.join(out, self.testname, code) + if os.path.exists(out): + shutil.rmtree(out) + os.makedirs(out) + excel_path = os.path.join(out, "Excel") + atlas_path = os.path.join(out, "Atlas") + raw_path = os.path.join(out, "Raw_Data") + os.makedirs(excel_path) + os.makedirs(atlas_path) + os.makedirs(raw_path) + self.excel_path = excel_path + self.raw_path = raw_path + self.atlas_path = atlas_path + self.couples = couples # Couples of libraries to post process + # SINGLE-LIBRARY + else: + self.single = True # Indicator for single or comparison + if isinstance(lib, list) and 
len(lib) == 1: + self.lib = lib[0] # In case of 1-item list + else: + self.lib = lib + self.test_path = os.path.join(session.path_run, lib, self.testname) + + # Generate library output path + out = os.path.join(session.path_single, lib) + if not os.path.exists(out): + os.mkdir(out) + + out = os.path.join(out, self.testname, code) + if os.path.exists(out): + shutil.rmtree(out) + os.makedirs(out) + excel_path = os.path.join(out, "Excel") + atlas_path = os.path.join(out, "Atlas") + raw_path = os.path.join(out, "Raw_Data") + os.makedirs(excel_path) + os.makedirs(atlas_path) + os.makedirs(raw_path) + self.excel_path = excel_path + self.raw_path = raw_path + self.atlas_path = atlas_path + + # Read the metadata + results_path = os.path.join(self.test_path, code) + self.metadata = self._read_metadata_run(results_path) + @abc.abstractmethod def single_postprocess(self): """ @@ -79,6 +203,12 @@ def _get_output_files(results_path): """ To be executed when a comparison is requested """ + + @abc.abstractmethod + def _read_code_version(self, pathtofile): + """ + To be executed when a comparison is requested + """ ''' @staticmethod @@ -137,9 +267,314 @@ def _get_output_files(results_path, code): return file1, file2 ''' - + + def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: + """Retrieve the metadata from the run + + Parameters + ---------- + pathtofile : os.PathLike + path to metadata file + + Returns + ------- + dict + metadata dictionary + """ + try: + with open( + os.path.join(pathtofile, "metadata.json"), + "r", + encoding="utf-8", + ) as file: + metadata = json.load(file) + except FileNotFoundError: + logging.warning("No metadata file found at %s", pathtofile) + metadata = {} + + metadata["jade_version"] = __version__ + metadata["code_version"] = self._read_code_version(pathtofile) + + return metadata + + def single_postprocess(self): + """ + Execute the full post-processing of a single library (i.e. excel, + raw data and atlas) + + Returns + ------- + None. + + """ + print(" Generating Excel Recap...") + self._generate_single_excel_output() + self._print_raw() + + print(" Creating Atlas...") + outpath = os.path.join(self.atlas_path, "tmp") + os.mkdir(outpath) + + # Get atlas configuration + atl_cnf = pd.read_excel(self.cnf_path, sheet_name="Atlas") + atl_cnf.set_index("Tally", inplace=True) + + # Printing Atlas + template = template = os.path.join(self.path_templates, "AtlasTemplate.docx") + atlas = at.Atlas(template, self.testname + "_" + self.lib) + + # Iterate over each type of plot (first one is quantity + # and second one the measure unit) + for plot_type in list(atl_cnf.columns)[2:]: + print(" Plotting : " + plot_type) + atlas.doc.add_heading("Plot type: " + plot_type, level=1) + # Keep only tallies to plot + atl_cnf_plot = atl_cnf[atl_cnf[plot_type]] + for tally_num in tqdm(atl_cnf_plot.index, desc="Tallies"): + try: + output = self.outputs[self.code][tally_num] + except KeyError: + fatal_exception( + "tally n. 
" + + str(tally_num) + + " is in config but not in the MCNP output" + ) + vals_df = output["Value"] + err_df = output["Error"] + quantity = str(atl_cnf_plot["Quantity"].loc[tally_num]) + unit = str(atl_cnf_plot["Unit"].loc[tally_num]) + xlabel = output["x_label"] + title = output["title"] + + atlas.doc.add_heading("Tally: " + title, level=2) + + columns = vals_df.columns + x = np.array(vals_df.index) + + for column in tqdm(columns): + if len(columns) > 1: + try: + txt = str(int(column)) + except ValueError: + # it is not convertible to int + txt = str(column) + + atlas.doc.add_heading(txt, level=3) + newtitle = title + " (" + txt + ")" + else: + newtitle = title + + # If total is present it has to be deleted + try: + vals_df.drop(["total"], inplace=True) + err_df.drop(["total"], inplace=True) + x = x[:-1] + except KeyError: + pass + + try: + values = vals_df[column].values + error = err_df[column].values + except KeyError: + # this means that the column is only one and we have + # two distinct DFs for values and errors + # depending on pandas version, these may be series or + # directly arrays + values = vals_df["Value"] + error = err_df["Error"] + if isinstance(values, pd.Series) or isinstance( + values, pd.DataFrame + ): + values = values.values + if isinstance(error, pd.Series) or isinstance( + error, pd.DataFrame + ): + error = error.values + + lib_name = self.session.conf.get_lib_name(self.lib) + lib = {"x": x, "y": values, "err": error, "ylabel": lib_name} + data = [lib] + + outname = "tmp" + plot = plotter.Plotter( + data, + newtitle, + outpath, + outname, + quantity, + unit, + xlabel, + self.testname, + ) + img_path = plot.plot(plot_type) + + atlas.insert_img(img_path) + atlas.save(self.atlas_path) + # Remove tmp images + shutil.rmtree(outpath) + + def compare(self): + """ + Generates the full comparison post-processing (excel and atlas) + + Returns + ------- + None. + + """ + print(" Generating Excel Recap...") + self._generate_comparison_excel_output() + + print(" Creating Atlas...") + outpath = os.path.join(self.atlas_path, "tmp") + os.mkdir(outpath) + + # Get atlas configuration + atl_cnf = pd.read_excel(self.cnf_path, sheet_name="Atlas") + atl_cnf.set_index("Tally", inplace=True) + + # Printing Atlas + template = os.path.join(self.path_templates, "AtlasTemplate.docx") + + atlas = at.Atlas(template, self.testname + "_" + self.name) + + # Recover data + outputs_dic = {} + for lib in self.lib: + # Recover lib output + out_path = os.path.join( + self.session.path_single, + lib, + self.testname, + self.code, + "Raw_Data", + lib + ".pickle", + ) + with open(out_path, "rb") as handle: + outputs = pickle.load(handle) + outputs_dic[lib] = outputs + + # Iterate over each type of plot (first one is quantity + # and second one the measure unit) + for plot_type in list(atl_cnf.columns)[2:]: + print(" Plotting : " + plot_type) + atlas.doc.add_heading("Plot type: " + plot_type, level=1) + # Keep only tallies to plot + atl_cnf_plot = atl_cnf[atl_cnf[plot_type]] + for tally_num in tqdm(atl_cnf_plot.index, desc="Tallies"): + # The last 'outputs' can be easily used for common data + try: + output = outputs_dic[lib][tally_num] + except KeyError: + fatal_exception( + "tally n. 
" + + str(tally_num) + + " is in config but not in the MCNP output" + ) + vals_df = output["Value"] + err_df = output["Error"] + quantity = str(atl_cnf_plot["Quantity"].loc[tally_num]) + unit = str(atl_cnf_plot["Unit"].loc[tally_num]) + xlabel = output["x_label"] + title = output["title"] + + atlas.doc.add_heading("Tally: " + title, level=2) + + columns = vals_df.columns + + for column in tqdm(columns): + if len(columns) > 1: + try: + txt = str(int(column)) + except ValueError: + # it is not convertible to int + txt = str(column) + + atlas.doc.add_heading(txt, level=3) + newtitle = title + " (" + txt + ")" + + else: + newtitle = title + data = [] + for lib in self.lib: + output = outputs_dic[lib][tally_num] + + # override values and errors + try: + vals_df = output["Value"] + err_df = output["Error"] + # If total is present it has to be deleted + try: + vals_df.drop(["total"], inplace=True) + err_df.drop(["total"], inplace=True) + except KeyError: + pass + values = vals_df[column].values + error = err_df[column].values + + except KeyError: + # this means that the column is only one and we + # havetwo distinct DFs for values and errors + values = vals_df["Value"].values + error = err_df["Error"].values + + x = np.array(vals_df.index) + + lib_name = self.session.conf.get_lib_name(lib) + lib_data = { + "x": x, + "y": values, + "err": error, + "ylabel": lib_name, + } + data.append(lib_data) + + outname = "tmp" + plot = plotter.Plotter( + data, + newtitle, + outpath, + outname, + quantity, + unit, + xlabel, + self.testname, + ) + img_path = plot.plot(plot_type) + + atlas.insert_img(img_path) + atlas.save(self.atlas_path) + + # Remove tmp images + shutil.rmtree(outpath) + + @staticmethod + def _reorder_df(df, x_set): + # First of all try order by number + df["index"] = pd.to_numeric(df[x_set], errors="coerce") + + # If they are all nan try with a normal sort + if df["index"].isnull().values.all(): + df.sort_values(x_set, inplace=True) + + # Otherwise keep on with the number sorting + else: + df.sort_values("index", inplace=True) + + del df["index"] + + # Try to reorder the columns + try: + df = df.reindex(sorted(df.columns), axis=1) + except TypeError: + # They are a mix of strings and ints, let's ignore it for + # the time being + pass + + return df class BenchmarkOutput(AbstractOutput): + ''' def __init__(self, lib: str, code: str, testname: str, session: Session): """ General class for a Benchmark output @@ -263,7 +698,9 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # Read the metadata results_path = os.path.join(self.test_path, code) self.metadata = self._read_metadata_run(results_path) + ''' + ''' def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: """Retrieve the metadata from the run @@ -292,7 +729,9 @@ def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: metadata["code_version"] = self._read_code_version(pathtofile) return metadata + ''' + ''' def _read_code_version(self, pathtofile: os.PathLike) -> str | None: """Read the code version from the output files or in other ways depending on the used code. 
@@ -322,7 +761,9 @@ def _read_code_version(self, pathtofile: os.PathLike) -> str | None: pass return None + ''' + #TODO convert to _read_code_version in MCNPoutput def _read_mcnp_code_version(self, ofile: os.PathLike) -> str | None: """Read MCNP code version from the output file @@ -351,6 +792,7 @@ def _read_mcnp_code_version(self, ofile: os.PathLike) -> str | None: ) return None + #TODO convert to _read_code_version in OpenMCOutput def _read_openmc_code_version(self, spfile: os.PathLike) -> str | None: """Read OpenMC code version from the statepoint file @@ -368,9 +810,12 @@ def _read_openmc_code_version(self, spfile: os.PathLike) -> str | None: version = statepoint.version return version + ''' def _read_serpent_code_version(self, ofile: os.PathLike) -> str | None: pass + ''' + ''' def single_postprocess(self): """ Execute the full post-processing of a single library (i.e. excel, @@ -486,7 +931,9 @@ def single_postprocess(self): atlas.save(self.atlas_path) # Remove tmp images shutil.rmtree(outpath) + ''' + ''' def compare(self): """ Generates the full comparison post-processing (excel and atlas) @@ -621,7 +1068,9 @@ def compare(self): # Remove tmp images shutil.rmtree(outpath) + ''' + ''' @staticmethod def _reorder_df(df, x_set): # First of all try order by number @@ -646,6 +1095,7 @@ def _reorder_df(df, x_set): pass return df + ''' def _generate_single_excel_output(self): # Get excel configuration @@ -856,7 +1306,6 @@ def _generate_single_excel_output(self): def _print_raw(self): for key, data in self.raw_data.items(): file = os.path.join(self.raw_path, str(key) + ".csv") - print(file) data.to_csv(file, header=True, index=False) metadata_file = os.path.join(self.raw_path, "metadata.json") From 3c4111cb76f606069b5ae25b8ea5712a235d7ff0 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Mon, 4 Nov 2024 15:05:38 +0000 Subject: [PATCH 12/53] Abstract functions added to child classes --- jade/output.py | 87 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 25 deletions(-) diff --git a/jade/output.py b/jade/output.py index 46dfaef4..4a76e37d 100644 --- a/jade/output.py +++ b/jade/output.py @@ -572,6 +572,15 @@ def _reorder_df(df, x_set): pass return df + + def _print_raw(self): + for key, data in self.raw_data.items(): + file = os.path.join(self.raw_path, str(key) + ".csv") + data.to_csv(file, header=True, index=False) + + metadata_file = os.path.join(self.raw_path, "metadata.json") + with open(metadata_file, "w", encoding="utf-8") as outfile: + json.dump(self.metadata, outfile, indent=4) class BenchmarkOutput(AbstractOutput): ''' @@ -792,24 +801,6 @@ def _read_mcnp_code_version(self, ofile: os.PathLike) -> str | None: ) return None - #TODO convert to _read_code_version in OpenMCOutput - def _read_openmc_code_version(self, spfile: os.PathLike) -> str | None: - """Read OpenMC code version from the statepoint file - - Parameters - ---------- - spfile : os.PathLike - statepoint file path - - Returns - ------- - str | None - version of the OpenMC code used to run the benchmark - """ - statepoint = omc.OpenMCSimOutput(spfile) - version = statepoint.version - return version - ''' def _read_serpent_code_version(self, ofile: os.PathLike) -> str | None: pass @@ -1139,10 +1130,8 @@ def _generate_single_excel_output(self): meshtalfile = os.path.join(results_path, file) # Parse output sim_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) - tally_numbers = [tally.tallyNumber for tally in sim_output.mctal.tallies] - tally_comments = [ - tally.tallyComment[0] for 
tally in sim_output.mctal.tallies - ] + tally_numbers = sim_output.tally_numbers + tally_comments = sim_output.tally_comments # Adjourn raw Data self.raw_data = sim_output.tallydata @@ -1303,6 +1292,7 @@ def _generate_single_excel_output(self): # ex.save() exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) + ''' def _print_raw(self): for key, data in self.raw_data.items(): file = os.path.join(self.raw_path, str(key) + ".csv") @@ -1311,6 +1301,7 @@ def _print_raw(self): metadata_file = os.path.join(self.raw_path, "metadata.json") with open(metadata_file, "w", encoding="utf-8") as outfile: json.dump(self.metadata, outfile, indent=4) + ''' def _generate_comparison_excel_output(self): # Get excel configuration @@ -1552,8 +1543,7 @@ def _generate_comparison_excel_output(self): std_devs, ) - -class MCNPoutput: +class MCNPoutput(AbstractOutput): def __init__(self, mctal_file, output_file, meshtal_file=None): """ Class representing all outputs coming from and MCNP run @@ -1619,6 +1609,8 @@ def __init__(self, mctal_file, output_file, meshtal_file=None): pass # no user column self.mctal = mctal + self.tally_comments = [tally.tallyNumber for tally in self.mctal.tallies] + self.tally_numbers = [tally.tallyComment[0] for tally in self.mctal.tallies] self.tallydata = tallydata self.totalbin = total_bin # Read the output file @@ -1646,13 +1638,58 @@ def __init__(self, mctal_file, output_file, meshtal_file=None): else: continue + def _read_code_version(self, ofile: os.PathLike) -> str | None: + """Read MCNP code version from the output file + + Parameters + ---------- + ofile : os.PathLike + output file path -class OpenMCOutput: + Returns + ------- + str | None + version of the MCNP code used to run the benchmark + """ + + outp = MCNPOutputFile(ofile) + try: + version = outp.get_code_version() + return version + except ValueError: + logging.warning( + "Code version not found in the output file or aux file for %s", + ofile, + ) + logging.warning( + "Contents of the directory: %s", os.listdir(os.path.dirname(ofile)) + ) + return None + + +class OpenMCOutput(AbstractOutput): def __init__(self, output_path): self.output = omc.OpenMCSimOutput(output_path) self.tallydata, self.totalbin = self.process_tally() self.stat_checks = None + def _read_code_version(self, spfile: os.PathLike) -> str | None: + """Read OpenMC code version from the statepoint file + + Parameters + ---------- + spfile : os.PathLike + statepoint file path + + Returns + ------- + str | None + version of the OpenMC code used to run the benchmark + """ + statepoint = omc.OpenMCSimOutput(spfile) + version = statepoint.version + return version + def _create_dataframes(self, tallies): tallydata = {} totalbin = {} From 6c9b5418999c0c85a49cd5eab4c983cf07470edf Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Mon, 4 Nov 2024 15:13:00 +0000 Subject: [PATCH 13/53] Moved single excel output to AbstractOutput --- jade/output.py | 233 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 204 insertions(+), 29 deletions(-) diff --git a/jade/output.py b/jade/output.py index 4a76e37d..9ddb65d5 100644 --- a/jade/output.py +++ b/jade/output.py @@ -582,6 +582,210 @@ def _print_raw(self): with open(metadata_file, "w", encoding="utf-8") as outfile: json.dump(self.metadata, outfile, indent=4) + def _generate_single_excel_output(self): + # Get excel configuration + self.outputs = {} + self.results = {} + self.errors = {} + self.stat_checks = {} + ex_cnf = pd.read_excel(self.cnf_path, sheet_name="Excel") + 
ex_cnf.set_index("Tally", inplace=True) + + # Open the excel file + # name = "Generic_single.xlsx" + # template = os.path.join(os.getcwd(), "templates", name) + + outputs = {} + outpath = os.path.join( + self.excel_path, self.testname + "_" + self.lib + ".xlsx" + ) + + if self.openmc: + results_path = os.path.join(self.test_path, self.code) + _, outfile = self._get_output_files(results_path, "openmc") + sim_output = OpenMCOutput(outfile) + tally_numbers = sim_output.output.tally_numbers + tally_comments = sim_output.output.tally_comments + + if self.mcnp or self.d1s: + # ex = ExcelOutputSheet(template, outpath) + # Get results + # results = [] + # errors = [] + results_path = os.path.join(self.test_path, self.code) + # Get mfile and outfile and possibly meshtal file + meshtalfile = None + for file in os.listdir(results_path): + if file[-1] == "m": + mfile = os.path.join(results_path, file) + elif file[-1] == "o": + ofile = os.path.join(results_path, file) + elif file[-4:] == "msht": + meshtalfile = os.path.join(results_path, file) + # Parse output + sim_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) + tally_numbers = sim_output.tally_numbers + tally_comments = sim_output.tally_comments + + # Adjourn raw Data + self.raw_data = sim_output.tallydata + + # res, err = output.get_single_excel_data() + + for label in ["Value", "Error"]: + # keys = {} + for num, key in zip(tally_numbers, tally_comments): + # num = tally.tallyNumber + # key = tally.tallyComment[0] + # keys[num] = key # Memorize tally descriptions + tdata = sim_output.tallydata[num].copy() # Full tally data + try: + tally_settings = ex_cnf.loc[num] + except KeyError: + print(" Warning!: tally n." + str(num) + " is not in configuration") + continue + + # Re-Elaborate tdata Dataframe + x_name = tally_settings["x"] + x_tag = tally_settings["x name"] + y_name = tally_settings["y"] + y_tag = tally_settings["y name"] + ylim = tally_settings["cut Y"] + + if label == "Value": + outputs[num] = {"title": key, "x_label": x_tag} + + # select the index format + if x_name == "Energy": + idx_format = "0.00E+00" + # TODO all possible cases should be addressed + else: + idx_format = "0" + + if y_name != "tally": + tdata.set_index(x_name, inplace=True) + x_set = list(set(tdata.index)) + y_set = list(set(tdata[y_name].values)) + rows = [] + for xval in x_set: + try: + row = tdata.loc[xval, label].values + prev_len = len(row) + except AttributeError: + # There is only one total value, fill the rest with + # nan + row = [] + for i in range(prev_len - 1): + row.append(np.nan) + row.append(tdata.loc[xval, label]) + + rows.append(row) + + try: + main_value_df = pd.DataFrame(rows, columns=y_set, index=x_set) + main_value_df.index.name = x_name + except ValueError: + print( + CRED + + """ + A ValueError was triggered, a probable cause may be that more than 2 binnings + are defined in tally {}. This is a fatal exception, application will now + close""".format( + str(num) + ) + + CEND + ) + # Safely exit from excel and from application + # ex.save() + sys.exit() + + # reorder index (quick reset of the index) + main_value_df.reset_index(inplace=True) + main_value_df = self._reorder_df(main_value_df, x_name) + main_value_df.set_index(x_name, inplace=True) + # memorize for atlas + outputs[num][label] = main_value_df + # insert the df in pieces + # ex.insert_cutted_df( + # "B", + # main_value_df, + # label + "s", + # ylim, + # header=(key, "Tally n." 
+ str(num)), + # index_name=x_tag, + # cols_name=y_tag, + # index_num_format=idx_format, + # ) + else: + # reorder df + try: + tdata = self._reorder_df(tdata, x_name) + except KeyError: + print( + CRED + + """ +{} is not available in tally {}. Please check the configuration file. +The application will now exit """.format( + x_name, str(num) + ) + + CEND + ) + # Safely exit from excel and from application + # ex.save() + sys.exit() + + if label == "Value": + del tdata["Error"] + elif label == "Error": + del tdata["Value"] + # memorize for atlas and set index + tdata.set_index(x_name, inplace=True) + outputs[num][label] = tdata + + # Insert DF + # ex.insert_df( + # "B", + # tdata, + # label + "s", + # print_index=True, + # header=(key, "Tally n." + str(num)), + # ) + # memorize data for atlas + self.outputs[self.code] = outputs + # print(outputs) + # Dump them for comparisons + raw_outpath = os.path.join(self.raw_path, self.lib + ".pickle") + with open(raw_outpath, "wb") as outfile: + pickle.dump(outputs, outfile) + + # Compile general infos in the sheet + # ws = ex.current_ws + # title = self.testname + " RESULTS RECAP: " + label + "s" + # ws.range("A3").value = title + # ws.range("C1").value = self.lib + + # --- Compile statistical checks sheet --- + # ws = ex.wb.sheets["Statistical Checks"] + + dic_checks = sim_output.stat_checks + rows = [] + for num, key in zip(tally_numbers, tally_comments): + # num = tally.tallyNumber + # key = tally.tallyComment[0] + key_dic = key + " [" + str(num) + "]" + try: + stat = dic_checks[key_dic] + except (KeyError, TypeError): + stat = None + rows.append([num, key, stat]) + + stats = pd.DataFrame(rows) + stats.columns = ["Tally Number", "Tally Description", "Result"] + # ws.range("A9").options(index=False, header=False).value = df + + # ex.save() + exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) + class BenchmarkOutput(AbstractOutput): ''' def __init__(self, lib: str, code: str, testname: str, session: Session): @@ -772,35 +976,6 @@ def _read_code_version(self, pathtofile: os.PathLike) -> str | None: return None ''' - #TODO convert to _read_code_version in MCNPoutput - def _read_mcnp_code_version(self, ofile: os.PathLike) -> str | None: - """Read MCNP code version from the output file - - Parameters - ---------- - ofile : os.PathLike - output file path - - Returns - ------- - str | None - version of the MCNP code used to run the benchmark - """ - - outp = MCNPOutputFile(ofile) - try: - version = outp.get_code_version() - return version - except ValueError: - logging.warning( - "Code version not found in the output file or aux file for %s", - ofile, - ) - logging.warning( - "Contents of the directory: %s", os.listdir(os.path.dirname(ofile)) - ) - return None - ''' def _read_serpent_code_version(self, ofile: os.PathLike) -> str | None: pass From 4fc8353a5b773f645b80837ac4d20814af75942a Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Mon, 4 Nov 2024 15:49:37 +0000 Subject: [PATCH 14/53] Reoved benchmark output class --- jade/output.py | 337 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 334 insertions(+), 3 deletions(-) diff --git a/jade/output.py b/jade/output.py index 9ddb65d5..84332fc1 100644 --- a/jade/output.py +++ b/jade/output.py @@ -186,17 +186,21 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): results_path = os.path.join(self.test_path, code) self.metadata = self._read_metadata_run(results_path) + ''' @abc.abstractmethod def single_postprocess(self): """ To be executed 
when a single pp is requested """ + ''' + ''' @abc.abstractmethod def compare(self): """ To be executed when a comparison is requested """ + ''' @abc.abstractmethod def _get_output_files(results_path): @@ -786,7 +790,247 @@ def _generate_single_excel_output(self): # ex.save() exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) -class BenchmarkOutput(AbstractOutput): + def _generate_comparison_excel_output(self): + # Get excel configuration + self.outputs = {} + self.results = {} + self.errors = {} + self.stat_checks = {} + ex_cnf = pd.read_excel(self.cnf_path, sheet_name="Excel") + ex_cnf.set_index("Tally", inplace=True) + + # Open the excel file + # name_tag = "Generic_comparison.xlsx" + # template = os.path.join(os.getcwd(), "templates", name_tag) + + if self.mcnp or self.d1s: + mcnp_outputs = {} + comps = {} + abs_diffs = {} + std_devs = {} + for reflib, tarlib, name in self.couples: + lib_to_comp = name + outfolder_path = self.excel_path + outpath = os.path.join( + outfolder_path, "Comparison_" + name + f"_{self.code}.xlsx" + ) + + # ex = ExcelOutputSheet(template, outpath) + # Get results + + # for lib in to_read: + # results_path = self.test_path[lib] + for lib, results_path in { + reflib: os.path.join(self.test_path[reflib], self.code), + tarlib: os.path.join(self.test_path[tarlib], self.code), + }.items(): + # Get mfile and outfile and possibly meshtal file + meshtalfile = None + for file in os.listdir(results_path): + if file[-1] == "m": + mfile = os.path.join(results_path, file) + elif file[-1] == "o": + ofile = os.path.join(results_path, file) + elif file[-4:] == "msht": + meshtalfile = os.path.join(results_path, file) + # Parse output + mcnp_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) + mcnp_outputs[lib] = mcnp_output + # Build the comparison + for label in ["Value", "Error"]: + for tally in mcnp_outputs[reflib].mctal.tallies: + num = tally.tallyNumber + key = tally.tallyComment[0] + + # Full tally data + tdata_ref = mcnp_outputs[reflib].tallydata[num].copy() + tdata_tar = mcnp_outputs[tarlib].tallydata[num].copy() + try: + tally_settings = ex_cnf.loc[num] + except KeyError: + print( + " Warning!: tally n." + + str(num) + + " is not in configuration" + ) + continue + + # Re-Elaborate tdata Dataframe + x_name = tally_settings["x"] + x_tag = tally_settings["x name"] + y_name = tally_settings["y"] + # y_tag = tally_settings["y name"] + # ylim = tally_settings["cut Y"] + # select the index format + if label == "Value": + for dic in [comps, abs_diffs, std_devs]: + dic[num] = {"title": key, "x_label": x_tag} + + # if x_name == "Energy": + # idx_format = "0.00E+00" + # # TODO all possible cases should be addressed + # else: + # idx_format = "0" + + if y_name != "tally": + tdata_ref.set_index(x_name, inplace=True) + tdata_tar.set_index(x_name, inplace=True) + x_set = list(set(tdata_ref.index)) + y_set = list(set(tdata_ref[y_name].values)) + rows_fin = [] + rows_abs_diff = [] + rows_std_dev = [] + for xval in x_set: + try: + ref = tdata_ref.loc[xval, "Value"].values + ref_err = tdata_ref.loc[xval, "Error"].values + tar = tdata_tar.loc[xval, "Value"].values + # !!! True divide warnings are suppressed !!! + with np.errstate(divide="ignore", invalid="ignore"): + row_fin = (ref - tar) / ref + row_abs_diff = ref - tar + row_std_dev = row_abs_diff / (ref_err * ref) + prev_len = len(ref) + except AttributeError: + # This is raised when total values are + # collected only for one bin. 
+ # the rest needs to be filled by nan + ref = tdata_ref.loc[xval, "Value"] + ref_err = tdata_ref.loc[xval, "Error"] + tar = tdata_tar.loc[xval, "Value"] + row_fin = [] + row_abs_diff = [] + row_std_dev = [] + for i in range(prev_len - 1): + row_fin.append(np.nan) + row_abs_diff.append(np.nan) + row_std_dev.append(np.nan) + row_fin.append((ref - tar) / ref) + row_abs_diff.append(ref - tar) + row_std_dev.append((ref - tar) / (ref_err * ref)) + + rows_fin.append(row_fin) + rows_abs_diff.append(row_abs_diff) + rows_std_dev.append(row_std_dev) + try: + final = pd.DataFrame( + rows_fin, columns=y_set, index=x_set + ) + abs_diff = pd.DataFrame( + rows_abs_diff, columns=y_set, index=x_set + ) + std_dev = pd.DataFrame( + rows_std_dev, columns=y_set, index=x_set + ) + for df in [final, abs_diff, std_dev]: + df.index.name = x_name + df.replace(np.nan, "Not Available", inplace=True) + df.replace(float(0), "Identical", inplace=True) + df.replace(-np.inf, "Reference = 0", inplace=True) + df.replace(1, "Target = 0", inplace=True) + except ValueError: + print( + CRED + + """ + A ValueError was triggered, a probable cause may be that more than 2 binnings + are defined in tally {}. This is a fatal exception, application will now + close""".format( + str(num) + ) + + CEND + ) + # Safely exit from excel and from application + sys.exit() + + # reorder index and quick index reset + for df in [final, abs_diff, std_dev]: + df.reset_index(inplace=True) + df = self._reorder_df(df, x_name) + df.set_index(x_name, inplace=True) + comps[num][label] = final + abs_diffs[num][label] = abs_diff + std_devs[num][label] = std_dev + # insert the df in pieces + # ex.insert_cutted_df( + # "B", + # main_value_df, + # "Comparison", + # ylim, + # header=(key, "Tally n." + str(num)), + # index_name=x_tag, + # cols_name=y_tag, + # index_num_format=idx_format, + # values_format="0.00%", + # ) + else: + # reorder dfs + try: + tdata_ref = self._reorder_df(tdata_ref, x_name) + except KeyError: + print( + CRED + + """ + {} is not available in tally {}. Please check the configuration file. + The application will now exit """.format( + x_name, str(num) + ) + + CEND + ) + # Safely exit from excel and from application + sys.exit() + + del tdata_ref["Error"] + tdata_ref.set_index(x_name, inplace=True) + + tdata_tar = self._reorder_df(tdata_tar, x_name) + del tdata_tar["Error"] + tdata_tar.set_index(x_name, inplace=True) + + # !!! True divide warnings are suppressed !!! + with np.errstate(divide="ignore", invalid="ignore"): + comp_df = (tdata_ref - tdata_tar) / tdata_ref + abs_diff_df = tdata_ref - tdata_tar + std_dev_df = abs_diff_df + comps[num][label] = comp_df + abs_diffs[num][label] = abs_diff_df + std_devs[num][label] = abs_diff_df + # Insert DF + # ex.insert_df( + # "B", + # df, + # "Comparison", + # print_index=True, + # header=(key, "Tally n." 
+ str(num)), + # values_format="0.00%", + # ) + + # Compile general infos in the sheet + # ws = ex.current_ws + # title = self.testname + " RESULTS RECAP: Comparison" + # ws.range("A3").value = title + # ws.range("C1").value = tarlib + " Vs " + reflib + + # Add single pp sheets + # for lib in [reflib, tarlib]: + # cp = self.state.get_path( + # "single", [lib, self.testname, "Excel"]) + # file = os.listdir(cp)[0] + # cp = os.path.join(cp, file) + # ex.copy_sheets(cp) + + # ex.save() + self.outputs[self.code] = comps + exsupp.comp_excel_writer( + self, + outpath, + lib_to_comp, + self.testname, + comps, + abs_diffs, + std_devs, + ) + +#class BenchmarkOutput(AbstractOutput): ''' def __init__(self, lib: str, code: str, testname: str, session: Session): """ @@ -1263,6 +1507,7 @@ def _reorder_df(df, x_set): return df ''' + ''' def _generate_single_excel_output(self): # Get excel configuration self.outputs = {} @@ -1466,7 +1711,8 @@ def _generate_single_excel_output(self): # ex.save() exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) - + ''' + ''' def _print_raw(self): for key, data in self.raw_data.items(): @@ -1840,7 +2086,49 @@ def _read_code_version(self, ofile: os.PathLike) -> str | None: "Contents of the directory: %s", os.listdir(os.path.dirname(ofile)) ) return None + + def _get_output_files(self, results_path): + """ + Recover the output files from a directory + + Parameters + ---------- + results_path : str or path + path where the results are contained. + code : str + code that generated the output ('mcnp' or 'openmc') + + Raises + ------ + FileNotFoundError + if the required files are not found. + + Returns + ------- + file1 : path + path to the first file + file2 : path + path to the second file (only for mcnp) + + """ + file1 = None + file2 = None + + for file_name in os.listdir(results_path): + if file_name[-1] == "m": + file1 = file_name + elif file_name[-1] == "o": + file2 = file_name + + if file1 is None or file2 is None: + raise FileNotFoundError( + f"The following path does not contain the required files for {self.code} output: {results_path}" + ) + file1 = os.path.join(results_path, file1) if file1 else None + file2 = os.path.join(results_path, file2) if file2 else None + + return file1, file2 class OpenMCOutput(AbstractOutput): def __init__(self, output_path): @@ -1863,7 +2151,50 @@ def _read_code_version(self, spfile: os.PathLike) -> str | None: """ statepoint = omc.OpenMCSimOutput(spfile) version = statepoint.version - return version + return version + + def _get_output_files(self, results_path): + """ + Recover the output files from a directory + + Parameters + ---------- + results_path : str or path + path where the results are contained. + code : str + code that generated the output ('mcnp' or 'openmc') + + Raises + ------ + FileNotFoundError + if the required files are not found. 
+ + Returns + ------- + file1 : path + path to the first file + file2 : path + path to the second file (only for mcnp) + + """ + file1 = None + file2 = None + + for file_name in os.listdir(results_path): + if file_name.endswith(".out"): + file1 = file_name + elif file_name.startswith("statepoint"): + file2 = file_name + + if file1 is None or file2 is None: + raise FileNotFoundError( + f"The following path does not contain the required files for {self.code} output: {results_path}" + ) + + file1 = os.path.join(results_path, file1) if file1 else None + file2 = os.path.join(results_path, file2) if file1 else None + + return file1, file2 def _create_dataframes(self, tallies): tallydata = {} From 064de0231e803fec311cfeb4d4a7ef6d24ba9688 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Mon, 4 Nov 2024 16:36:57 +0000 Subject: [PATCH 15/53] Refactored benchmark output --- jade/output.py | 1150 +++++------------------------------------------- 1 file changed, 118 insertions(+), 1032 deletions(-) diff --git a/jade/output.py b/jade/output.py index 84332fc1..1ede6136 100644 --- a/jade/output.py +++ b/jade/output.py @@ -60,978 +60,14 @@ CRED = "\033[91m" CEND = "\033[0m" +class BenchmarkOutput: + def __init__(self, lib: str, code: str, testname: str, session: Session): + if code in ['mcnp', 'd1s']: + return MCNPoutput(lib, code, testname, session) + elif code == 'openmc': + return OpenMCOutput(lib, code, testname, session) -class AbstractOutput(abc.ABC): - def __init__(self, lib: str, code: str, testname: str, session: Session): - """ - General class for a Benchmark output - - Parameters - ---------- - lib : str - library to post-process - code : str - code being post processed - testname : str - name of the benchmark being postprocessed - session : Session - Jade Session - - Returns - ------- - None. 
- - """ - self.raw_data = {} # Raw data - self.outputs = {} # outputs linked to the benchmark - self.testname = testname # test name - self.code_path = os.getcwd() # path to code - self.state = session.state - self.session = session - self.path_templates = session.path_templates - - # Read specific configuration - cnf_path = os.path.join(session.path_cnf, self.testname + ".xlsx") - if os.path.isfile(cnf_path): - self.cnf_path = cnf_path - # It can be assumed that there is a folder containing multiple files - else: - self.cnf_path = os.path.join(session.path_cnf, self.testname) - - # Updated to handle multiple codes - # initialize them so that intellisense knows they are available - self.mcnp = False - self.openmc = False - self.serpent = False - self.d1s = False - for available_code in CODES.values(): - if code == available_code: - setattr(self, available_code, True) - self.raw_data[code] = {} - self.outputs[code] = {} - else: - setattr(self, available_code, False) - - self.code = code # this can be handy in a lot of places to avoid if else - - # COMPARISON - if isinstance(lib, list) and len(lib) > 1: - self.single = False # Indicator for single or comparison - self.lib = lib - couples = [] - tp = os.path.join(session.path_run, lib[0], self.testname) - self.test_path = {lib[0]: tp} - refname = session.conf.get_lib_name(lib[0]) - name = refname - dirname = lib[0] - for library in lib[1:]: - libname = session.conf.get_lib_name(library) - # name_couple = lib[0]+'_Vs_'+library - name_couple = lib[0] + "_Vs_" + library - name = name + "_Vs_" + libname - dirname = dirname + "_Vs_" + library - couples.append((lib[0], library, name_couple)) - tp = os.path.join(session.path_run, library, self.testname) - self.test_path[library] = tp - - self.name = name - # Generate library output path - out = os.path.join(session.path_comparison, dirname) - if not os.path.exists(out): - os.mkdir(out) - - out = os.path.join(out, self.testname, code) - if os.path.exists(out): - shutil.rmtree(out) - os.makedirs(out) - excel_path = os.path.join(out, "Excel") - atlas_path = os.path.join(out, "Atlas") - raw_path = os.path.join(out, "Raw_Data") - os.makedirs(excel_path) - os.makedirs(atlas_path) - os.makedirs(raw_path) - self.excel_path = excel_path - self.raw_path = raw_path - self.atlas_path = atlas_path - self.couples = couples # Couples of libraries to post process - # SINGLE-LIBRARY - else: - self.single = True # Indicator for single or comparison - if isinstance(lib, list) and len(lib) == 1: - self.lib = lib[0] # In case of 1-item list - else: - self.lib = lib - self.test_path = os.path.join(session.path_run, lib, self.testname) - - # Generate library output path - out = os.path.join(session.path_single, lib) - if not os.path.exists(out): - os.mkdir(out) - - out = os.path.join(out, self.testname, code) - if os.path.exists(out): - shutil.rmtree(out) - os.makedirs(out) - excel_path = os.path.join(out, "Excel") - atlas_path = os.path.join(out, "Atlas") - raw_path = os.path.join(out, "Raw_Data") - os.makedirs(excel_path) - os.makedirs(atlas_path) - os.makedirs(raw_path) - self.excel_path = excel_path - self.raw_path = raw_path - self.atlas_path = atlas_path - - # Read the metadata - results_path = os.path.join(self.test_path, code) - self.metadata = self._read_metadata_run(results_path) - - ''' - @abc.abstractmethod - def single_postprocess(self): - """ - To be executed when a single pp is requested - """ - ''' - - ''' - @abc.abstractmethod - def compare(self): - """ - To be executed when a comparison is requested - 
""" - ''' - - @abc.abstractmethod - def _get_output_files(results_path): - """ - To be executed when a comparison is requested - """ - - @abc.abstractmethod - def _read_code_version(self, pathtofile): - """ - To be executed when a comparison is requested - """ - - ''' - @staticmethod - def _get_output_files(results_path, code): - """ - Recover the output files from a directory - - Parameters - ---------- - results_path : str or path - path where the results are contained. - code : str - code that generated the output ('mcnp' or 'openmc') - - Raises - ------ - FileNotFoundError - if the required files are not found. - NotImplementedError - if the code is not supported. - - Returns - ------- - file1 : path - path to the first file - file2 : path - path to the second file (only for mcnp) - - """ - file1 = None - file2 = None - - for file_name in os.listdir(results_path): - if code in ["mcnp", "d1s"]: - if file_name[-1] == "m": - file1 = file_name - elif file_name[-1] == "o": - file2 = file_name - elif code == "openmc": - if file_name.endswith(".out"): - file1 = file_name - elif file_name.startswith("statepoint"): - file2 = file_name - else: - raise NotImplementedError( - f"The code '{code}' is not currently supported." - ) - - if file1 is None or (code in ["mcnp", "d1s"] and file2 is None): - raise FileNotFoundError( - f"The following path does not contain the required files for {code} output: {results_path}" - ) - - file1 = os.path.join(results_path, file1) - file2 = os.path.join(results_path, file2) if file2 else None - - return file1, file2 - ''' - - def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: - """Retrieve the metadata from the run - - Parameters - ---------- - pathtofile : os.PathLike - path to metadata file - - Returns - ------- - dict - metadata dictionary - """ - try: - with open( - os.path.join(pathtofile, "metadata.json"), - "r", - encoding="utf-8", - ) as file: - metadata = json.load(file) - except FileNotFoundError: - logging.warning("No metadata file found at %s", pathtofile) - metadata = {} - - metadata["jade_version"] = __version__ - metadata["code_version"] = self._read_code_version(pathtofile) - - return metadata - - def single_postprocess(self): - """ - Execute the full post-processing of a single library (i.e. excel, - raw data and atlas) - - Returns - ------- - None. - - """ - print(" Generating Excel Recap...") - self._generate_single_excel_output() - self._print_raw() - - print(" Creating Atlas...") - outpath = os.path.join(self.atlas_path, "tmp") - os.mkdir(outpath) - - # Get atlas configuration - atl_cnf = pd.read_excel(self.cnf_path, sheet_name="Atlas") - atl_cnf.set_index("Tally", inplace=True) - - # Printing Atlas - template = template = os.path.join(self.path_templates, "AtlasTemplate.docx") - atlas = at.Atlas(template, self.testname + "_" + self.lib) - - # Iterate over each type of plot (first one is quantity - # and second one the measure unit) - for plot_type in list(atl_cnf.columns)[2:]: - print(" Plotting : " + plot_type) - atlas.doc.add_heading("Plot type: " + plot_type, level=1) - # Keep only tallies to plot - atl_cnf_plot = atl_cnf[atl_cnf[plot_type]] - for tally_num in tqdm(atl_cnf_plot.index, desc="Tallies"): - try: - output = self.outputs[self.code][tally_num] - except KeyError: - fatal_exception( - "tally n. 
" - + str(tally_num) - + " is in config but not in the MCNP output" - ) - vals_df = output["Value"] - err_df = output["Error"] - quantity = str(atl_cnf_plot["Quantity"].loc[tally_num]) - unit = str(atl_cnf_plot["Unit"].loc[tally_num]) - xlabel = output["x_label"] - title = output["title"] - - atlas.doc.add_heading("Tally: " + title, level=2) - - columns = vals_df.columns - x = np.array(vals_df.index) - - for column in tqdm(columns): - if len(columns) > 1: - try: - txt = str(int(column)) - except ValueError: - # it is not convertible to int - txt = str(column) - - atlas.doc.add_heading(txt, level=3) - newtitle = title + " (" + txt + ")" - else: - newtitle = title - - # If total is present it has to be deleted - try: - vals_df.drop(["total"], inplace=True) - err_df.drop(["total"], inplace=True) - x = x[:-1] - except KeyError: - pass - - try: - values = vals_df[column].values - error = err_df[column].values - except KeyError: - # this means that the column is only one and we have - # two distinct DFs for values and errors - # depending on pandas version, these may be series or - # directly arrays - values = vals_df["Value"] - error = err_df["Error"] - if isinstance(values, pd.Series) or isinstance( - values, pd.DataFrame - ): - values = values.values - if isinstance(error, pd.Series) or isinstance( - error, pd.DataFrame - ): - error = error.values - - lib_name = self.session.conf.get_lib_name(self.lib) - lib = {"x": x, "y": values, "err": error, "ylabel": lib_name} - data = [lib] - - outname = "tmp" - plot = plotter.Plotter( - data, - newtitle, - outpath, - outname, - quantity, - unit, - xlabel, - self.testname, - ) - img_path = plot.plot(plot_type) - - atlas.insert_img(img_path) - atlas.save(self.atlas_path) - # Remove tmp images - shutil.rmtree(outpath) - - def compare(self): - """ - Generates the full comparison post-processing (excel and atlas) - - Returns - ------- - None. - - """ - print(" Generating Excel Recap...") - self._generate_comparison_excel_output() - - print(" Creating Atlas...") - outpath = os.path.join(self.atlas_path, "tmp") - os.mkdir(outpath) - - # Get atlas configuration - atl_cnf = pd.read_excel(self.cnf_path, sheet_name="Atlas") - atl_cnf.set_index("Tally", inplace=True) - - # Printing Atlas - template = os.path.join(self.path_templates, "AtlasTemplate.docx") - - atlas = at.Atlas(template, self.testname + "_" + self.name) - - # Recover data - outputs_dic = {} - for lib in self.lib: - # Recover lib output - out_path = os.path.join( - self.session.path_single, - lib, - self.testname, - self.code, - "Raw_Data", - lib + ".pickle", - ) - with open(out_path, "rb") as handle: - outputs = pickle.load(handle) - outputs_dic[lib] = outputs - - # Iterate over each type of plot (first one is quantity - # and second one the measure unit) - for plot_type in list(atl_cnf.columns)[2:]: - print(" Plotting : " + plot_type) - atlas.doc.add_heading("Plot type: " + plot_type, level=1) - # Keep only tallies to plot - atl_cnf_plot = atl_cnf[atl_cnf[plot_type]] - for tally_num in tqdm(atl_cnf_plot.index, desc="Tallies"): - # The last 'outputs' can be easily used for common data - try: - output = outputs_dic[lib][tally_num] - except KeyError: - fatal_exception( - "tally n. 
" - + str(tally_num) - + " is in config but not in the MCNP output" - ) - vals_df = output["Value"] - err_df = output["Error"] - quantity = str(atl_cnf_plot["Quantity"].loc[tally_num]) - unit = str(atl_cnf_plot["Unit"].loc[tally_num]) - xlabel = output["x_label"] - title = output["title"] - - atlas.doc.add_heading("Tally: " + title, level=2) - - columns = vals_df.columns - - for column in tqdm(columns): - if len(columns) > 1: - try: - txt = str(int(column)) - except ValueError: - # it is not convertible to int - txt = str(column) - - atlas.doc.add_heading(txt, level=3) - newtitle = title + " (" + txt + ")" - - else: - newtitle = title - data = [] - for lib in self.lib: - output = outputs_dic[lib][tally_num] - - # override values and errors - try: - vals_df = output["Value"] - err_df = output["Error"] - # If total is present it has to be deleted - try: - vals_df.drop(["total"], inplace=True) - err_df.drop(["total"], inplace=True) - except KeyError: - pass - values = vals_df[column].values - error = err_df[column].values - - except KeyError: - # this means that the column is only one and we - # havetwo distinct DFs for values and errors - values = vals_df["Value"].values - error = err_df["Error"].values - - x = np.array(vals_df.index) - - lib_name = self.session.conf.get_lib_name(lib) - lib_data = { - "x": x, - "y": values, - "err": error, - "ylabel": lib_name, - } - data.append(lib_data) - - outname = "tmp" - plot = plotter.Plotter( - data, - newtitle, - outpath, - outname, - quantity, - unit, - xlabel, - self.testname, - ) - img_path = plot.plot(plot_type) - - atlas.insert_img(img_path) - atlas.save(self.atlas_path) - - # Remove tmp images - shutil.rmtree(outpath) - - @staticmethod - def _reorder_df(df, x_set): - # First of all try order by number - df["index"] = pd.to_numeric(df[x_set], errors="coerce") - - # If they are all nan try with a normal sort - if df["index"].isnull().values.all(): - df.sort_values(x_set, inplace=True) - - # Otherwise keep on with the number sorting - else: - df.sort_values("index", inplace=True) - - del df["index"] - - # Try to reorder the columns - try: - df = df.reindex(sorted(df.columns), axis=1) - except TypeError: - # They are a mix of strings and ints, let's ignore it for - # the time being - pass - - return df - - def _print_raw(self): - for key, data in self.raw_data.items(): - file = os.path.join(self.raw_path, str(key) + ".csv") - data.to_csv(file, header=True, index=False) - - metadata_file = os.path.join(self.raw_path, "metadata.json") - with open(metadata_file, "w", encoding="utf-8") as outfile: - json.dump(self.metadata, outfile, indent=4) - - def _generate_single_excel_output(self): - # Get excel configuration - self.outputs = {} - self.results = {} - self.errors = {} - self.stat_checks = {} - ex_cnf = pd.read_excel(self.cnf_path, sheet_name="Excel") - ex_cnf.set_index("Tally", inplace=True) - - # Open the excel file - # name = "Generic_single.xlsx" - # template = os.path.join(os.getcwd(), "templates", name) - - outputs = {} - outpath = os.path.join( - self.excel_path, self.testname + "_" + self.lib + ".xlsx" - ) - - if self.openmc: - results_path = os.path.join(self.test_path, self.code) - _, outfile = self._get_output_files(results_path, "openmc") - sim_output = OpenMCOutput(outfile) - tally_numbers = sim_output.output.tally_numbers - tally_comments = sim_output.output.tally_comments - - if self.mcnp or self.d1s: - # ex = ExcelOutputSheet(template, outpath) - # Get results - # results = [] - # errors = [] - results_path = 
os.path.join(self.test_path, self.code) - # Get mfile and outfile and possibly meshtal file - meshtalfile = None - for file in os.listdir(results_path): - if file[-1] == "m": - mfile = os.path.join(results_path, file) - elif file[-1] == "o": - ofile = os.path.join(results_path, file) - elif file[-4:] == "msht": - meshtalfile = os.path.join(results_path, file) - # Parse output - sim_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) - tally_numbers = sim_output.tally_numbers - tally_comments = sim_output.tally_comments - - # Adjourn raw Data - self.raw_data = sim_output.tallydata - - # res, err = output.get_single_excel_data() - - for label in ["Value", "Error"]: - # keys = {} - for num, key in zip(tally_numbers, tally_comments): - # num = tally.tallyNumber - # key = tally.tallyComment[0] - # keys[num] = key # Memorize tally descriptions - tdata = sim_output.tallydata[num].copy() # Full tally data - try: - tally_settings = ex_cnf.loc[num] - except KeyError: - print(" Warning!: tally n." + str(num) + " is not in configuration") - continue - - # Re-Elaborate tdata Dataframe - x_name = tally_settings["x"] - x_tag = tally_settings["x name"] - y_name = tally_settings["y"] - y_tag = tally_settings["y name"] - ylim = tally_settings["cut Y"] - - if label == "Value": - outputs[num] = {"title": key, "x_label": x_tag} - - # select the index format - if x_name == "Energy": - idx_format = "0.00E+00" - # TODO all possible cases should be addressed - else: - idx_format = "0" - - if y_name != "tally": - tdata.set_index(x_name, inplace=True) - x_set = list(set(tdata.index)) - y_set = list(set(tdata[y_name].values)) - rows = [] - for xval in x_set: - try: - row = tdata.loc[xval, label].values - prev_len = len(row) - except AttributeError: - # There is only one total value, fill the rest with - # nan - row = [] - for i in range(prev_len - 1): - row.append(np.nan) - row.append(tdata.loc[xval, label]) - - rows.append(row) - - try: - main_value_df = pd.DataFrame(rows, columns=y_set, index=x_set) - main_value_df.index.name = x_name - except ValueError: - print( - CRED - + """ - A ValueError was triggered, a probable cause may be that more than 2 binnings - are defined in tally {}. This is a fatal exception, application will now - close""".format( - str(num) - ) - + CEND - ) - # Safely exit from excel and from application - # ex.save() - sys.exit() - - # reorder index (quick reset of the index) - main_value_df.reset_index(inplace=True) - main_value_df = self._reorder_df(main_value_df, x_name) - main_value_df.set_index(x_name, inplace=True) - # memorize for atlas - outputs[num][label] = main_value_df - # insert the df in pieces - # ex.insert_cutted_df( - # "B", - # main_value_df, - # label + "s", - # ylim, - # header=(key, "Tally n." + str(num)), - # index_name=x_tag, - # cols_name=y_tag, - # index_num_format=idx_format, - # ) - else: - # reorder df - try: - tdata = self._reorder_df(tdata, x_name) - except KeyError: - print( - CRED - + """ -{} is not available in tally {}. Please check the configuration file. -The application will now exit """.format( - x_name, str(num) - ) - + CEND - ) - # Safely exit from excel and from application - # ex.save() - sys.exit() - - if label == "Value": - del tdata["Error"] - elif label == "Error": - del tdata["Value"] - # memorize for atlas and set index - tdata.set_index(x_name, inplace=True) - outputs[num][label] = tdata - - # Insert DF - # ex.insert_df( - # "B", - # tdata, - # label + "s", - # print_index=True, - # header=(key, "Tally n." 
+ str(num)), - # ) - # memorize data for atlas - self.outputs[self.code] = outputs - # print(outputs) - # Dump them for comparisons - raw_outpath = os.path.join(self.raw_path, self.lib + ".pickle") - with open(raw_outpath, "wb") as outfile: - pickle.dump(outputs, outfile) - - # Compile general infos in the sheet - # ws = ex.current_ws - # title = self.testname + " RESULTS RECAP: " + label + "s" - # ws.range("A3").value = title - # ws.range("C1").value = self.lib - - # --- Compile statistical checks sheet --- - # ws = ex.wb.sheets["Statistical Checks"] - - dic_checks = sim_output.stat_checks - rows = [] - for num, key in zip(tally_numbers, tally_comments): - # num = tally.tallyNumber - # key = tally.tallyComment[0] - key_dic = key + " [" + str(num) + "]" - try: - stat = dic_checks[key_dic] - except (KeyError, TypeError): - stat = None - rows.append([num, key, stat]) - - stats = pd.DataFrame(rows) - stats.columns = ["Tally Number", "Tally Description", "Result"] - # ws.range("A9").options(index=False, header=False).value = df - - # ex.save() - exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) - - def _generate_comparison_excel_output(self): - # Get excel configuration - self.outputs = {} - self.results = {} - self.errors = {} - self.stat_checks = {} - ex_cnf = pd.read_excel(self.cnf_path, sheet_name="Excel") - ex_cnf.set_index("Tally", inplace=True) - - # Open the excel file - # name_tag = "Generic_comparison.xlsx" - # template = os.path.join(os.getcwd(), "templates", name_tag) - - if self.mcnp or self.d1s: - mcnp_outputs = {} - comps = {} - abs_diffs = {} - std_devs = {} - for reflib, tarlib, name in self.couples: - lib_to_comp = name - outfolder_path = self.excel_path - outpath = os.path.join( - outfolder_path, "Comparison_" + name + f"_{self.code}.xlsx" - ) - - # ex = ExcelOutputSheet(template, outpath) - # Get results - - # for lib in to_read: - # results_path = self.test_path[lib] - for lib, results_path in { - reflib: os.path.join(self.test_path[reflib], self.code), - tarlib: os.path.join(self.test_path[tarlib], self.code), - }.items(): - # Get mfile and outfile and possibly meshtal file - meshtalfile = None - for file in os.listdir(results_path): - if file[-1] == "m": - mfile = os.path.join(results_path, file) - elif file[-1] == "o": - ofile = os.path.join(results_path, file) - elif file[-4:] == "msht": - meshtalfile = os.path.join(results_path, file) - # Parse output - mcnp_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) - mcnp_outputs[lib] = mcnp_output - # Build the comparison - for label in ["Value", "Error"]: - for tally in mcnp_outputs[reflib].mctal.tallies: - num = tally.tallyNumber - key = tally.tallyComment[0] - - # Full tally data - tdata_ref = mcnp_outputs[reflib].tallydata[num].copy() - tdata_tar = mcnp_outputs[tarlib].tallydata[num].copy() - try: - tally_settings = ex_cnf.loc[num] - except KeyError: - print( - " Warning!: tally n." 
- + str(num) - + " is not in configuration" - ) - continue - - # Re-Elaborate tdata Dataframe - x_name = tally_settings["x"] - x_tag = tally_settings["x name"] - y_name = tally_settings["y"] - # y_tag = tally_settings["y name"] - # ylim = tally_settings["cut Y"] - # select the index format - if label == "Value": - for dic in [comps, abs_diffs, std_devs]: - dic[num] = {"title": key, "x_label": x_tag} - - # if x_name == "Energy": - # idx_format = "0.00E+00" - # # TODO all possible cases should be addressed - # else: - # idx_format = "0" - - if y_name != "tally": - tdata_ref.set_index(x_name, inplace=True) - tdata_tar.set_index(x_name, inplace=True) - x_set = list(set(tdata_ref.index)) - y_set = list(set(tdata_ref[y_name].values)) - rows_fin = [] - rows_abs_diff = [] - rows_std_dev = [] - for xval in x_set: - try: - ref = tdata_ref.loc[xval, "Value"].values - ref_err = tdata_ref.loc[xval, "Error"].values - tar = tdata_tar.loc[xval, "Value"].values - # !!! True divide warnings are suppressed !!! - with np.errstate(divide="ignore", invalid="ignore"): - row_fin = (ref - tar) / ref - row_abs_diff = ref - tar - row_std_dev = row_abs_diff / (ref_err * ref) - prev_len = len(ref) - except AttributeError: - # This is raised when total values are - # collected only for one bin. - # the rest needs to be filled by nan - ref = tdata_ref.loc[xval, "Value"] - ref_err = tdata_ref.loc[xval, "Error"] - tar = tdata_tar.loc[xval, "Value"] - row_fin = [] - row_abs_diff = [] - row_std_dev = [] - for i in range(prev_len - 1): - row_fin.append(np.nan) - row_abs_diff.append(np.nan) - row_std_dev.append(np.nan) - row_fin.append((ref - tar) / ref) - row_abs_diff.append(ref - tar) - row_std_dev.append((ref - tar) / (ref_err * ref)) - - rows_fin.append(row_fin) - rows_abs_diff.append(row_abs_diff) - rows_std_dev.append(row_std_dev) - try: - final = pd.DataFrame( - rows_fin, columns=y_set, index=x_set - ) - abs_diff = pd.DataFrame( - rows_abs_diff, columns=y_set, index=x_set - ) - std_dev = pd.DataFrame( - rows_std_dev, columns=y_set, index=x_set - ) - for df in [final, abs_diff, std_dev]: - df.index.name = x_name - df.replace(np.nan, "Not Available", inplace=True) - df.replace(float(0), "Identical", inplace=True) - df.replace(-np.inf, "Reference = 0", inplace=True) - df.replace(1, "Target = 0", inplace=True) - except ValueError: - print( - CRED - + """ - A ValueError was triggered, a probable cause may be that more than 2 binnings - are defined in tally {}. This is a fatal exception, application will now - close""".format( - str(num) - ) - + CEND - ) - # Safely exit from excel and from application - sys.exit() - - # reorder index and quick index reset - for df in [final, abs_diff, std_dev]: - df.reset_index(inplace=True) - df = self._reorder_df(df, x_name) - df.set_index(x_name, inplace=True) - comps[num][label] = final - abs_diffs[num][label] = abs_diff - std_devs[num][label] = std_dev - # insert the df in pieces - # ex.insert_cutted_df( - # "B", - # main_value_df, - # "Comparison", - # ylim, - # header=(key, "Tally n." + str(num)), - # index_name=x_tag, - # cols_name=y_tag, - # index_num_format=idx_format, - # values_format="0.00%", - # ) - else: - # reorder dfs - try: - tdata_ref = self._reorder_df(tdata_ref, x_name) - except KeyError: - print( - CRED - + """ - {} is not available in tally {}. Please check the configuration file. 
- The application will now exit """.format( - x_name, str(num) - ) - + CEND - ) - # Safely exit from excel and from application - sys.exit() - - del tdata_ref["Error"] - tdata_ref.set_index(x_name, inplace=True) - - tdata_tar = self._reorder_df(tdata_tar, x_name) - del tdata_tar["Error"] - tdata_tar.set_index(x_name, inplace=True) - - # !!! True divide warnings are suppressed !!! - with np.errstate(divide="ignore", invalid="ignore"): - comp_df = (tdata_ref - tdata_tar) / tdata_ref - abs_diff_df = tdata_ref - tdata_tar - std_dev_df = abs_diff_df - comps[num][label] = comp_df - abs_diffs[num][label] = abs_diff_df - std_devs[num][label] = abs_diff_df - # Insert DF - # ex.insert_df( - # "B", - # df, - # "Comparison", - # print_index=True, - # header=(key, "Tally n." + str(num)), - # values_format="0.00%", - # ) - - # Compile general infos in the sheet - # ws = ex.current_ws - # title = self.testname + " RESULTS RECAP: Comparison" - # ws.range("A3").value = title - # ws.range("C1").value = tarlib + " Vs " + reflib - - # Add single pp sheets - # for lib in [reflib, tarlib]: - # cp = self.state.get_path( - # "single", [lib, self.testname, "Excel"]) - # file = os.listdir(cp)[0] - # cp = os.path.join(cp, file) - # ex.copy_sheets(cp) - - # ex.save() - self.outputs[self.code] = comps - exsupp.comp_excel_writer( - self, - outpath, - lib_to_comp, - self.testname, - comps, - abs_diffs, - std_devs, - ) - -#class BenchmarkOutput(AbstractOutput): - ''' +class AbstractOutput(abc.ABC): def __init__(self, lib: str, code: str, testname: str, session: Session): """ General class for a Benchmark output @@ -1154,10 +190,100 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # Read the metadata results_path = os.path.join(self.test_path, code) - self.metadata = self._read_metadata_run(results_path) - ''' + self.metadata = self._read_metadata_run(results_path) + + ''' + @abc.abstractmethod + def single_postprocess(self): + """ + To be executed when a single pp is requested + """ + ''' + + ''' + @abc.abstractmethod + def compare(self): + """ + To be executed when a comparison is requested + """ + ''' + + @abc.abstractmethod + def initialise(self, *args): + """ + To be executed when a comparison is requested + """ + + @abc.abstractmethod + def _get_output_files(self, results_path): + """ + To be executed when a comparison is requested + """ + + @abc.abstractmethod + def _read_code_version(self, pathtofile): + """ + To be executed when a comparison is requested + """ + + ''' + @staticmethod + def _get_output_files(results_path, code): + """ + Recover the output files from a directory + + Parameters + ---------- + results_path : str or path + path where the results are contained. + code : str + code that generated the output ('mcnp' or 'openmc') + + Raises + ------ + FileNotFoundError + if the required files are not found. + NotImplementedError + if the code is not supported. + + Returns + ------- + file1 : path + path to the first file + file2 : path + path to the second file (only for mcnp) + + """ + file1 = None + file2 = None + + for file_name in os.listdir(results_path): + if code in ["mcnp", "d1s"]: + if file_name[-1] == "m": + file1 = file_name + elif file_name[-1] == "o": + file2 = file_name + elif code == "openmc": + if file_name.endswith(".out"): + file1 = file_name + elif file_name.startswith("statepoint"): + file2 = file_name + else: + raise NotImplementedError( + f"The code '{code}' is not currently supported." 
+ ) + + if file1 is None or (code in ["mcnp", "d1s"] and file2 is None): + raise FileNotFoundError( + f"The following path does not contain the required files for {code} output: {results_path}" + ) + + file1 = os.path.join(results_path, file1) + file2 = os.path.join(results_path, file2) if file2 else None + return file1, file2 ''' + def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: """Retrieve the metadata from the run @@ -1186,46 +312,7 @@ def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: metadata["code_version"] = self._read_code_version(pathtofile) return metadata - ''' - - ''' - def _read_code_version(self, pathtofile: os.PathLike) -> str | None: - """Read the code version from the output files or in other ways depending - on the used code. - - Parameters - ---------- - pathtofile : os.PathLike - path to the folder where results are stored - - Returns - ------- - str | None - version of the code used to run the benchmarks - """ - if self.testname in ["Sphere", "SphereSDDR"]: - if not os.path.exists(pathtofile): - # this can happen the first time - return None - - if self.mcnp or self.d1s: - _, mcnp_ofile = self._get_output_files(pathtofile, "mcnp") - return self._read_mcnp_code_version(mcnp_ofile) - elif self.openmc: - _, openmc_sfile = self._get_output_files(pathtofile, "openmc") - return self._read_openmc_code_version(openmc_sfile) - elif self.serpent: - pass - - return None - ''' - - ''' - def _read_serpent_code_version(self, ofile: os.PathLike) -> str | None: - pass - ''' - ''' def single_postprocess(self): """ Execute the full post-processing of a single library (i.e. excel, @@ -1341,9 +428,7 @@ def single_postprocess(self): atlas.save(self.atlas_path) # Remove tmp images shutil.rmtree(outpath) - ''' - ''' def compare(self): """ Generates the full comparison post-processing (excel and atlas) @@ -1478,9 +563,7 @@ def compare(self): # Remove tmp images shutil.rmtree(outpath) - ''' - ''' @staticmethod def _reorder_df(df, x_set): # First of all try order by number @@ -1505,9 +588,16 @@ def _reorder_df(df, x_set): pass return df - ''' + + def _print_raw(self): + for key, data in self.raw_data.items(): + file = os.path.join(self.raw_path, str(key) + ".csv") + data.to_csv(file, header=True, index=False) + + metadata_file = os.path.join(self.raw_path, "metadata.json") + with open(metadata_file, "w", encoding="utf-8") as outfile: + json.dump(self.metadata, outfile, indent=4) - ''' def _generate_single_excel_output(self): # Get excel configuration self.outputs = {} @@ -1711,18 +801,6 @@ def _generate_single_excel_output(self): # ex.save() exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) - ''' - - ''' - def _print_raw(self): - for key, data in self.raw_data.items(): - file = os.path.join(self.raw_path, str(key) + ".csv") - data.to_csv(file, header=True, index=False) - - metadata_file = os.path.join(self.raw_path, "metadata.json") - with open(metadata_file, "w", encoding="utf-8") as outfile: - json.dump(self.metadata, outfile, indent=4) - ''' def _generate_comparison_excel_output(self): # Get excel configuration @@ -1965,7 +1043,10 @@ def _generate_comparison_excel_output(self): ) class MCNPoutput(AbstractOutput): - def __init__(self, mctal_file, output_file, meshtal_file=None): + def __init__(self): + super().__init__(self) + + def initialise(self, mctal_file, output_file, meshtal_file=None): """ Class representing all outputs coming from and MCNP run @@ -2131,8 +1212,13 @@ def _get_output_files(self, results_path): return file1, file2 
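A note on the dispatch introduced in this patch: BenchmarkOutput.__init__ returns the chosen subclass instance, but a Python __init__ must return None, so instantiating BenchmarkOutput for any of the supported codes would raise TypeError at call time; PATCH 17 later drops the class and performs the same selection with an if/elif chain inside postprocess._get_output. A minimal, self-contained sketch of that selection written as a factory function follows. The two stub classes merely stand in for the real MCNPoutput and OpenMCOutput handlers, and every name apart from the "mcnp"/"d1s"/"openmc" tags is illustrative rather than taken from the repository.

class _MCNPOutputStub:
    def __init__(self, lib, code, testname, session):
        # The real handlers receive the same four arguments from the session.
        self.lib = lib
        self.code = code
        self.testname = testname
        self.session = session


class _OpenMCOutputStub(_MCNPOutputStub):
    pass


def get_benchmark_output(lib, code, testname, session):
    """Return the concrete output handler for the requested transport code."""
    dispatch = {
        "mcnp": _MCNPOutputStub,
        "d1s": _MCNPOutputStub,
        "openmc": _OpenMCOutputStub,
    }
    try:
        cls = dispatch[code]
    except KeyError as exc:
        raise NotImplementedError(f"Code '{code}' has not been implemented") from exc
    return cls(lib, code, testname, session)


# Example call: selects the OpenMC handler without touching the MCNP one.
handler = get_benchmark_output("31c", "openmc", "Sphere", session=None)
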
class OpenMCOutput(AbstractOutput): - def __init__(self, output_path): + def __init__(self): + super().__init__(self) + + def initialise(self, output_path): self.output = omc.OpenMCSimOutput(output_path) + self.tally_numbers = self.output.tally_numbers + self.tally_comments = self.output.tally_comments self.tallydata, self.totalbin = self.process_tally() self.stat_checks = None From 870025df31fc3521590fbc3e622a026d4115ad1a Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Mon, 4 Nov 2024 17:03:21 +0000 Subject: [PATCH 16/53] Need to complete abstracts --- jade/output.py | 78 ++------------------------------------------------ 1 file changed, 3 insertions(+), 75 deletions(-) diff --git a/jade/output.py b/jade/output.py index 1ede6136..b6f19a38 100644 --- a/jade/output.py +++ b/jade/output.py @@ -66,6 +66,8 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): return MCNPoutput(lib, code, testname, session) elif code == 'openmc': return OpenMCOutput(lib, code, testname, session) + else: + raise NotImplementedError class AbstractOutput(abc.ABC): def __init__(self, lib: str, code: str, testname: str, session: Session): @@ -192,22 +194,6 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): results_path = os.path.join(self.test_path, code) self.metadata = self._read_metadata_run(results_path) - ''' - @abc.abstractmethod - def single_postprocess(self): - """ - To be executed when a single pp is requested - """ - ''' - - ''' - @abc.abstractmethod - def compare(self): - """ - To be executed when a comparison is requested - """ - ''' - @abc.abstractmethod def initialise(self, *args): """ @@ -225,64 +211,6 @@ def _read_code_version(self, pathtofile): """ To be executed when a comparison is requested """ - - ''' - @staticmethod - def _get_output_files(results_path, code): - """ - Recover the output files from a directory - - Parameters - ---------- - results_path : str or path - path where the results are contained. - code : str - code that generated the output ('mcnp' or 'openmc') - - Raises - ------ - FileNotFoundError - if the required files are not found. - NotImplementedError - if the code is not supported. - - Returns - ------- - file1 : path - path to the first file - file2 : path - path to the second file (only for mcnp) - - """ - file1 = None - file2 = None - - for file_name in os.listdir(results_path): - if code in ["mcnp", "d1s"]: - if file_name[-1] == "m": - file1 = file_name - elif file_name[-1] == "o": - file2 = file_name - elif code == "openmc": - if file_name.endswith(".out"): - file1 = file_name - elif file_name.startswith("statepoint"): - file2 = file_name - else: - raise NotImplementedError( - f"The code '{code}' is not currently supported." 
- ) - - if file1 is None or (code in ["mcnp", "d1s"] and file2 is None): - raise FileNotFoundError( - f"The following path does not contain the required files for {code} output: {results_path}" - ) - - file1 = os.path.join(results_path, file1) - file2 = os.path.join(results_path, file2) if file2 else None - - return file1, file2 - ''' def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: """Retrieve the metadata from the run @@ -618,7 +546,7 @@ def _generate_single_excel_output(self): if self.openmc: results_path = os.path.join(self.test_path, self.code) - _, outfile = self._get_output_files(results_path, "openmc") + outfiles = self._get_output_files(results_path) sim_output = OpenMCOutput(outfile) tally_numbers = sim_output.output.tally_numbers tally_comments = sim_output.output.tally_comments From 18ae4659fc25eda133a3cdcb211a703dcb599bb8 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 08:46:02 +0000 Subject: [PATCH 17/53] Temp --- jade/output.py | 15 +++------------ jade/postprocess.py | 7 ++++++- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/jade/output.py b/jade/output.py index b6f19a38..d8ec77bf 100644 --- a/jade/output.py +++ b/jade/output.py @@ -60,15 +60,6 @@ CRED = "\033[91m" CEND = "\033[0m" -class BenchmarkOutput: - def __init__(self, lib: str, code: str, testname: str, session: Session): - if code in ['mcnp', 'd1s']: - return MCNPoutput(lib, code, testname, session) - elif code == 'openmc': - return OpenMCOutput(lib, code, testname, session) - else: - raise NotImplementedError - class AbstractOutput(abc.ABC): def __init__(self, lib: str, code: str, testname: str, session: Session): """ @@ -567,7 +558,7 @@ def _generate_single_excel_output(self): elif file[-4:] == "msht": meshtalfile = os.path.join(results_path, file) # Parse output - sim_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) + sim_output = MCNPOutput(mfile, ofile, meshtal_file=meshtalfile) tally_numbers = sim_output.tally_numbers tally_comments = sim_output.tally_comments @@ -774,7 +765,7 @@ def _generate_comparison_excel_output(self): elif file[-4:] == "msht": meshtalfile = os.path.join(results_path, file) # Parse output - mcnp_output = MCNPoutput(mfile, ofile, meshtal_file=meshtalfile) + mcnp_output = MCNPOutput(mfile, ofile, meshtal_file=meshtalfile) mcnp_outputs[lib] = mcnp_output # Build the comparison for label in ["Value", "Error"]: @@ -970,7 +961,7 @@ def _generate_comparison_excel_output(self): std_devs, ) -class MCNPoutput(AbstractOutput): +class MCNPOutput(AbstractOutput): def __init__(self): super().__init__(self) diff --git a/jade/postprocess.py b/jade/postprocess.py index 5b4559d5..7e5fd5ee 100644 --- a/jade/postprocess.py +++ b/jade/postprocess.py @@ -186,6 +186,11 @@ def _get_output(action, code, testname, lib, session): return False else: - out = bencho.BenchmarkOutput(lib, code, testname, session) + if code in ['mcnp', 'd1s']: + return bencho.MCNPOutput(lib, code, testname, session) + elif code == 'openmc': + return bencho.OpenMCOutput(lib, code, testname, session) + else: + raise NotImplementedError('Code has not been implemented') return out From 09d5424fb51cc13c33fb788f39553797515a45ca Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 09:11:21 +0000 Subject: [PATCH 18/53] Added revised benchmark output hierarchy --- jade/openmc.py | 2 +- jade/output.py | 222 ++++++++++++++++++++++++------------------------- 2 files changed, 110 insertions(+), 114 deletions(-) diff --git a/jade/openmc.py b/jade/openmc.py index 
3995bca8..e5a71bf8 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -233,7 +233,7 @@ def write(self, path: str) -> None: self.materials.export_to_xml(os.path.join(path, "materials.xml")) -class OpenMCSimOutput: +class OpenMCStatePoint: def __init__(self, spfile_path: str) -> None: """Class for handling OpenMC tatepoint file diff --git a/jade/output.py b/jade/output.py index d8ec77bf..4236a628 100644 --- a/jade/output.py +++ b/jade/output.py @@ -60,7 +60,7 @@ CRED = "\033[91m" CEND = "\033[0m" -class AbstractOutput(abc.ABC): +class AbstractBenchmarkOutput(abc.ABC): def __init__(self, lib: str, code: str, testname: str, session: Session): """ General class for a Benchmark output @@ -183,13 +183,7 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # Read the metadata results_path = os.path.join(self.test_path, code) - self.metadata = self._read_metadata_run(results_path) - - @abc.abstractmethod - def initialise(self, *args): - """ - To be executed when a comparison is requested - """ + self.metadata = self._read_metadata_run(results_path) @abc.abstractmethod def _get_output_files(self, results_path): @@ -537,7 +531,7 @@ def _generate_single_excel_output(self): if self.openmc: results_path = os.path.join(self.test_path, self.code) - outfiles = self._get_output_files(results_path) + outfile = self._get_output_files(results_path) sim_output = OpenMCOutput(outfile) tally_numbers = sim_output.output.tally_numbers tally_comments = sim_output.output.tally_comments @@ -961,104 +955,10 @@ def _generate_comparison_excel_output(self): std_devs, ) -class MCNPOutput(AbstractOutput): +class MCNPBenchmarkOutput(AbstractBenchmarkOutput): def __init__(self): super().__init__(self) - def initialise(self, mctal_file, output_file, meshtal_file=None): - """ - Class representing all outputs coming from and MCNP run - - Parameters - ---------- - mctal_file : path like object - path to the mctal file. - output_file : path like object - path to the outp file. - meshtal_file : path like object, optional - path to the meshtal file. The default is None. - - Returns - ------- - None. - - """ - self.mctal_file = mctal_file # path to mcnp mctal file - self.output_file = output_file # path to mcnp output file - self.meshtal_file = meshtal_file # path to mcnp meshtal file - - # Read and parse the mctal file - mctal = Mctal(mctal_file) - # --- restore cabability to collapse segment and cells --- - # The double binning Surfaces/cells with segments can create - # issues for JADE since if another binning is added - # (such as energy) it is not supported. Nevertheless, - # the additional segmentation can be quite useful and this can be - # collapsed de facto in a single geometrical binning - tallydata = mctal.tallydata - total_bin = mctal.totalbin - for dictionary in [tallydata, total_bin]: - for _, df in dictionary.items(): - if df is not None: - if ( - "Cells" in df.columns - and "Segments" in df.columns - and len(df) > 1 - ): - # Then we can collapse this in a single geometrical binning - values = [] - for cell, segment in zip(df.Cells, df.Segments): - val = str(int(cell)) + "-" + str(int(segment)) - values.append(val) - df["Cells-Segments"] = values - # delete the collapsed columns - del df["Cells"] - del df["Segments"] - - # another thing that can happen mostly for d1s is that there - # are user bins with fake total bin, i.e., there is only one bin - # and a total bin having the same value. 
This is a problem - # since f4enix parser will not drop the "fake" additional column - try: - usr_bins = set(df["User"].to_list()) - if len(usr_bins) <= 2 and "total" in usr_bins: - # then the column does not add any additional info, to drop - del df["User"] - # and drop the duplicates - df.drop_duplicates(inplace=True) - except KeyError: - pass # no user column - - self.mctal = mctal - self.tally_comments = [tally.tallyNumber for tally in self.mctal.tallies] - self.tally_numbers = [tally.tallyComment[0] for tally in self.mctal.tallies] - self.tallydata = tallydata - self.totalbin = total_bin - # Read the output file - self.out = Output(self.output_file) - self.out.stat_checks = self.out.get_statistical_checks_tfc_bins() - self.out.stat_checks = self.out.assign_tally_description( - self.out.stat_checks, self.mctal.tallies - ) - self.stat_checks = self.out.stat_checks - # Read the meshtal file - if meshtal_file is not None: - self.meshtal = Meshtal(meshtal_file) - self.meshtal.readMesh() - # Extract the available 1D to be merged with normal tallies - for msh in self.meshtal.mesh.values(): - if isinstance(msh, Fmesh1D): - tallynum, tallydata, comment = msh.convert2tally() - # Add them to the tallly data - self.tallydata[tallynum] = tallydata - self.totalbin[tallynum] = None - # Create fake tallies to be added to the mctal - dummyTally = Tally(tallynum) - dummyTally.tallyComment = [comment] - self.mctal.tallies.append(dummyTally) - else: - continue - def _read_code_version(self, ofile: os.PathLike) -> str | None: """Read MCNP code version from the output file @@ -1130,17 +1030,10 @@ def _get_output_files(self, results_path): return file1, file2 -class OpenMCOutput(AbstractOutput): +class OpenMCBenchmarkOutput(AbstractBenchmarkOutput): def __init__(self): super().__init__(self) - def initialise(self, output_path): - self.output = omc.OpenMCSimOutput(output_path) - self.tally_numbers = self.output.tally_numbers - self.tally_comments = self.output.tally_comments - self.tallydata, self.totalbin = self.process_tally() - self.stat_checks = None - def _read_code_version(self, spfile: os.PathLike) -> str | None: """Read OpenMC code version from the statepoint file @@ -1154,7 +1047,7 @@ def _read_code_version(self, spfile: os.PathLike) -> str | None: str | None version of the OpenMC code used to run the benchmark """ - statepoint = omc.OpenMCSimOutput(spfile) + statepoint = omc.OpenMCStatePoint(spfile) version = statepoint.version return version @@ -1201,6 +1094,109 @@ def _get_output_files(self, results_path): return file1, file2 +class OpenMCSimOutput: + def __init__(self, mctal_file, output_file, meshtal_file=None): + """ + Class representing all outputs coming from and MCNP run + + Parameters + ---------- + mctal_file : path like object + path to the mctal file. + output_file : path like object + path to the outp file. + meshtal_file : path like object, optional + path to the meshtal file. The default is None. + + Returns + ------- + None. + + """ + self.mctal_file = mctal_file # path to mcnp mctal file + self.output_file = output_file # path to mcnp output file + self.meshtal_file = meshtal_file # path to mcnp meshtal file + + # Read and parse the mctal file + mctal = Mctal(mctal_file) + # --- restore cabability to collapse segment and cells --- + # The double binning Surfaces/cells with segments can create + # issues for JADE since if another binning is added + # (such as energy) it is not supported. 
Nevertheless, + # the additional segmentation can be quite useful and this can be + # collapsed de facto in a single geometrical binning + tallydata = mctal.tallydata + total_bin = mctal.totalbin + for dictionary in [tallydata, total_bin]: + for _, df in dictionary.items(): + if df is not None: + if ( + "Cells" in df.columns + and "Segments" in df.columns + and len(df) > 1 + ): + # Then we can collapse this in a single geometrical binning + values = [] + for cell, segment in zip(df.Cells, df.Segments): + val = str(int(cell)) + "-" + str(int(segment)) + values.append(val) + df["Cells-Segments"] = values + # delete the collapsed columns + del df["Cells"] + del df["Segments"] + + # another thing that can happen mostly for d1s is that there + # are user bins with fake total bin, i.e., there is only one bin + # and a total bin having the same value. This is a problem + # since f4enix parser will not drop the "fake" additional column + try: + usr_bins = set(df["User"].to_list()) + if len(usr_bins) <= 2 and "total" in usr_bins: + # then the column does not add any additional info, to drop + del df["User"] + # and drop the duplicates + df.drop_duplicates(inplace=True) + except KeyError: + pass # no user column + + self.mctal = mctal + self.tally_comments = [tally.tallyNumber for tally in self.mctal.tallies] + self.tally_numbers = [tally.tallyComment[0] for tally in self.mctal.tallies] + self.tallydata = tallydata + self.totalbin = total_bin + # Read the output file + self.out = Output(self.output_file) + self.out.stat_checks = self.out.get_statistical_checks_tfc_bins() + self.out.stat_checks = self.out.assign_tally_description( + self.out.stat_checks, self.mctal.tallies + ) + self.stat_checks = self.out.stat_checks + # Read the meshtal file + if meshtal_file is not None: + self.meshtal = Meshtal(meshtal_file) + self.meshtal.readMesh() + # Extract the available 1D to be merged with normal tallies + for msh in self.meshtal.mesh.values(): + if isinstance(msh, Fmesh1D): + tallynum, tallydata, comment = msh.convert2tally() + # Add them to the tallly data + self.tallydata[tallynum] = tallydata + self.totalbin[tallynum] = None + # Create fake tallies to be added to the mctal + dummyTally = Tally(tallynum) + dummyTally.tallyComment = [comment] + self.mctal.tallies.append(dummyTally) + else: + continue + +class OpenMCSimOutput: + def __init__(self, output_path): + self.output = omc.OpenMCStatePoint(output_path) + self.tally_numbers = self.output.tally_numbers + self.tally_comments = self.output.tally_comments + self.tallydata, self.totalbin = self.process_tally() + self.stat_checks = None + def _create_dataframes(self, tallies): tallydata = {} totalbin = {} From 3995fdeecaba4a97f835a0dc2c183703cbfd911d Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 09:36:35 +0000 Subject: [PATCH 19/53] Added abstract parse_output_data --- jade/output.py | 67 +++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/jade/output.py b/jade/output.py index 4236a628..ee1171fb 100644 --- a/jade/output.py +++ b/jade/output.py @@ -184,15 +184,21 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # Read the metadata results_path = os.path.join(self.test_path, code) self.metadata = self._read_metadata_run(results_path) - + + @abc.abstractmethod + def _read_code_version(self, pathtofile): + """ + To be executed when a comparison is requested + """ + @abc.abstractmethod def _get_output_files(self, results_path): """ To be executed 
when a comparison is requested """ - @abc.abstractmethod - def _read_code_version(self, pathtofile): + @abc.abstractmethod + def parse_output_data(self, results_path): """ To be executed when a comparison is requested """ @@ -529,33 +535,10 @@ def _generate_single_excel_output(self): self.excel_path, self.testname + "_" + self.lib + ".xlsx" ) - if self.openmc: - results_path = os.path.join(self.test_path, self.code) - outfile = self._get_output_files(results_path) - sim_output = OpenMCOutput(outfile) - tally_numbers = sim_output.output.tally_numbers - tally_comments = sim_output.output.tally_comments - - if self.mcnp or self.d1s: - # ex = ExcelOutputSheet(template, outpath) - # Get results - # results = [] - # errors = [] - results_path = os.path.join(self.test_path, self.code) - # Get mfile and outfile and possibly meshtal file - meshtalfile = None - for file in os.listdir(results_path): - if file[-1] == "m": - mfile = os.path.join(results_path, file) - elif file[-1] == "o": - ofile = os.path.join(results_path, file) - elif file[-4:] == "msht": - meshtalfile = os.path.join(results_path, file) - # Parse output - sim_output = MCNPOutput(mfile, ofile, meshtal_file=meshtalfile) - tally_numbers = sim_output.tally_numbers - tally_comments = sim_output.tally_comments - + # Parse output + results_path = os.path.join(self.test_path, self.code) + sim_output, tally_numbers, tally_comments = self.parse_output_data(results_path) + # Adjourn raw Data self.raw_data = sim_output.tallydata @@ -1013,12 +996,15 @@ def _get_output_files(self, results_path): """ file1 = None file2 = None + file3 = None for file_name in os.listdir(results_path): if file_name[-1] == "m": file1 = file_name elif file_name[-1] == "o": file2 = file_name + elif file_name[-4] == 'msht': + file3 = file_name if file1 is None or file2 is None: raise FileNotFoundError( @@ -1027,9 +1013,17 @@ def _get_output_files(self, results_path): file1 = os.path.join(results_path, file1) if file1 else None file2 = os.path.join(results_path, file2) if file2 else None + file3 = os.path.join(results_path, file2) if file3 else None return file1, file2 + def parse_output_data(self, results_path): + mfile, ofile, meshtalfile = self._get_output_files(results_path) + sim_output = MCNPSimOutput(mfile, ofile, meshtal_file=meshtalfile) + tally_numbers = sim_output.tally_numbers + tally_comments = sim_output.tally_comments + return sim_output, tally_numbers, tally_comments + class OpenMCBenchmarkOutput(AbstractBenchmarkOutput): def __init__(self): super().__init__(self) @@ -1090,11 +1084,18 @@ def _get_output_files(self, results_path): ) file1 = os.path.join(results_path, file1) if file1 else None - file2 = os.path.join(results_path, file2) if file1 else None + file2 = os.path.join(results_path, file2) if file2 else None + + return file1, file2 - return file1, file2 + def parse_output_data(self, results_path): + outfile, sfile = self._get_output_files(results_path) + sim_output = OpenMCSimOutput(sfile) + tally_numbers = sim_output.output.tally_numbers + tally_comments = sim_output.output.tally_comments + return sim_output, tally_numbers, tally_comments -class OpenMCSimOutput: +class MCNPSimOutput: def __init__(self, mctal_file, output_file, meshtal_file=None): """ Class representing all outputs coming from and MCNP run From f3018da96d2b114381e254aa7e93821248d9403f Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 10:14:11 +0000 Subject: [PATCH 20/53] Restored excel comparison --- jade/output.py | 444 +++++++++++++++++++++----------------------- 
jade/postprocess.py | 6 +- 2 files changed, 216 insertions(+), 234 deletions(-) diff --git a/jade/output.py b/jade/output.py index ee1171fb..b2d988cf 100644 --- a/jade/output.py +++ b/jade/output.py @@ -203,7 +203,7 @@ def parse_output_data(self, results_path): To be executed when a comparison is requested """ - def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: + def _read_metadata_run(self, simulation_folder: os.PathLike) -> dict: """Retrieve the metadata from the run Parameters @@ -218,17 +218,17 @@ def _read_metadata_run(self, pathtofile: os.PathLike) -> dict: """ try: with open( - os.path.join(pathtofile, "metadata.json"), + os.path.join(simulation_folder, "metadata.json"), "r", encoding="utf-8", ) as file: metadata = json.load(file) except FileNotFoundError: - logging.warning("No metadata file found at %s", pathtofile) + logging.warning("No metadata file found at %s", simulation_folder) metadata = {} metadata["jade_version"] = __version__ - metadata["code_version"] = self._read_code_version(pathtofile) + metadata["code_version"] = self._read_code_version(simulation_folder) return metadata @@ -711,237 +711,222 @@ def _generate_comparison_excel_output(self): # name_tag = "Generic_comparison.xlsx" # template = os.path.join(os.getcwd(), "templates", name_tag) - if self.mcnp or self.d1s: - mcnp_outputs = {} - comps = {} - abs_diffs = {} - std_devs = {} - for reflib, tarlib, name in self.couples: - lib_to_comp = name - outfolder_path = self.excel_path - outpath = os.path.join( - outfolder_path, "Comparison_" + name + f"_{self.code}.xlsx" - ) + sim_outputs = {} + tally_numbers = {} + tally_comments = {} + comps = {} + abs_diffs = {} + std_devs = {} + for reflib, tarlib, name in self.couples: + lib_to_comp = name + outfolder_path = self.excel_path + outpath = os.path.join( + outfolder_path, "Comparison_" + name + f"_{self.code}.xlsx" + ) - # ex = ExcelOutputSheet(template, outpath) - # Get results - - # for lib in to_read: - # results_path = self.test_path[lib] - for lib, results_path in { - reflib: os.path.join(self.test_path[reflib], self.code), - tarlib: os.path.join(self.test_path[tarlib], self.code), - }.items(): - # Get mfile and outfile and possibly meshtal file - meshtalfile = None - for file in os.listdir(results_path): - if file[-1] == "m": - mfile = os.path.join(results_path, file) - elif file[-1] == "o": - ofile = os.path.join(results_path, file) - elif file[-4:] == "msht": - meshtalfile = os.path.join(results_path, file) - # Parse output - mcnp_output = MCNPOutput(mfile, ofile, meshtal_file=meshtalfile) - mcnp_outputs[lib] = mcnp_output - # Build the comparison - for label in ["Value", "Error"]: - for tally in mcnp_outputs[reflib].mctal.tallies: - num = tally.tallyNumber - key = tally.tallyComment[0] - - # Full tally data - tdata_ref = mcnp_outputs[reflib].tallydata[num].copy() - tdata_tar = mcnp_outputs[tarlib].tallydata[num].copy() + # ex = ExcelOutputSheet(template, outpath) + # Get results + + # for lib in to_read: + # results_path = self.test_path[lib] + for lib, results_path in { + reflib: os.path.join(self.test_path[reflib], self.code), + tarlib: os.path.join(self.test_path[tarlib], self.code), + }.items(): + # Parse output + sim_outputs[lib], tally_numbers[lib], tally_comments[lib] = self.parse_output_data(results_path) + # Build the comparison + for label in ["Value", "Error"]: + for num, key in zip(tally_numbers[reflib], tally_comments[reflib]): + # Full tally data + tdata_ref = sim_outputs[reflib].tallydata[num].copy() + tdata_tar = 
sim_outputs[tarlib].tallydata[num].copy() + try: + tally_settings = ex_cnf.loc[num] + except KeyError: + print( + " Warning!: tally n." + + str(num) + + " is not in configuration" + ) + continue + + # Re-Elaborate tdata Dataframe + x_name = tally_settings["x"] + x_tag = tally_settings["x name"] + y_name = tally_settings["y"] + # y_tag = tally_settings["y name"] + # ylim = tally_settings["cut Y"] + # select the index format + if label == "Value": + for dic in [comps, abs_diffs, std_devs]: + dic[num] = {"title": key, "x_label": x_tag} + + # if x_name == "Energy": + # idx_format = "0.00E+00" + # # TODO all possible cases should be addressed + # else: + # idx_format = "0" + + if y_name != "tally": + tdata_ref.set_index(x_name, inplace=True) + tdata_tar.set_index(x_name, inplace=True) + x_set = list(set(tdata_ref.index)) + y_set = list(set(tdata_ref[y_name].values)) + rows_fin = [] + rows_abs_diff = [] + rows_std_dev = [] + for xval in x_set: + try: + ref = tdata_ref.loc[xval, "Value"].values + ref_err = tdata_ref.loc[xval, "Error"].values + tar = tdata_tar.loc[xval, "Value"].values + # !!! True divide warnings are suppressed !!! + with np.errstate(divide="ignore", invalid="ignore"): + row_fin = (ref - tar) / ref + row_abs_diff = ref - tar + row_std_dev = row_abs_diff / (ref_err * ref) + prev_len = len(ref) + except AttributeError: + # This is raised when total values are + # collected only for one bin. + # the rest needs to be filled by nan + ref = tdata_ref.loc[xval, "Value"] + ref_err = tdata_ref.loc[xval, "Error"] + tar = tdata_tar.loc[xval, "Value"] + row_fin = [] + row_abs_diff = [] + row_std_dev = [] + for i in range(prev_len - 1): + row_fin.append(np.nan) + row_abs_diff.append(np.nan) + row_std_dev.append(np.nan) + row_fin.append((ref - tar) / ref) + row_abs_diff.append(ref - tar) + row_std_dev.append((ref - tar) / (ref_err * ref)) + + rows_fin.append(row_fin) + rows_abs_diff.append(row_abs_diff) + rows_std_dev.append(row_std_dev) try: - tally_settings = ex_cnf.loc[num] - except KeyError: - print( - " Warning!: tally n." - + str(num) - + " is not in configuration" + final = pd.DataFrame( + rows_fin, columns=y_set, index=x_set ) - continue - - # Re-Elaborate tdata Dataframe - x_name = tally_settings["x"] - x_tag = tally_settings["x name"] - y_name = tally_settings["y"] - # y_tag = tally_settings["y name"] - # ylim = tally_settings["cut Y"] - # select the index format - if label == "Value": - for dic in [comps, abs_diffs, std_devs]: - dic[num] = {"title": key, "x_label": x_tag} - - # if x_name == "Energy": - # idx_format = "0.00E+00" - # # TODO all possible cases should be addressed - # else: - # idx_format = "0" - - if y_name != "tally": - tdata_ref.set_index(x_name, inplace=True) - tdata_tar.set_index(x_name, inplace=True) - x_set = list(set(tdata_ref.index)) - y_set = list(set(tdata_ref[y_name].values)) - rows_fin = [] - rows_abs_diff = [] - rows_std_dev = [] - for xval in x_set: - try: - ref = tdata_ref.loc[xval, "Value"].values - ref_err = tdata_ref.loc[xval, "Error"].values - tar = tdata_tar.loc[xval, "Value"].values - # !!! True divide warnings are suppressed !!! - with np.errstate(divide="ignore", invalid="ignore"): - row_fin = (ref - tar) / ref - row_abs_diff = ref - tar - row_std_dev = row_abs_diff / (ref_err * ref) - prev_len = len(ref) - except AttributeError: - # This is raised when total values are - # collected only for one bin. 
- # the rest needs to be filled by nan - ref = tdata_ref.loc[xval, "Value"] - ref_err = tdata_ref.loc[xval, "Error"] - tar = tdata_tar.loc[xval, "Value"] - row_fin = [] - row_abs_diff = [] - row_std_dev = [] - for i in range(prev_len - 1): - row_fin.append(np.nan) - row_abs_diff.append(np.nan) - row_std_dev.append(np.nan) - row_fin.append((ref - tar) / ref) - row_abs_diff.append(ref - tar) - row_std_dev.append((ref - tar) / (ref_err * ref)) - - rows_fin.append(row_fin) - rows_abs_diff.append(row_abs_diff) - rows_std_dev.append(row_std_dev) - try: - final = pd.DataFrame( - rows_fin, columns=y_set, index=x_set - ) - abs_diff = pd.DataFrame( - rows_abs_diff, columns=y_set, index=x_set - ) - std_dev = pd.DataFrame( - rows_std_dev, columns=y_set, index=x_set + abs_diff = pd.DataFrame( + rows_abs_diff, columns=y_set, index=x_set + ) + std_dev = pd.DataFrame( + rows_std_dev, columns=y_set, index=x_set + ) + for df in [final, abs_diff, std_dev]: + df.index.name = x_name + df.replace(np.nan, "Not Available", inplace=True) + df.replace(float(0), "Identical", inplace=True) + df.replace(-np.inf, "Reference = 0", inplace=True) + df.replace(1, "Target = 0", inplace=True) + except ValueError: + print( + CRED + + """ + A ValueError was triggered, a probable cause may be that more than 2 binnings + are defined in tally {}. This is a fatal exception, application will now + close""".format( + str(num) ) - for df in [final, abs_diff, std_dev]: - df.index.name = x_name - df.replace(np.nan, "Not Available", inplace=True) - df.replace(float(0), "Identical", inplace=True) - df.replace(-np.inf, "Reference = 0", inplace=True) - df.replace(1, "Target = 0", inplace=True) - except ValueError: - print( - CRED - + """ - A ValueError was triggered, a probable cause may be that more than 2 binnings - are defined in tally {}. This is a fatal exception, application will now - close""".format( - str(num) - ) - + CEND + + CEND + ) + # Safely exit from excel and from application + sys.exit() + + # reorder index and quick index reset + for df in [final, abs_diff, std_dev]: + df.reset_index(inplace=True) + df = self._reorder_df(df, x_name) + df.set_index(x_name, inplace=True) + comps[num][label] = final + abs_diffs[num][label] = abs_diff + std_devs[num][label] = std_dev + # insert the df in pieces + # ex.insert_cutted_df( + # "B", + # main_value_df, + # "Comparison", + # ylim, + # header=(key, "Tally n." + str(num)), + # index_name=x_tag, + # cols_name=y_tag, + # index_num_format=idx_format, + # values_format="0.00%", + # ) + else: + # reorder dfs + try: + tdata_ref = self._reorder_df(tdata_ref, x_name) + except KeyError: + print( + CRED + + """ + {} is not available in tally {}. Please check the configuration file. + The application will now exit """.format( + x_name, str(num) ) - # Safely exit from excel and from application - sys.exit() + + CEND + ) + # Safely exit from excel and from application + sys.exit() + + del tdata_ref["Error"] + tdata_ref.set_index(x_name, inplace=True) + + tdata_tar = self._reorder_df(tdata_tar, x_name) + del tdata_tar["Error"] + tdata_tar.set_index(x_name, inplace=True) + + # !!! True divide warnings are suppressed !!! 
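The three comparison measures assembled in this routine are the relative difference, the absolute difference, and the absolute difference expressed in multiples of the reference tally's standard deviation (tally errors are relative, so the absolute one-sigma is ref_err * ref). A minimal standalone sketch of that arithmetic, using made-up numbers and assuming only numpy — illustrative only, not part of the patch:

    import numpy as np

    # Reference and target tally values; errors are relative (fractional),
    # so the absolute 1-sigma of the reference is ref_err * ref.
    ref = np.array([1.0e-3, 5.0e-4, 0.0])
    ref_err = np.array([0.02, 0.05, 0.0])
    tar = np.array([1.1e-3, 5.0e-4, 2.0e-5])

    # Zero references yield inf/nan; the warnings are silenced just as in the
    # surrounding routine via np.errstate.
    with np.errstate(divide="ignore", invalid="ignore"):
        rel_diff = (ref - tar) / ref               # relative difference
        abs_diff = ref - tar                       # absolute difference
        sigma_diff = abs_diff / (ref_err * ref)    # difference in reference 1-sigma units

    print(rel_diff)
    print(abs_diff)
    print(sigma_diff)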
+ with np.errstate(divide="ignore", invalid="ignore"): + comp_df = (tdata_ref - tdata_tar) / tdata_ref + abs_diff_df = tdata_ref - tdata_tar + std_dev_df = abs_diff_df + comps[num][label] = comp_df + abs_diffs[num][label] = abs_diff_df + std_devs[num][label] = abs_diff_df + # Insert DF + # ex.insert_df( + # "B", + # df, + # "Comparison", + # print_index=True, + # header=(key, "Tally n." + str(num)), + # values_format="0.00%", + # ) - # reorder index and quick index reset - for df in [final, abs_diff, std_dev]: - df.reset_index(inplace=True) - df = self._reorder_df(df, x_name) - df.set_index(x_name, inplace=True) - comps[num][label] = final - abs_diffs[num][label] = abs_diff - std_devs[num][label] = std_dev - # insert the df in pieces - # ex.insert_cutted_df( - # "B", - # main_value_df, - # "Comparison", - # ylim, - # header=(key, "Tally n." + str(num)), - # index_name=x_tag, - # cols_name=y_tag, - # index_num_format=idx_format, - # values_format="0.00%", - # ) - else: - # reorder dfs - try: - tdata_ref = self._reorder_df(tdata_ref, x_name) - except KeyError: - print( - CRED - + """ - {} is not available in tally {}. Please check the configuration file. - The application will now exit """.format( - x_name, str(num) - ) - + CEND - ) - # Safely exit from excel and from application - sys.exit() - - del tdata_ref["Error"] - tdata_ref.set_index(x_name, inplace=True) - - tdata_tar = self._reorder_df(tdata_tar, x_name) - del tdata_tar["Error"] - tdata_tar.set_index(x_name, inplace=True) - - # !!! True divide warnings are suppressed !!! - with np.errstate(divide="ignore", invalid="ignore"): - comp_df = (tdata_ref - tdata_tar) / tdata_ref - abs_diff_df = tdata_ref - tdata_tar - std_dev_df = abs_diff_df - comps[num][label] = comp_df - abs_diffs[num][label] = abs_diff_df - std_devs[num][label] = abs_diff_df - # Insert DF - # ex.insert_df( - # "B", - # df, - # "Comparison", - # print_index=True, - # header=(key, "Tally n." 
+ str(num)), - # values_format="0.00%", - # ) - - # Compile general infos in the sheet - # ws = ex.current_ws - # title = self.testname + " RESULTS RECAP: Comparison" - # ws.range("A3").value = title - # ws.range("C1").value = tarlib + " Vs " + reflib - - # Add single pp sheets - # for lib in [reflib, tarlib]: - # cp = self.state.get_path( - # "single", [lib, self.testname, "Excel"]) - # file = os.listdir(cp)[0] - # cp = os.path.join(cp, file) - # ex.copy_sheets(cp) - - # ex.save() - self.outputs[self.code] = comps - exsupp.comp_excel_writer( - self, - outpath, - lib_to_comp, - self.testname, - comps, - abs_diffs, - std_devs, - ) + # Compile general infos in the sheet + # ws = ex.current_ws + # title = self.testname + " RESULTS RECAP: Comparison" + # ws.range("A3").value = title + # ws.range("C1").value = tarlib + " Vs " + reflib + + # Add single pp sheets + # for lib in [reflib, tarlib]: + # cp = self.state.get_path( + # "single", [lib, self.testname, "Excel"]) + # file = os.listdir(cp)[0] + # cp = os.path.join(cp, file) + # ex.copy_sheets(cp) + + # ex.save() + self.outputs[self.code] = comps + exsupp.comp_excel_writer( + self, + outpath, + lib_to_comp, + self.testname, + comps, + abs_diffs, + std_devs, + ) class MCNPBenchmarkOutput(AbstractBenchmarkOutput): - def __init__(self): - super().__init__(self) - def _read_code_version(self, ofile: os.PathLike) -> str | None: """Read MCNP code version from the output file @@ -1025,9 +1010,6 @@ def parse_output_data(self, results_path): return sim_output, tally_numbers, tally_comments class OpenMCBenchmarkOutput(AbstractBenchmarkOutput): - def __init__(self): - super().__init__(self) - def _read_code_version(self, spfile: os.PathLike) -> str | None: """Read OpenMC code version from the statepoint file @@ -1089,7 +1071,7 @@ def _get_output_files(self, results_path): return file1, file2 def parse_output_data(self, results_path): - outfile, sfile = self._get_output_files(results_path) + _, sfile = self._get_output_files(results_path) sim_output = OpenMCSimOutput(sfile) tally_numbers = sim_output.output.tally_numbers tally_comments = sim_output.output.tally_comments diff --git a/jade/postprocess.py b/jade/postprocess.py index 7e5fd5ee..a979994d 100644 --- a/jade/postprocess.py +++ b/jade/postprocess.py @@ -187,10 +187,10 @@ def _get_output(action, code, testname, lib, session): else: if code in ['mcnp', 'd1s']: - return bencho.MCNPOutput(lib, code, testname, session) + return bencho.MCNPBenchmarkOutput(lib, code, testname, session) elif code == 'openmc': - return bencho.OpenMCOutput(lib, code, testname, session) + return bencho.OpenMCBenchmarkOutput(lib, code, testname, session) else: - raise NotImplementedError('Code has not been implemented') + raise NotImplementedError(f'Code {code} has not been implemented') return out From 48e660f344b731b58978eacd4ae3d3ec299b53f6 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 11:13:17 +0000 Subject: [PATCH 21/53] Started fixing bugs and tests --- jade/expoutput.py | 6 +++--- jade/openmc.py | 2 +- jade/output.py | 24 +++++++++++++----------- jade/sphereoutput.py | 22 +++++++++++----------- tests/output_test.py | 14 +++++++------- tests/sphereoutput_test.py | 2 +- 6 files changed, 36 insertions(+), 34 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index cebd2f9d..937ddf2a 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -36,8 +36,8 @@ import jade.atlas as at from f4enix.input.MCNPinput import D1S_Input -from jade.output import BenchmarkOutput -from jade.output import 
MCNPoutput +from jade.output import AbstractBenchmarkOutput +from jade.output import MCNPSimOutput from jade.plotter import Plotter from jade.status import EXP_TAG @@ -75,7 +75,7 @@ } -class ExperimentalOutput(BenchmarkOutput): +class ExperimentalOutput(AbstractBenchmarkOutput): def __init__(self, *args, **kwargs): """ This extends the Benchmark Output and creates an abstract class diff --git a/jade/openmc.py b/jade/openmc.py index e5a71bf8..321f798e 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -310,7 +310,7 @@ def tallies_to_dataframes(self): return tallies -class OpenMCSphereSimOutput(OpenMCSimOutput): +class OpenMCSphereStatePoint(OpenMCStatePoint): def __init__(self, spfile_path: str) -> None: """Class to handle the data extraction of the Sphere leakage benchmark in OpenMC diff --git a/jade/output.py b/jade/output.py index b2d988cf..885df6f1 100644 --- a/jade/output.py +++ b/jade/output.py @@ -927,12 +927,12 @@ def _generate_comparison_excel_output(self): ) class MCNPBenchmarkOutput(AbstractBenchmarkOutput): - def _read_code_version(self, ofile: os.PathLike) -> str | None: + def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: """Read MCNP code version from the output file Parameters ---------- - ofile : os.PathLike + simulation_folder : os.PathLike output file path Returns @@ -941,17 +941,18 @@ def _read_code_version(self, ofile: os.PathLike) -> str | None: version of the MCNP code used to run the benchmark """ - outp = MCNPOutputFile(ofile) + _, outf, _ = self._get_output_files(simulation_folder) + outp = MCNPOutputFile(outf) try: version = outp.get_code_version() return version except ValueError: logging.warning( "Code version not found in the output file or aux file for %s", - ofile, + simulation_folder, ) logging.warning( - "Contents of the directory: %s", os.listdir(os.path.dirname(ofile)) + "Contents of the directory: %s", os.listdir(os.path.dirname(simulation_folder)) ) return None @@ -1000,7 +1001,7 @@ def _get_output_files(self, results_path): file2 = os.path.join(results_path, file2) if file2 else None file3 = os.path.join(results_path, file2) if file3 else None - return file1, file2 + return file1, file2, file3 def parse_output_data(self, results_path): mfile, ofile, meshtalfile = self._get_output_files(results_path) @@ -1010,19 +1011,20 @@ def parse_output_data(self, results_path): return sim_output, tally_numbers, tally_comments class OpenMCBenchmarkOutput(AbstractBenchmarkOutput): - def _read_code_version(self, spfile: os.PathLike) -> str | None: + def _read_code_version(self, simulation_path: os.PathLike) -> str | None: """Read OpenMC code version from the statepoint file Parameters ---------- - spfile : os.PathLike - statepoint file path + simulation_path : os.PathLike + simulation file path Returns ------- str | None version of the OpenMC code used to run the benchmark """ + _, spfile = self._get_output_files(simulation_path) statepoint = omc.OpenMCStatePoint(spfile) version = statepoint.version return version @@ -1143,8 +1145,8 @@ def __init__(self, mctal_file, output_file, meshtal_file=None): pass # no user column self.mctal = mctal - self.tally_comments = [tally.tallyNumber for tally in self.mctal.tallies] - self.tally_numbers = [tally.tallyComment[0] for tally in self.mctal.tallies] + self.tally_numbers = [tally.tallyNumber for tally in self.mctal.tallies] + self.tally_comments = [tally.tallyComment[0] for tally in self.mctal.tallies] self.tallydata = tallydata self.totalbin = total_bin # Read the output file diff --git 
a/jade/sphereoutput.py b/jade/sphereoutput.py index 8508d898..8282aed9 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -42,7 +42,7 @@ import jade.atlas as at import jade.excelsupport as exsupp import jade.plotter as plotter -from jade.output import BenchmarkOutput, OpenMCOutput, MCNPoutput +from jade.output import AbstractBenchmarkOutput, OpenMCSimOutput, MCNPSimOutput if TYPE_CHECKING: from jade.main import Session @@ -53,7 +53,7 @@ import jade.openmc as omc -class SphereOutput(BenchmarkOutput): +class SphereOutput(AbstractBenchmarkOutput): def __init__(self, lib: str, code: str, testname: str, session: Session): """ Initialises the SphereOutput class from the general BenchmarkOutput @@ -402,7 +402,7 @@ def _read_mcnp_output(self): elif file[-1] == "o": ofile = file # Parse output - output = SphereMCNPoutput( + output = SphereMCNPSimOutput( os.path.join(results_path, mfile), os.path.join(results_path, ofile) ) @@ -483,7 +483,7 @@ def _read_openmc_output(self): zaidname = pieces[-1] # Parse output _, outfile = self._get_output_files(results_path, "openmc") - output = SphereOpenMCoutput(outfile) + output = SphereOpenMCSimOutput(outfile) outputs[zaidnum] = output # Adjourn raw Data self.raw_data["openmc"][zaidnum] = output.tallydata @@ -675,7 +675,7 @@ def pp_excel_comparison(self): # Parse output mfile = os.path.join(results_path, mfile) outfile = os.path.join(results_path, outfile) - output = SphereMCNPoutput(mfile, outfile) + output = SphereMCNPSimOutput(mfile, outfile) outputs_lib[zaidnum] = output res, err, columns = output.get_comparison_data( @@ -877,7 +877,7 @@ def pp_excel_comparison(self): # Parse output _, outfile = self._get_output_files(results_path, "openmc") - output = SphereOpenMCoutput(outfile) + output = SphereOpenMCSimOutput(outfile) outputs_lib[zaidnum] = output res, err, columns = output.get_comparison_data( ["4", "14"], "openmc" @@ -1183,7 +1183,7 @@ def get_comparison_data(self, tallies2pp, code): return results, errors, columns -class SphereMCNPoutput(MCNPoutput, SphereTallyOutput): +class SphereMCNPSimOutput(MCNPSimOutput, SphereTallyOutput): def __init__(self, mfile, outfile): super().__init__(mfile, outfile) self.tallydata, self.totalbin = self._get_tallydata(self.mctal) @@ -1264,9 +1264,9 @@ def _get_tallydata(self, mctal): return df, dftotal -class SphereOpenMCoutput(OpenMCOutput, SphereTallyOutput): +class SphereOpenMCSimOutput(OpenMCSimOutput, SphereTallyOutput): def __init__(self, output_path): - self.output = omc.OpenMCSphereSimOutput(output_path) + self.output = omc.OpenMCSphereStatePoint(output_path) self.tallydata, self.totalbin = self.process_tally() self.stat_checks = None @@ -2002,7 +2002,7 @@ def _parserunmcnp(self, test_path, lib): elif file[-1] == "o": ofile = file # Parse output - output = SphereSDDRMCNPoutput( + output = SphereSDDRMCNPOutput( os.path.join(results_path, mfile), os.path.join(results_path, ofile) ) @@ -2042,7 +2042,7 @@ def print_raw(self): json.dump(self.metadata, outfile, indent=4) -class SphereSDDRMCNPoutput(SphereMCNPoutput): +class SphereSDDRMCNPOutput(SphereMCNPSimOutput): def _get_tallydata(self, mctal): diff --git a/tests/output_test.py b/tests/output_test.py index 7bcd05b2..21d15af6 100644 --- a/tests/output_test.py +++ b/tests/output_test.py @@ -38,7 +38,7 @@ import jade.sphereoutput as sout from jade.configuration import Configuration from jade.__version__ import __version__ -from jade.output import MCNPoutput +from jade.output import MCNPSimOutput from jade.postprocess import compareBenchmark from 
jade.__openmc__ import OMC_AVAIL @@ -54,9 +54,9 @@ ) -class TestSphereSDDRMCNPoutput: +class TestSphereSDDRMCNPSimOutput: - out = sout.SphereSDDRMCNPoutput(OUTM_SDDR, OUTP_SDDR) + out = sout.SphereSDDRMCNPOutput(OUTM_SDDR, OUTP_SDDR) def test_get_single_excel_data(self): vals, errors = self.out.get_single_excel_data() @@ -66,9 +66,9 @@ def test_get_single_excel_data(self): assert len(errors) == 23 -class TestMCNPoutput: +class TestMCNPSimOutput: def test_mcnpoutput(self): - out = MCNPoutput(OUTM_SDDR, OUTP_SDDR) + out = MCNPSimOutput(OUTM_SDDR, OUTP_SDDR) t4 = out.tallydata[4] t2 = out.tallydata[2] assert list(t4.columns) == ["Cells", "Segments", "Value", "Error"] @@ -110,7 +110,7 @@ def test_single_excel_mcnp(self, tmpdir): os.path.join(cp, "TestFiles", "output", "config_test.xlsx") ) session = MockSession(conf, tmpdir) - out = output.BenchmarkOutput("32c", "mcnp", "ITER_1D", session) + out = output.MCNPBenchmarkOutput("32c", "mcnp", "ITER_1D", session) out._generate_single_excel_output() out._print_raw() @@ -140,7 +140,7 @@ def test_single_excel_openmc(self, tmpdir): os.path.join(cp, "TestFiles", "output", "config_test.xlsx") ) session = MockSession(conf, tmpdir) - out = output.BenchmarkOutput("32c", "openmc", "ITER_1D", session) + out = output.OpenMCBenchmarkOutput("32c", "openmc", "ITER_1D", session) out._generate_single_excel_output() out._print_raw() diff --git a/tests/sphereoutput_test.py b/tests/sphereoutput_test.py index ad639630..3c242d0f 100644 --- a/tests/sphereoutput_test.py +++ b/tests/sphereoutput_test.py @@ -239,7 +239,7 @@ def test_full_comparison(self, tmpdir, lm: LibManager): class TestSphereSDDRMCNPoutput: - out = sout.SphereSDDRMCNPoutput( + out = sout.SphereSDDRMCNPOutput( os.path.join(resources, "SphereSDDR_11023_Na-23_102_m"), os.path.join(resources, "SphereSDDR_11023_Na-23_102_o"), ) From 89a224dfaacd1c02265571c967434be5d69e21c8 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 11:57:52 +0000 Subject: [PATCH 22/53] Fixed output_test --- jade/expoutput.py | 8 ++++---- jade/output.py | 10 ++++++++-- jade/postprocess.py | 2 +- jade/sphereoutput.py | 2 +- tests/output_test.py | 4 ++-- tests/sphereoutput_test.py | 14 +++++++------- 6 files changed, 23 insertions(+), 17 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index 937ddf2a..271adc72 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -36,7 +36,7 @@ import jade.atlas as at from f4enix.input.MCNPinput import D1S_Input -from jade.output import AbstractBenchmarkOutput +from jade.output import MCNPBenchmarkOutput from jade.output import MCNPSimOutput from jade.plotter import Plotter from jade.status import EXP_TAG @@ -75,7 +75,7 @@ } -class ExperimentalOutput(AbstractBenchmarkOutput): +class ExperimentalOutput(MCNPBenchmarkOutput): def __init__(self, *args, **kwargs): """ This extends the Benchmark Output and creates an abstract class @@ -213,9 +213,9 @@ def build_atlas(self): def _extract_single_output( self, results_path: os.PathLike, folder: str, lib: str ) -> tuple[pd.DataFrame, str]: - mfile, ofile = self._get_output_files(results_path, "mcnp") + mfile, ofile, meshtalfile = self._get_output_files(results_path) # Parse output - output = MCNPoutput(mfile, ofile) + output = MCNPSimOutput(mfile, ofile, meshtalfile) # need to extract the input in case of multi if self.multiplerun: diff --git a/jade/output.py b/jade/output.py index 885df6f1..9a1b4a17 100644 --- a/jade/output.py +++ b/jade/output.py @@ -1145,8 +1145,14 @@ def __init__(self, mctal_file, output_file, 
meshtal_file=None): pass # no user column self.mctal = mctal - self.tally_numbers = [tally.tallyNumber for tally in self.mctal.tallies] - self.tally_comments = [tally.tallyComment[0] for tally in self.mctal.tallies] + self.tally_numbers = [] + self.tally_comments = [] + for tally in self.mctal.tallies: + self.tally_numbers.append(tally.tallyNumber) + if len(tally.tallyComment) > 0: + self.tally_comments.append(tally.tallyComment[0]) + else: + self.tally_comments.append('') self.tallydata = tallydata self.totalbin = total_bin # Read the output file diff --git a/jade/postprocess.py b/jade/postprocess.py index a979994d..60e31dea 100644 --- a/jade/postprocess.py +++ b/jade/postprocess.py @@ -132,7 +132,7 @@ def _get_output(action, code, testname, lib, session): out = spho.SphereOutput(lib, code, testname, session) elif testname == "SphereSDDR": - out = spho.SphereSDDRoutput(lib, code, testname, session) + out = spho.SphereSDDROutput(lib, code, testname, session) elif testname in ["Oktavian"]: if action == "compare": diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 8282aed9..cb88dcb4 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -1340,7 +1340,7 @@ def process_tally(self): return tallydata, totalbin -class SphereSDDRoutput(SphereOutput): +class SphereSDDROutput(SphereOutput): times = ["0s", "2.7h", "24h", "11.6d", "30d", "10y"] timecols = { "0s": "1.0", diff --git a/tests/output_test.py b/tests/output_test.py index 21d15af6..b2007b8f 100644 --- a/tests/output_test.py +++ b/tests/output_test.py @@ -169,9 +169,9 @@ def test_iter_cyl(self, tmpdir): os.path.join(cp, "TestFiles", "output", "config_itercyl.xlsx") ) session = MockSession(conf, tmpdir) - out = output.BenchmarkOutput("99c", "d1s", "ITER_Cyl_SDDR", session) + out = output.MCNPBenchmarkOutput("99c", "d1s", "ITER_Cyl_SDDR", session) out.single_postprocess() - out = output.BenchmarkOutput("93c", "d1s", "ITER_Cyl_SDDR", session) + out = output.MCNPBenchmarkOutput("93c", "d1s", "ITER_Cyl_SDDR", session) out.single_postprocess() compareBenchmark(session, "99c-93c", "d1s", ["ITER_Cyl_SDDR"], exp=False) diff --git a/tests/sphereoutput_test.py b/tests/sphereoutput_test.py index 3c242d0f..014f88f1 100644 --- a/tests/sphereoutput_test.py +++ b/tests/sphereoutput_test.py @@ -42,7 +42,7 @@ from f4enix.input.libmanager import LibManager from jade.status import Status from jade.__version__ import __version__ -from jade.output import MCNPoutput +from jade.output import MCNPSimOutput import jade.sphereoutput as sout @@ -150,7 +150,7 @@ def test_read_openmc_output(self, session_mock: MockUpSession): assert "M10" == results[1]["Zaid"] -class MockSphereSDDRoutput(sout.SphereSDDRoutput): +class MockSphereSDDROutput(sout.SphereSDDROutput): def __init__(self): self.lib = "99c" self.testname = "SphereSDDR" @@ -167,8 +167,8 @@ def __init__(self): self.d1s = True -class TestSphereSDDRoutput: - mockoutput = MockSphereSDDRoutput() +class TestSphereSDDROutput: + mockoutput = MockSphereSDDROutput() @pytest.fixture def lm(self): @@ -228,16 +228,16 @@ def test_full_comparison(self, tmpdir, lm: LibManager): session.conf = Configuration(os.path.join(resources, "config_SphereSDDR.xlsx")) # do the single pp first for lib in ["99c", "98c", "93c"]: - output = sout.SphereSDDRoutput(lib, "d1s", "SphereSDDR", session) + output = sout.SphereSDDROutput(lib, "d1s", "SphereSDDR", session) output.single_postprocess() - output = sout.SphereSDDRoutput( + output = sout.SphereSDDROutput( ["98c", "99c", "93c"], "d1s", "SphereSDDR", session ) 
output.compare() assert True -class TestSphereSDDRMCNPoutput: +class TestSphereSDDRMCNPOutput: out = sout.SphereSDDRMCNPOutput( os.path.join(resources, "SphereSDDR_11023_Na-23_102_m"), From 2f6169c6ef36cfde14e52a7109033c66e1b48ed2 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 14:30:58 +0000 Subject: [PATCH 23/53] debugging tests --- jade/output.py | 13 ++++++------- jade/sphereoutput.py | 9 +++++++++ tests/expoutput_test.py | 12 ++++++------ 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/jade/output.py b/jade/output.py index 9a1b4a17..de78afaa 100644 --- a/jade/output.py +++ b/jade/output.py @@ -538,7 +538,6 @@ def _generate_single_excel_output(self): # Parse output results_path = os.path.join(self.test_path, self.code) sim_output, tally_numbers, tally_comments = self.parse_output_data(results_path) - # Adjourn raw Data self.raw_data = sim_output.tallydata @@ -1147,12 +1146,6 @@ def __init__(self, mctal_file, output_file, meshtal_file=None): self.mctal = mctal self.tally_numbers = [] self.tally_comments = [] - for tally in self.mctal.tallies: - self.tally_numbers.append(tally.tallyNumber) - if len(tally.tallyComment) > 0: - self.tally_comments.append(tally.tallyComment[0]) - else: - self.tally_comments.append('') self.tallydata = tallydata self.totalbin = total_bin # Read the output file @@ -1179,6 +1172,12 @@ def __init__(self, mctal_file, output_file, meshtal_file=None): self.mctal.tallies.append(dummyTally) else: continue + for tally in self.mctal.tallies: + self.tally_numbers.append(tally.tallyNumber) + if len(tally.tallyComment) > 0: + self.tally_comments.append(tally.tallyComment[0]) + else: + self.tally_comments.append('') class OpenMCSimOutput: def __init__(self, output_path): diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index cb88dcb4..2ecfa7b8 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -99,6 +99,15 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # metadata involved here self.metadata = None + def _get_output_files(self, resuts_path): + pass + + def _read_code_version(self, pathtofile): + pass + + def parse_output_data(self, results_path): + pass + def single_postprocess(self): """ Execute the full post-processing of a single library (i.e. 
excel, diff --git a/tests/expoutput_test.py b/tests/expoutput_test.py index bcf3cf63..be0166a3 100644 --- a/tests/expoutput_test.py +++ b/tests/expoutput_test.py @@ -69,11 +69,11 @@ def test_benchmarkoutput(self, session_mock: MockUpSession): testname = "ITER_1D" os.makedirs(session_mock.path_comparison) os.makedirs(session_mock.path_single) - self.benchoutput_32c = outp.BenchmarkOutput("32c", code, testname, session_mock) + self.benchoutput_32c = outp.MCNPBenchmarkOutput("32c", code, testname, session_mock) self.benchoutput_32c.single_postprocess() - self.benchoutput_31c = outp.BenchmarkOutput("31c", code, testname, session_mock) + self.benchoutput_31c = outp.MCNPBenchmarkOutput("31c", code, testname, session_mock) self.benchoutput_31c.single_postprocess() - self.benchoutput_comp = outp.BenchmarkOutput( + self.benchoutput_comp = outp.MCNPBenchmarkOutput( ["32c", "31c"], code, testname, session_mock ) self.benchoutput_comp.compare() @@ -86,11 +86,11 @@ def test_benchmarkoutputmesh(self, session_mock: MockUpSession): testname = "WCLL_TBM_1D" os.makedirs(session_mock.path_comparison) os.makedirs(session_mock.path_single) - self.benchoutput_32c = outp.BenchmarkOutput("32c", code, testname, session_mock) + self.benchoutput_32c = outp.MCNPBenchmarkOutput("32c", code, testname, session_mock) self.benchoutput_32c.single_postprocess() - self.benchoutput_31c = outp.BenchmarkOutput("31c", code, testname, session_mock) + self.benchoutput_31c = outp.MCNPBenchmarkOutput("31c", code, testname, session_mock) self.benchoutput_31c.single_postprocess() - self.benchoutput_comp = outp.BenchmarkOutput( + self.benchoutput_comp = outp.MCNPBenchmarkOutput( ["32c", "31c"], code, testname, session_mock ) self.benchoutput_comp.compare() From ce6521358bc6c2786765799e4987304ca79cafa1 Mon Sep 17 00:00:00 2001 From: Davide Laghi Date: Tue, 5 Nov 2024 15:53:50 +0100 Subject: [PATCH 24/53] add some type hint --- jade/openmc.py | 7 ++-- jade/output.py | 99 ++++++++++++++++++++++++++------------------------ 2 files changed, 56 insertions(+), 50 deletions(-) diff --git a/jade/openmc.py b/jade/openmc.py index 321f798e..aedf205f 100644 --- a/jade/openmc.py +++ b/jade/openmc.py @@ -1,11 +1,12 @@ from __future__ import annotations + import logging import os import re -import openmc - from typing import TYPE_CHECKING +import openmc + if TYPE_CHECKING: from f4enix.input.libmanager import LibManager from f4enix.input.materials import Material, SubMaterial, Zaid @@ -234,7 +235,7 @@ def write(self, path: str) -> None: class OpenMCStatePoint: - def __init__(self, spfile_path: str) -> None: + def __init__(self, spfile_path: str | os.PathLike) -> None: """Class for handling OpenMC tatepoint file Parameters diff --git a/jade/output.py b/jade/output.py index de78afaa..12c03d8e 100644 --- a/jade/output.py +++ b/jade/output.py @@ -24,31 +24,31 @@ from __future__ import annotations import abc +import json +import logging import os import pickle import shutil import string import sys -import json -import logging from typing import TYPE_CHECKING -from f4enix.output.mctal import Mctal, Tally -from f4enix.output.meshtal import Meshtal, Fmesh1D -from f4enix.output.MCNPoutput import Output + import numpy as np # import xlwings as xw import pandas as pd +from f4enix.output.MCNPoutput import Output +from f4enix.output.MCNPoutput import Output as MCNPOutputFile +from f4enix.output.mctal import Mctal, Tally +from f4enix.output.meshtal import Fmesh1D, Meshtal from tqdm import tqdm import jade.atlas as at import jade.excelsupport as exsupp import 
jade.plotter as plotter -from jade.constants import CODES - -from jade.__version__ import __version__ -from f4enix.output.MCNPoutput import Output as MCNPOutputFile from jade.__openmc__ import OMC_AVAIL +from jade.__version__ import __version__ +from jade.constants import CODES if OMC_AVAIL: import jade.openmc as omc @@ -60,8 +60,9 @@ CRED = "\033[91m" CEND = "\033[0m" + class AbstractBenchmarkOutput(abc.ABC): - def __init__(self, lib: str, code: str, testname: str, session: Session): + def __init__(self, lib: str, code: str, testname: str, session: Session) -> None: """ General class for a Benchmark output @@ -183,16 +184,16 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # Read the metadata results_path = os.path.join(self.test_path, code) - self.metadata = self._read_metadata_run(results_path) + self.metadata = self._read_metadata_run(results_path) - @abc.abstractmethod - def _read_code_version(self, pathtofile): + @abc.abstractmethod + def _read_code_version(self, pathtofile: str | os.PathLike) -> str | None: """ To be executed when a comparison is requested """ @abc.abstractmethod - def _get_output_files(self, results_path): + def _get_output_files(self, results_path: str | os.PathLike) -> list: """ To be executed when a comparison is requested """ @@ -507,7 +508,7 @@ def _reorder_df(df, x_set): pass return df - + def _print_raw(self): for key, data in self.raw_data.items(): file = os.path.join(self.raw_path, str(key) + ".csv") @@ -601,9 +602,7 @@ def _generate_single_excel_output(self): + """ A ValueError was triggered, a probable cause may be that more than 2 binnings are defined in tally {}. This is a fatal exception, application will now - close""".format( - str(num) - ) + close""".format(str(num)) + CEND ) # Safely exit from excel and from application @@ -636,9 +635,7 @@ def _generate_single_excel_output(self): CRED + """ {} is not available in tally {}. Please check the configuration file. -The application will now exit """.format( - x_name, str(num) - ) +The application will now exit """.format(x_name, str(num)) + CEND ) # Safely exit from excel and from application @@ -733,7 +730,9 @@ def _generate_comparison_excel_output(self): tarlib: os.path.join(self.test_path[tarlib], self.code), }.items(): # Parse output - sim_outputs[lib], tally_numbers[lib], tally_comments[lib] = self.parse_output_data(results_path) + sim_outputs[lib], tally_numbers[lib], tally_comments[lib] = ( + self.parse_output_data(results_path) + ) # Build the comparison for label in ["Value", "Error"]: for num, key in zip(tally_numbers[reflib], tally_comments[reflib]): @@ -808,9 +807,7 @@ def _generate_comparison_excel_output(self): rows_abs_diff.append(row_abs_diff) rows_std_dev.append(row_std_dev) try: - final = pd.DataFrame( - rows_fin, columns=y_set, index=x_set - ) + final = pd.DataFrame(rows_fin, columns=y_set, index=x_set) abs_diff = pd.DataFrame( rows_abs_diff, columns=y_set, index=x_set ) @@ -829,9 +826,7 @@ def _generate_comparison_excel_output(self): + """ A ValueError was triggered, a probable cause may be that more than 2 binnings are defined in tally {}. This is a fatal exception, application will now - close""".format( - str(num) - ) + close""".format(str(num)) + CEND ) # Safely exit from excel and from application @@ -866,9 +861,7 @@ def _generate_comparison_excel_output(self): CRED + """ {} is not available in tally {}. Please check the configuration file. 
- The application will now exit """.format( - x_name, str(num) - ) + The application will now exit """.format(x_name, str(num)) + CEND ) # Safely exit from excel and from application @@ -925,6 +918,7 @@ def _generate_comparison_excel_output(self): std_devs, ) + class MCNPBenchmarkOutput(AbstractBenchmarkOutput): def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: """Read MCNP code version from the output file @@ -951,11 +945,12 @@ def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: simulation_folder, ) logging.warning( - "Contents of the directory: %s", os.listdir(os.path.dirname(simulation_folder)) + "Contents of the directory: %s", + os.listdir(os.path.dirname(simulation_folder)), ) return None - - def _get_output_files(self, results_path): + + def _get_output_files(self, results_path: str | os.PathLike) -> tuple: """ Recover the output files from a directory @@ -988,7 +983,7 @@ def _get_output_files(self, results_path): file1 = file_name elif file_name[-1] == "o": file2 = file_name - elif file_name[-4] == 'msht': + elif file_name[-4] == "msht": file3 = file_name if file1 is None or file2 is None: @@ -998,7 +993,7 @@ def _get_output_files(self, results_path): file1 = os.path.join(results_path, file1) if file1 else None file2 = os.path.join(results_path, file2) if file2 else None - file3 = os.path.join(results_path, file2) if file3 else None + file3 = os.path.join(results_path, file3) if file3 else None return file1, file2, file3 @@ -1009,6 +1004,7 @@ def parse_output_data(self, results_path): tally_comments = sim_output.tally_comments return sim_output, tally_numbers, tally_comments + class OpenMCBenchmarkOutput(AbstractBenchmarkOutput): def _read_code_version(self, simulation_path: os.PathLike) -> str | None: """Read OpenMC code version from the statepoint file @@ -1028,7 +1024,7 @@ def _read_code_version(self, simulation_path: os.PathLike) -> str | None: version = statepoint.version return version - def _get_output_files(self, results_path): + def _get_output_files(self, results_path: str | os.PathLike) -> tuple: """ Recover the output files from a directory @@ -1057,7 +1053,7 @@ def _get_output_files(self, results_path): for file_name in os.listdir(results_path): if file_name.endswith(".out"): - file1 = file_name + file1 = file_name elif file_name.startswith("statepoint"): file2 = file_name @@ -1076,10 +1072,16 @@ def parse_output_data(self, results_path): sim_output = OpenMCSimOutput(sfile) tally_numbers = sim_output.output.tally_numbers tally_comments = sim_output.output.tally_comments - return sim_output, tally_numbers, tally_comments + return sim_output, tally_numbers, tally_comments + class MCNPSimOutput: - def __init__(self, mctal_file, output_file, meshtal_file=None): + def __init__( + self, + mctal_file: str | os.PathLike, + output_file: str | os.PathLike, + meshtal_file: str | os.PathLike | None = None, + ): """ Class representing all outputs coming from and MCNP run @@ -1177,17 +1179,20 @@ def __init__(self, mctal_file, output_file, meshtal_file=None): if len(tally.tallyComment) > 0: self.tally_comments.append(tally.tallyComment[0]) else: - self.tally_comments.append('') + self.tally_comments.append("") -class OpenMCSimOutput: - def __init__(self, output_path): + +class OpenMCSimOutput: + def __init__(self, output_path: str | os.PathLike) -> None: self.output = omc.OpenMCStatePoint(output_path) self.tally_numbers = self.output.tally_numbers self.tally_comments = self.output.tally_comments self.tallydata, self.totalbin = 
self.process_tally() - self.stat_checks = None - - def _create_dataframes(self, tallies): + self.stat_checks = None + + def _create_dataframes( + self, tallies: dict + ) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame]]: tallydata = {} totalbin = {} filter_lookup = { @@ -1239,7 +1244,7 @@ def _create_dataframes(self, tallies): totalbin[id] = None return tallydata, totalbin - def process_tally(self): + def process_tally(self) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame]]: tallies = self.output.tallies_to_dataframes() tallydata, totalbin = self._create_dataframes(tallies) return tallydata, totalbin From 6f75f0473a1403f19031524b30359d1742fab572 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 14:58:40 +0000 Subject: [PATCH 25/53] Fixed meshtal test --- jade/output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jade/output.py b/jade/output.py index 12c03d8e..578c245e 100644 --- a/jade/output.py +++ b/jade/output.py @@ -983,7 +983,7 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: file1 = file_name elif file_name[-1] == "o": file2 = file_name - elif file_name[-4] == "msht": + elif file_name[-4:] == "msht": file3 = file_name if file1 is None or file2 is None: From 381f985eb553d8472d405de1d43c50706b9916ec Mon Sep 17 00:00:00 2001 From: Davide Laghi Date: Tue, 5 Nov 2024 16:12:38 +0100 Subject: [PATCH 26/53] type hints --- jade/output.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/jade/output.py b/jade/output.py index 12c03d8e..6d7a436a 100644 --- a/jade/output.py +++ b/jade/output.py @@ -1067,7 +1067,9 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: return file1, file2 - def parse_output_data(self, results_path): + def parse_output_data( + self, results_path: str | os.PathLike + ) -> tuple[OpenMCSimOutput, list, list]: _, sfile = self._get_output_files(results_path) sim_output = OpenMCSimOutput(sfile) tally_numbers = sim_output.output.tally_numbers From 68aba66039797982a7ed3645dd8816a8fa1b3ca5 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 15:39:40 +0000 Subject: [PATCH 27/53] Initialise abstract sphere --- jade/output.py | 6 ++++++ jade/sphereoutput.py | 12 +----------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/jade/output.py b/jade/output.py index 578c245e..41246c95 100644 --- a/jade/output.py +++ b/jade/output.py @@ -934,6 +934,9 @@ def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: version of the MCNP code used to run the benchmark """ + if self.testname in ['Sphere', 'SphereSDDR']: + if not os.path.exists(simulation_folder): + return None _, outf, _ = self._get_output_files(simulation_folder) outp = MCNPOutputFile(outf) try: @@ -1019,6 +1022,9 @@ def _read_code_version(self, simulation_path: os.PathLike) -> str | None: str | None version of the OpenMC code used to run the benchmark """ + if self.testname in ['Sphere', 'SphereSDDR']: + if not os.path.exists(simulation_path): + return None _, spfile = self._get_output_files(simulation_path) statepoint = omc.OpenMCStatePoint(spfile) version = statepoint.version diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 2ecfa7b8..1f396cae 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -52,8 +52,7 @@ if OMC_AVAIL: import jade.openmc as omc - -class SphereOutput(AbstractBenchmarkOutput): +class AbstractSphereBenchmarkOutput(AbstractBenchmarkOutput): def __init__(self, lib: str, code: str, testname: str, session: Session): 
""" Initialises the SphereOutput class from the general BenchmarkOutput @@ -98,15 +97,6 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # means that self.test_path is a dict, hence a comparison. No # metadata involved here self.metadata = None - - def _get_output_files(self, resuts_path): - pass - - def _read_code_version(self, pathtofile): - pass - - def parse_output_data(self, results_path): - pass def single_postprocess(self): """ From d2eda700dd740077f88549f4d81463993efc2a92 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 16:11:59 +0000 Subject: [PATCH 28/53] Docstrings --- jade/output.py | 48 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 11 deletions(-) diff --git a/jade/output.py b/jade/output.py index 3d98ebf9..379ea4c3 100644 --- a/jade/output.py +++ b/jade/output.py @@ -187,15 +187,33 @@ def __init__(self, lib: str, code: str, testname: str, session: Session) -> None self.metadata = self._read_metadata_run(results_path) @abc.abstractmethod - def _read_code_version(self, pathtofile: str | os.PathLike) -> str | None: - """ - To be executed when a comparison is requested + def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None: + """Abstract function to retrieve code version. Implimentation should be added to child classes for each code. + + Parameters + ---------- + simulation_folder : str | os.PathLike + Path to simulation results folder. + + Returns + ------- + str | None + Returns the code version, except for sphere benchmark, which returns None """ @abc.abstractmethod def _get_output_files(self, results_path: str | os.PathLike) -> list: - """ - To be executed when a comparison is requested + """Abstract function to retrieve code output files. Implimentation should be added to child classes for each code. + + Parameters + ---------- + results_path : str | os.PathLike + Path to simulation results folder. + + Returns + ------- + list + List of simulation results files. """ @abc.abstractmethod @@ -602,7 +620,9 @@ def _generate_single_excel_output(self): + """ A ValueError was triggered, a probable cause may be that more than 2 binnings are defined in tally {}. This is a fatal exception, application will now - close""".format(str(num)) + close""".format( + str(num) + ) + CEND ) # Safely exit from excel and from application @@ -635,7 +655,9 @@ def _generate_single_excel_output(self): CRED + """ {} is not available in tally {}. Please check the configuration file. -The application will now exit """.format(x_name, str(num)) +The application will now exit """.format( + x_name, str(num) + ) + CEND ) # Safely exit from excel and from application @@ -826,7 +848,9 @@ def _generate_comparison_excel_output(self): + """ A ValueError was triggered, a probable cause may be that more than 2 binnings are defined in tally {}. This is a fatal exception, application will now - close""".format(str(num)) + close""".format( + str(num) + ) + CEND ) # Safely exit from excel and from application @@ -861,7 +885,9 @@ def _generate_comparison_excel_output(self): CRED + """ {} is not available in tally {}. Please check the configuration file. 
- The application will now exit """.format(x_name, str(num)) + The application will now exit """.format( + x_name, str(num) + ) + CEND ) # Safely exit from excel and from application @@ -934,7 +960,7 @@ def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: version of the MCNP code used to run the benchmark """ - if self.testname in ['Sphere', 'SphereSDDR']: + if self.testname in ["Sphere", "SphereSDDR"]: if not os.path.exists(simulation_folder): return None _, outf, _ = self._get_output_files(simulation_folder) @@ -1022,7 +1048,7 @@ def _read_code_version(self, simulation_path: os.PathLike) -> str | None: str | None version of the OpenMC code used to run the benchmark """ - if self.testname in ['Sphere', 'SphereSDDR']: + if self.testname in ["Sphere", "SphereSDDR"]: if not os.path.exists(simulation_path): return None _, spfile = self._get_output_files(simulation_path) From 3c87d60e6904925eba290958d6fa8afec1e0e61c Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 16:44:07 +0000 Subject: [PATCH 29/53] Docstrings for output.py --- jade/output.py | 90 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 82 insertions(+), 8 deletions(-) diff --git a/jade/output.py b/jade/output.py index 379ea4c3..425fa49f 100644 --- a/jade/output.py +++ b/jade/output.py @@ -216,8 +216,9 @@ def _get_output_files(self, results_path: str | os.PathLike) -> list: List of simulation results files. """ + #TODO Output types @abc.abstractmethod - def parse_output_data(self, results_path): + def parse_output_data(self, results_path : str | os.PathLike): """ To be executed when a comparison is requested """ @@ -367,7 +368,7 @@ def single_postprocess(self): # Remove tmp images shutil.rmtree(outpath) - def compare(self): + def compare(self) -> None: """ Generates the full comparison post-processing (excel and atlas) @@ -503,7 +504,21 @@ def compare(self): shutil.rmtree(outpath) @staticmethod - def _reorder_df(df, x_set): + def _reorder_df(df : pd.DataFrame, x_set : list) -> pd.DataFrame: + """Method to re-organise pandas data frame. + + Parameters + ---------- + df : pd.DataFrame + Input pandas data frame. + x_set : list + List of values to re-order data frame on. + + Returns + ------- + pd.DataFrame + Re-ordered pandas data frame. + """ # First of all try order by number df["index"] = pd.to_numeric(df[x_set], errors="coerce") @@ -527,7 +542,13 @@ def _reorder_df(df, x_set): return df - def _print_raw(self): + def _print_raw(self) -> None: + """Method to print raw data to json. 
+ + Returns + ------- + None + """ for key, data in self.raw_data.items(): file = os.path.join(self.raw_path, str(key) + ".csv") data.to_csv(file, header=True, index=False) @@ -536,7 +557,7 @@ def _print_raw(self): with open(metadata_file, "w", encoding="utf-8") as outfile: json.dump(self.metadata, outfile, indent=4) - def _generate_single_excel_output(self): + def _generate_single_excel_output(self) -> None: # Get excel configuration self.outputs = {} self.results = {} @@ -716,7 +737,7 @@ def _generate_single_excel_output(self): # ex.save() exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) - def _generate_comparison_excel_output(self): + def _generate_comparison_excel_output(self) -> None: # Get excel configuration self.outputs = {} self.results = {} @@ -1000,7 +1021,9 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: file1 : path path to the first file file2 : path - path to the second file (only for mcnp) + path to the second file + file2 : path + path to the third file (only for mcnp meshtal) """ file1 = None @@ -1102,6 +1125,22 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: def parse_output_data( self, results_path: str | os.PathLike ) -> tuple[OpenMCSimOutput, list, list]: + """_summary_ + + Parameters + ---------- + results_path : str | os.PathLike + Path to simulation results. + + Returns + ------- + sim_ouput : OpenMCSimOutput + OpenMC simulation output object + tally_numbers : list + List of tally numbers in simulation output + tally_comments : list + List of tally comments in simulation output + """ _, sfile = self._get_output_files(results_path) sim_output = OpenMCSimOutput(sfile) tally_numbers = sim_output.output.tally_numbers @@ -1117,7 +1156,7 @@ def __init__( meshtal_file: str | os.PathLike | None = None, ): """ - Class representing all outputs coming from and MCNP run + Class representing all outputs coming from MCNP run Parameters ---------- @@ -1218,6 +1257,18 @@ def __init__( class OpenMCSimOutput: def __init__(self, output_path: str | os.PathLike) -> None: + """Class representing all outputs coming from OpenMC run + + Parameters + ---------- + output_path : str | os.PathLike + Path to simulation output files + + Returns + ------- + None. + + """ self.output = omc.OpenMCStatePoint(output_path) self.tally_numbers = self.output.tally_numbers self.tally_comments = self.output.tally_comments @@ -1227,6 +1278,20 @@ def __init__(self, output_path: str | os.PathLike) -> None: def _create_dataframes( self, tallies: dict ) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame]]: + """Function to create dataframes in JADE format from OpenMC dataframes. + + Parameters + ---------- + tallies : dict + Dictionary of OpenMC tally dataframes, indexed by tally number + + Returns + ------- + tallydata : dict[int, pd.DataFrame] + Dictionary of JADE formatted tally dataframes, indexed by tally number + totalbin : dict[int, None]] + Dictionary of JADE formatted total tally values, each are None for OpenMC + """ tallydata = {} totalbin = {} filter_lookup = { @@ -1279,6 +1344,15 @@ def _create_dataframes( return tallydata, totalbin def process_tally(self) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame]]: + """Function to retrieve OpenMC tally dataframes, and re-format for JADE. 
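+        The statepoint tallies are first exported to pandas dataframes via
+        tallies_to_dataframes and then reshaped into the JADE column layout
+        by _create_dataframes.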
+ + Returns + ------- + tallydata : dict[int, pd.DataFrame] + Dictionary of JADE formatted tally dataframes, indexed by tally number + totalbin : dict[int, None]] + Dictionary of JADE formatted total tally values, each are None for OpenMC + """ tallies = self.output.tallies_to_dataframes() tallydata, totalbin = self._create_dataframes(tallies) return tallydata, totalbin From fcd1f5eabf227a28670b0455f6ac3675f1bafe5e Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 16:49:35 +0000 Subject: [PATCH 30/53] Abstract sim output initialised --- jade/output.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/jade/output.py b/jade/output.py index 425fa49f..a49596ac 100644 --- a/jade/output.py +++ b/jade/output.py @@ -1147,8 +1147,10 @@ def parse_output_data( tally_comments = sim_output.output.tally_comments return sim_output, tally_numbers, tally_comments +class AbstractSimOutput(abc.ABC): + pass -class MCNPSimOutput: +class MCNPSimOutput(AbstractSimOutput): def __init__( self, mctal_file: str | os.PathLike, @@ -1255,7 +1257,7 @@ def __init__( self.tally_comments.append("") -class OpenMCSimOutput: +class OpenMCSimOutput(AbstractSimOutput): def __init__(self, output_path: str | os.PathLike) -> None: """Class representing all outputs coming from OpenMC run From 7f2ddfefd1654f9f67862622768a6c360da822c7 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Tue, 5 Nov 2024 17:13:20 +0000 Subject: [PATCH 31/53] First pass abstract attributes --- jade/output.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/jade/output.py b/jade/output.py index a49596ac..0865b04b 100644 --- a/jade/output.py +++ b/jade/output.py @@ -1148,7 +1148,15 @@ def parse_output_data( return sim_output, tally_numbers, tally_comments class AbstractSimOutput(abc.ABC): - pass + @property + @abc.abstractmethod + def tallydata(self): + pass + + @property + @abc.abstractmethod + def totalbin(self): + pass class MCNPSimOutput(AbstractSimOutput): def __init__( From 2d889202cb5238f6aaacc038ed0e90a57977b8cc Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 08:18:22 +0000 Subject: [PATCH 32/53] Attribute enorcement with NotImplimentError --- jade/output.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/jade/output.py b/jade/output.py index 0865b04b..21c846c6 100644 --- a/jade/output.py +++ b/jade/output.py @@ -1147,16 +1147,14 @@ def parse_output_data( tally_comments = sim_output.output.tally_comments return sim_output, tally_numbers, tally_comments -class AbstractSimOutput(abc.ABC): - @property - @abc.abstractmethod - def tallydata(self): - pass - - @property - @abc.abstractmethod - def totalbin(self): - pass +class AbstractSimOutput: + tallydata = None + totalbin = None + def __init__(self): + if not isinstance(self.tallydata, dict): + raise NotImplementedError + if not isinstance(self.totalbin, dict): + raise NotImplementedError class MCNPSimOutput(AbstractSimOutput): def __init__( From 758047eb306dc1aab5d2aa76fbe051639d7ec238 Mon Sep 17 00:00:00 2001 From: Davide Laghi Date: Wed, 6 Nov 2024 09:24:24 +0100 Subject: [PATCH 33/53] abstract layer in Sphere --- jade/output.py | 6 +- jade/sphereoutput.py | 1350 +++++++++++++++++------------------- tests/sphereoutput_test.py | 53 +- 3 files changed, 687 insertions(+), 722 deletions(-) diff --git a/jade/output.py b/jade/output.py index 3d98ebf9..2353c2ef 100644 --- a/jade/output.py +++ b/jade/output.py @@ -107,8 +107,6 @@ def __init__(self, lib: str, code: str, testname: str, 
session: Session) -> None for available_code in CODES.values(): if code == available_code: setattr(self, available_code, True) - self.raw_data[code] = {} - self.outputs[code] = {} else: setattr(self, available_code, False) @@ -934,7 +932,7 @@ def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: version of the MCNP code used to run the benchmark """ - if self.testname in ['Sphere', 'SphereSDDR']: + if self.testname in ["Sphere", "SphereSDDR"]: if not os.path.exists(simulation_folder): return None _, outf, _ = self._get_output_files(simulation_folder) @@ -1022,7 +1020,7 @@ def _read_code_version(self, simulation_path: os.PathLike) -> str | None: str | None version of the OpenMC code used to run the benchmark """ - if self.testname in ['Sphere', 'SphereSDDR']: + if self.testname in ["Sphere", "SphereSDDR"]: if not os.path.exists(simulation_path): return None _, spfile = self._get_output_files(simulation_path) diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 1f396cae..4810f57d 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -21,28 +21,28 @@ You should have received a copy of the GNU General Public License along with JADE. If not, see . """ + from __future__ import annotations +import abc +import itertools +import json +import logging import math import os import shutil -import json -import itertools -import logging - from typing import TYPE_CHECKING import numpy as np import pandas as pd - +from docx.shared import Inches from tqdm import tqdm from xlsxwriter.utility import xl_rowcol_to_cell -from docx.shared import Inches import jade.atlas as at import jade.excelsupport as exsupp import jade.plotter as plotter -from jade.output import AbstractBenchmarkOutput, OpenMCSimOutput, MCNPSimOutput +from jade.output import AbstractBenchmarkOutput, MCNPSimOutput, OpenMCSimOutput if TYPE_CHECKING: from jade.main import Session @@ -52,6 +52,7 @@ if OMC_AVAIL: import jade.openmc as omc + class AbstractSphereBenchmarkOutput(AbstractBenchmarkOutput): def __init__(self, lib: str, code: str, testname: str, session: Session): """ @@ -97,7 +98,13 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # means that self.test_path is a dict, hence a comparison. No # metadata involved here self.metadata = None - + + def _read_get_output_files(self, results_path: str, code: str): + pass + + def parse_output_data(self, results_path: str): + pass + def single_postprocess(self): """ Execute the full post-processing of a single library (i.e. excel, @@ -115,6 +122,26 @@ def single_postprocess(self): print(" Generating plots...") self._generate_single_plots() + @abc.abstractmethod + def _read_output(self) -> tuple[dict, list, list, list | None]: + """Reads all outputs for a library. To be implemented for each different code. + + Returns + ------- + outputs : dic + Dictionary of sphere output objects used in plotting, keys are material name or ZAID number + results : dic + Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for + tallies with postive values only, all Values = 0 for empty tallies, and returns the corresponding + tally bin if it finds any negative values. Contents of the "Values" worksheet. + errors : dic + Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. + stat_checks : dic + Dictionary the MCNP statistical check results for each material/ZAID. Contents of the "Statistical + Checks" Worksheet. 
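+            Codes that have no MCNP-style statistical checks return None here
+            (e.g. the OpenMC implementation).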
+ """ + pass + def _generate_single_plots(self): """ Generate all the requested plots in a temporary folder @@ -125,60 +152,60 @@ def _generate_single_plots(self): """ - for code, outputs in self.outputs.items(): - # edited by T. Wheeler. openmc requires separate tally numbers which is accounted for here - outpath = os.path.join(self.atlas_path, "tmp") - os.mkdir(outpath) - if self.openmc: - tally_info = [ - ( - 4, - "Averaged Neutron Flux (175 groups)", - "Neutron Flux", - r"$\#/cm^2$", - ), - (14, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), - ] - else: - tally_info = [ - ( - 2, - "Averaged Neutron Flux (175 groups)", - "Neutron Flux", - r"$\#/cm^2$", - ), - (32, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), - ] - for tally, title, quantity, unit in tally_info: - print(" Plotting tally n." + str(tally)) - for zaidnum, output in tqdm(outputs.items()): - title = title - tally_data = output.tallydata.set_index("Tally N.").loc[tally] - energy = tally_data["Energy"].values - values = tally_data["Value"].values - error = tally_data["Error"].values - lib_name = self.session.conf.get_lib_name(self.lib) - lib = { - "x": energy, - "y": values, - "err": error, - "ylabel": str(zaidnum) + " (" + lib_name + ")", - } - data = [lib] - outname = str(zaidnum) + "-" + self.lib + "-" + str(tally) - plot = plotter.Plotter( - data, - title, - outpath, - outname, - quantity, - unit, - "Energy [MeV]", - self.testname, - ) - plot.plot("Binned graph") + # edited by T. Wheeler. openmc requires separate tally numbers which is accounted for here + # TODO this should be brought into the implementation, but actually the tally numbers + # will be made the same across codes so this will not be needed. + outpath = os.path.join(self.atlas_path, "tmp") + os.mkdir(outpath) + if self.openmc: + tally_info = [ + ( + 4, + "Averaged Neutron Flux (175 groups)", + "Neutron Flux", + r"$\#/cm^2$", + ), + (14, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), + ] + else: + tally_info = [ + ( + 2, + "Averaged Neutron Flux (175 groups)", + "Neutron Flux", + r"$\#/cm^2$", + ), + (32, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), + ] + for tally, title, quantity, unit in tally_info: + print(" Plotting tally n." + str(tally)) + for zaidnum, output in tqdm(self.outputs.items()): + tally_data = output.tallydata.set_index("Tally N.").loc[tally] + energy = tally_data["Energy"].values + values = tally_data["Value"].values + error = tally_data["Error"].values + lib_name = self.session.conf.get_lib_name(self.lib) + lib = { + "x": energy, + "y": values, + "err": error, + "ylabel": str(zaidnum) + " (" + lib_name + ")", + } + data = [lib] + outname = str(zaidnum) + "-" + self.lib + "-" + str(tally) + plot = plotter.Plotter( + data, + title, + outpath, + outname, + quantity, + unit, + "Energy [MeV]", + self.testname, + ) + plot.plot("Binned graph") - self._build_atlas(outpath) + self._build_atlas(outpath) def _build_atlas(self, outpath): """ @@ -260,75 +287,75 @@ def _generate_plots(self, allzaids, globalname): None. 
""" - for code, code_outputs in self.outputs.items(): - outpath = os.path.join(self.atlas_path, "tmp") - if not os.path.exists(outpath): - os.mkdir(outpath) - if code == "mcnp": - tally_info = [ - ( - 2, - "Averaged Neutron Flux (175 groups)", - "Neutron Flux", - r"$\#/cm^2$", - ), - (32, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), - ] - if code == "openmc": - tally_info = [ - ( - 4, - "Averaged Neutron Flux (175 groups)", - "Neutron Flux", - r"$\#/cm^2$", - ), - (14, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), - ] - for tally, title, quantity, unit in tally_info: - print(" Plotting tally n." + str(tally)) - for zaidnum in tqdm(allzaids): - # title = title - data = [] - for library, lib_outputs in code_outputs.items(): - try: # Zaid could not be common to the libraries - tally_data = ( - lib_outputs[zaidnum] - .tallydata.set_index("Tally N.") - .loc[tally] - ) - # print(lib_outputs[zaidnum]) - energy = tally_data["Energy"].values - values = tally_data["Value"].values - error = tally_data["Error"].values - lib_name = self.session.conf.get_lib_name(library) - lib = { - "x": energy, - "y": values, - "err": error, - "ylabel": str(zaidnum) + " (" + str(lib_name) + ")", - } - data.append(lib) - except KeyError: - # It is ok, simply nothing to plot here - pass + outpath = os.path.join(self.atlas_path, "tmp") + if not os.path.exists(outpath): + os.mkdir(outpath) + # TODO this if else should be removed once the tally numbers are made the same + if self.code == "mcnp": + tally_info = [ + ( + 2, + "Averaged Neutron Flux (175 groups)", + "Neutron Flux", + r"$\#/cm^2$", + ), + (32, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), + ] + if self.code == "openmc": + tally_info = [ + ( + 4, + "Averaged Neutron Flux (175 groups)", + "Neutron Flux", + r"$\#/cm^2$", + ), + (14, "Averaged Gamma Flux (24 groups)", "Gamma Flux", r"$\#/cm^2$"), + ] + for tally, title, quantity, unit in tally_info: + print(" Plotting tally n." 
+ str(tally)) + for zaidnum in tqdm(allzaids): + # title = title + data = [] + for library, lib_outputs in self.outputs.items(): + try: # Zaid could not be common to the libraries + tally_data = ( + lib_outputs[zaidnum] + .tallydata.set_index("Tally N.") + .loc[tally] + ) + # print(lib_outputs[zaidnum]) + energy = tally_data["Energy"].values + values = tally_data["Value"].values + error = tally_data["Error"].values + lib_name = self.session.conf.get_lib_name(library) + lib = { + "x": energy, + "y": values, + "err": error, + "ylabel": str(zaidnum) + " (" + str(lib_name) + ")", + } + data.append(lib) + except KeyError: + # It is ok, simply nothing to plot here + pass - outname = str(zaidnum) + "-" + globalname + "-" + str(tally) - plot = plotter.Plotter( - data, - title, - outpath, - outname, - quantity, - unit, - "Energy [MeV]", - self.testname, - ) - try: - plot.plot("Binned graph") - except IndexError: - print(data) + outname = str(zaidnum) + "-" + globalname + "-" + str(tally) + plot = plotter.Plotter( + data, + title, + outpath, + outname, + quantity, + unit, + "Energy [MeV]", + self.testname, + ) + try: + plot.plot("Binned graph") + except IndexError: + print(data) - self._build_atlas(outpath) + self._build_atlas(outpath) def _get_organized_output(self): """ @@ -360,7 +387,367 @@ def _get_organized_output(self): return libraries, allzaids, outputs - def _read_mcnp_output(self): + def _generate_dataframe(self, results, errors, stat_checks=None): + """Function to turn the output of the read_{code}_output functions into DataFrames + for use with xlsxwriter + + Arguments + ------ + results (dic): dictionary of tally summaries for each material/ZAID. + errors (dic): dictionaty of average tally errors across all energy bins. + stat_checks (dic, optional): dictionary containing results of MCNP statistical checks + (MCNP only). Defaults to None. + + Returns + ------- + results (DataFrame): previous dictionary but in DataFrame form + errors (DataFrame): previous dictionary but in DataFrame form + stat_checks (DataFrame): previous dictionary but in DataFrame form + """ + # Generate DataFrames + results = pd.DataFrame(results) + errors = pd.DataFrame(errors) + + # Swap Columns and correct zaid sorting + # results + for df in [results, errors]: + df["index"] = pd.to_numeric(df["Zaid"].values, errors="coerce") + df.sort_values("index", inplace=True) + del df["index"] + + df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) + df.reset_index(inplace=True) + + if stat_checks is not None: + stat_checks = pd.DataFrame(stat_checks) + stat_checks["index"] = pd.to_numeric( + stat_checks["Zaid"].values, errors="coerce" + ) + stat_checks.sort_values("index", inplace=True) + del stat_checks["index"] + + stat_checks.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) + stat_checks.reset_index(inplace=True) + return results, errors, stat_checks + + def pp_excel_single(self): + """ + Generate the single library results excel + + Returns + ------- + None. 
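+        Outputs are parsed with _read_output, converted to dataframes and
+        written to Sphere_single_<code>_<lib>.xlsx through
+        exsupp.sphere_single_excel_writer.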
+ + """ + outfolder_path = self.excel_path + # os.makedirs(outfolder_path, exist_ok=True) + # outpath = os.path.join(self.excel_path_mcnp,'Sphere_single_' + 'MCNP_' + self.lib+'.xlsx') + outpath = os.path.join( + outfolder_path, f"Sphere_single_{self.code}_{self.lib}.xlsx" + ) + outputs, results, errors, stat_checks = self._read_output() + results, errors, stat_checks = self._generate_dataframe( + results, errors, stat_checks + ) + self.outputs = outputs + self.results = results + self.errors = errors + self.stat_checks = stat_checks + lib_name = self.session.conf.get_lib_name(self.lib) + # Generate DataFrames + # results = pd.DataFrame(results) + # errors = pd.DataFrame(errors) + # stat_checks = pd.DataFrame(stat_checks) + + # Swap Columns and correct zaid sorting + # results + # for df in [results, errors, stat_checks]: + # df['index'] = pd.to_numeric(df['Zaid'].values, errors='coerce') + # df.sort_values('index', inplace=True) + # del df['index'] + + # df.set_index(['Zaid', 'Zaid Name'], inplace=True) + # df.reset_index(inplace=True) + exsupp.sphere_single_excel_writer( + self, outpath, lib_name, results, errors, stat_checks + ) + + @abc.abstractmethod + def _get_output(self, results_path: str) -> SphereTallyOutput: + """Get the output files for the code being post-processed. + + Returns + ------- + AbstractSimulationOutput + """ + pass + + def pp_excel_comparison(self): + """ + Compute the data and create the excel for all libraries comparisons. + In the meantime, additional data is stored for future plots. + + + Returns + ------- + None. + + """ + + iteration = 0 + outputs = {} + for reflib, tarlib, name in self.couples: + outfolder_path = self.excel_path + # os.mkdir(outfolder_path) + outpath = os.path.join( + outfolder_path, f"Sphere_comparison_{name}_{self.code}.xlsx" + ) + # outpath = os.path.join(self.excel_path_mcnp, 'Sphere_comparison_' + + # name+'.xlsx') + # Get results + comp_dfs = [] + error_dfs = [] + + for test_path in [ + self.test_path[reflib], + self.test_path[tarlib], + ]: + results = [] + errors = [] + iteration = iteration + 1 + outputs_lib = {} + for folder in os.listdir(test_path): + results_path = os.path.join(test_path, folder, self.code) + pieces = folder.split("_") + # Get zaid + zaidnum = pieces[-2] + # Check for material exception + if zaidnum == "Sphere": + zaidnum = pieces[-1].upper() + zaidname = self.mat_settings.loc[zaidnum, "Name"] + else: + zaidname = pieces[-1] + + output = self._get_output(results_path) + + outputs_lib[zaidnum] = output + + # TODO to remove when tallies are the same + if self.code == "mcnp": + res, err, columns = output.get_comparison_data( + ["12", "22", "24", "14", "34", "6", "46"], "mcnp" + ) + elif self.code == "openmc": + res, err, columns = output.get_comparison_data( + ["4", "14"], "openmc" + ) + + try: + zn = int(zaidnum) + except ValueError: # Happens for typical materials + zn = zaidnum + + res.append(zn) + err.append(zn) + res.append(zaidname) + err.append(zaidname) + + results.append(res) + errors.append(err) + + # Add reference library outputs + if iteration == 1: + outputs[reflib] = outputs_lib + + if iteration == 2: + outputs[tarlib] = outputs_lib + + # Generate DataFrames + columns.extend(["Zaid", "Zaid/Mat Name"]) + comp_df = pd.DataFrame(results, columns=columns) + error_df = pd.DataFrame(errors, columns=columns) + comp_df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) + error_df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) + comp_dfs.append(comp_df) + error_dfs.append(error_df) + + self.outputs = outputs 
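+            # comp_dfs/error_dfs now hold one dataframe per library, indexed
+            # by (Zaid, Zaid/Mat Name); the block below keeps only the zaids
+            # common to both libraries and builds (ref - tar) / ref for excel.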
+ # self.results["mcnp"] = results + # self.errors["mcnp"] = errors + + # Consider only common zaids + idx1 = comp_dfs[0].index + idx2 = comp_dfs[1].index + newidx = idx1.intersection(idx2) + + # Build the final excel data + final = (comp_dfs[0].loc[newidx] - comp_dfs[1].loc[newidx]) / comp_dfs[ + 0 + ].loc[newidx] + absdiff = comp_dfs[0].loc[newidx] - comp_dfs[1].loc[newidx] + + # self.diff_data["mcnp"] = final + # self.absdiff["mcnp"] = absdiff + + # Standard deviation + idx1 = absdiff.index + idx2 = error_dfs[0].index + newidx = idx1.intersection(idx2) + + std_dev = absdiff.loc[newidx] / ( + error_dfs[0].loc[newidx] * comp_dfs[0].loc[newidx] + ) + + # self.std_dev["mcnp"] = std_dev + # Correct sorting + for df in [final, absdiff, std_dev]: + df.reset_index(inplace=True) + df["index"] = pd.to_numeric(df["Zaid"].values, errors="coerce") + df.sort_values("index", inplace=True) + del df["index"] + df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) + + # Create and concat the summary + old_l = 0 + old_lim = 0 + rows = [] + limits = [0, 0.05, 0.1, 0.2, 0.2] + for i, sup_lim in enumerate(limits[1:]): + if i == len(limits) - 2: + row = {"Range": "% of cells > " + str(sup_lim * 100)} + for column in final.columns: + cleaned = final[column].replace("", np.nan).dropna() + l_range = len(cleaned[abs(cleaned) > sup_lim]) + try: + row[column] = l_range / len(cleaned) + except ZeroDivisionError: + row[column] = np.nan + else: + row = { + "Range": str(old_lim * 100) + + " < " + + "% of cells" + + " < " + + str(sup_lim * 100) + } + for column in final.columns: + cleaned = final[column].replace("", np.nan).dropna() + lenght = len(cleaned[abs(cleaned) < sup_lim]) + old_l = len(cleaned[abs(cleaned) < limits[i]]) + l_range = lenght - old_l + try: + row[column] = l_range / len(cleaned) + except ZeroDivisionError: + row[column] = np.nan + + old_lim = sup_lim + rows.append(row) + + summary = pd.DataFrame(rows) + summary.set_index("Range", inplace=True) + # If it is zero the CS are equal! 
(NaN if both zeros) + for df in [final, absdiff, std_dev]: + # df[df == np.nan] = 'Not Available' + df.astype({col: float for col in df.columns[1:]}) + df.replace(np.nan, "Not Available", inplace=True) + df.replace(float(0), "Identical", inplace=True) + df.replace(-np.inf, "Reference = 0", inplace=True) + df.replace(1, "Target = 0", inplace=True) + + # retrieve single pp files to add as extra tabs to comparison workbook + single_pp_files = [] + # Add single pp sheets + for lib in [reflib, tarlib]: + pp_dir = self.session.state.get_path( + "single", [lib, "Sphere", self.code, "Excel"] + ) + pp_file = os.listdir(pp_dir)[0] + single_pp_path = os.path.join(pp_dir, pp_file) + single_pp_files.append(single_pp_path) + + # --- Write excel --- + # Generate the excel + exsupp.sphere_comp_excel_writer( + self, + outpath, + name, + final, + absdiff, + std_dev, + summary, + single_pp_files, + ) + + # # Add single pp sheets + # current_wb = openpyxl.load_workbook(outpath) + # for lib in [reflib, tarlib]: + # cp = self.session.state.get_path( + # "single", [lib, "Sphere", "mcnp", "Excel"] + # ) + # file = os.listdir(cp)[0] + # cp = os.path.join(cp, file) + # # open file + # single_wb = openpyxl.load_workbook(cp) + # for ws in single_wb.worksheets: + # destination = current_wb.create_sheet(ws.title + " " + lib) + # exsupp.copy_sheet(ws, destination) + # single_wb.close() + + # current_wb.save(outpath) + # current_wb.close() + + # ex.save() + # "" + + def print_raw(self): + """ + Assigns a path and prints the post processing data as a .csv + + """ + for key, data in self.raw_data.items(): + file = os.path.join(self.raw_path, self.code + key + ".csv") + data.to_csv(file, header=True, index=False) + + metadata_file = os.path.join(self.raw_path, "metadata.json") + with open(metadata_file, "w", encoding="utf-8") as outfile: + json.dump(self.metadata, outfile, indent=4) + + +class MCNPSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): + def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None: + # correct the path + lvl1 = os.path.dirname(simulation_folder) + path = os.path.join(lvl1, os.listdir(lvl1)[0]) + output = self._get_output(path) + try: + version = output.out.get_code_version() + return version + except ValueError: + logging.warning( + "Code version not found in the output file or aux file for %s", + simulation_folder, + ) + logging.warning( + "Contents of the directory: %s", + os.listdir(os.path.dirname(simulation_folder)), + ) + return None + + def _get_output(self, results_path) -> SphereMCNPSimOutput: + # Get mfile + for file in os.listdir(results_path): + if file[-1] == "m": + mfile = file + elif file[-1] == "o": + outfile = file + + # Parse output + mfile = os.path.join(results_path, mfile) + outfile = os.path.join(results_path, outfile) + output = SphereMCNPSimOutput(mfile, outfile) + return output + + def _read_output(self): """Reads all MCNP outputs from a library Returns @@ -407,7 +794,7 @@ def _read_mcnp_output(self): outputs[zaidnum] = output # Adjourn raw Data - self.raw_data["mcnp"][zaidnum] = output.tallydata + self.raw_data[zaidnum] = output.tallydata # Recover statistical checks st_ck = output.stat_checks # Recover results and precisions @@ -422,34 +809,119 @@ def _read_mcnp_output(self): stat_checks.append(st_ck) return outputs, results, errors, stat_checks - def _read_serpent_output(self): - """Reads all Serpent outputs from a library + def _get_output_files(self, results_path: str | os.PathLike) -> tuple: + """ + Recover the output files from a directory - 
NOT YET IMPLEMENTED + Parameters + ---------- + results_path : str or path + path where the results are contained. + code : str + code that generated the output ('mcnp' or 'openmc') + + Raises + ------ + FileNotFoundError + if the required files are not found. Returns ------- - outputs : dic - Dictionary of Serpent sphere output objects used in plotting, keys are material name or ZAID number - results : dic - Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for - tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding - tally bin if it finds any negative values. Contents of the "Values" worksheet. - errors : dic - Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. + file1 : path + path to the first file + file2 : path + path to the second file (only for mcnp) + """ - # Get results - results = [] - errors = [] - stat_checks = [] - outputs = {} - test_path_serpent = os.path.join(self.test_path, "serpent") - for folder in os.listdir(test_path_serpent): - # Call parser here - continue - return outputs, results, errors, stat_checks + file1 = None + file2 = None + file3 = None + + for file_name in os.listdir(results_path): + if file_name[-1] == "m": + file1 = file_name + elif file_name[-1] == "o": + file2 = file_name + elif file_name[-4:] == "msht": + file3 = file_name + + if file1 is None or file2 is None: + raise FileNotFoundError( + f"The following path does not contain the required files for {self.code} output: {results_path}" + ) + + file1 = os.path.join(results_path, file1) if file1 else None + file2 = os.path.join(results_path, file2) if file2 else None + file3 = os.path.join(results_path, file3) if file3 else None - def _read_openmc_output(self): + return file1, file2, file3 + + +class OpenMCSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): + def _read_code_version(self, simulation_path: str | os.PathLike) -> str | None: + # correct the path + lvl1 = os.path.dirname(simulation_path) + path = os.path.join(lvl1, os.listdir(lvl1)[0]) + _, spfile = self._get_output_files(path, "openmc") + statepoint = omc.OpenMCStatePoint(spfile) + version = statepoint.version + return version + + def _get_output(self, results_path: str) -> SphereOpenMCSimOutput: + # Get mfile + for file in os.listdir(results_path): + if "tallies.out" in file: + outfile = file + + # Parse output + _, outfile = self._get_output_files(results_path, "openmc") + output = SphereOpenMCSimOutput(outfile) + return output + + def _get_output_files(self, results_path: str | os.PathLike) -> tuple: + """ + Recover the output files from a directory + + Parameters + ---------- + results_path : str or path + path where the results are contained. + code : str + code that generated the output ('mcnp' or 'openmc') + + Raises + ------ + FileNotFoundError + if the required files are not found. 
+ + Returns + ------- + file1 : path + path to the first file + file2 : path + path to the second file (only for mcnp) + + """ + file1 = None + file2 = None + + for file_name in os.listdir(results_path): + if file_name.endswith(".out"): + file1 = file_name + elif file_name.startswith("statepoint"): + file2 = file_name + + if file1 is None or file2 is None: + raise FileNotFoundError( + f"The following path does not contain the required files for {self.code} output: {results_path}" + ) + + file1 = os.path.join(results_path, file1) if file1 else None + file2 = os.path.join(results_path, file2) if file2 else None + + return file1, file2 + + def _read_output(self): """Reads all OpenMC outputs from a library Returns @@ -485,7 +957,7 @@ def _read_openmc_output(self): output = SphereOpenMCSimOutput(outfile) outputs[zaidnum] = output # Adjourn raw Data - self.raw_data["openmc"][zaidnum] = output.tallydata + self.raw_data[zaidnum] = output.tallydata # Recover statistical checks # st_ck = output.stat_checks # Recover results and precisions @@ -496,553 +968,36 @@ def _read_openmc_output(self): results.append(res) errors.append(err) # stat_checks.append(st_ck) - return ( - outputs, - results, - errors, - ) # stat_checks + return (outputs, results, errors, None) # stat_checks - def _generate_dataframe(self, results, errors, stat_checks=None): - """Function to turn the output of the read_{code}_output functions into DataFrames - for use with xlsxwriter - Arguments - ------ - results (dic): dictionary of tally summaries for each material/ZAID. - errors (dic): dictionaty of average tally errors across all energy bins. - stat_checks (dic, optional): dictionary containing results of MCNP statistical checks - (MCNP only). Defaults to None. - - Returns - ------- - results (DataFrame): previous dictionary but in DataFrame form - errors (DataFrame): previous dictionary but in DataFrame form - stat_checks (DataFrame): previous dictionary but in DataFrame form - """ - # Generate DataFrames - results = pd.DataFrame(results) - errors = pd.DataFrame(errors) - - # Swap Columns and correct zaid sorting - # results - for df in [results, errors]: - df["index"] = pd.to_numeric(df["Zaid"].values, errors="coerce") - df.sort_values("index", inplace=True) - del df["index"] - - df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - df.reset_index(inplace=True) - - if stat_checks is not None: - stat_checks = pd.DataFrame(stat_checks) - stat_checks["index"] = pd.to_numeric( - stat_checks["Zaid"].values, errors="coerce" - ) - stat_checks.sort_values("index", inplace=True) - del stat_checks["index"] - - stat_checks.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - stat_checks.reset_index(inplace=True) - return results, errors, stat_checks - - def pp_excel_single(self): - """ - Generate the single library results excel - - Returns - ------- - None. 
- - """ - self.outputs = {} - self.results = {} - self.errors = {} - self.stat_checks = {} - - if self.mcnp: - outfolder_path = self.excel_path - # os.makedirs(outfolder_path, exist_ok=True) - # outpath = os.path.join(self.excel_path_mcnp,'Sphere_single_' + 'MCNP_' + self.lib+'.xlsx') - outpath = os.path.join( - outfolder_path, "Sphere_single_" + "MCNP_" + self.lib + ".xlsx" - ) - outputs, results, errors, stat_checks = self._read_mcnp_output() - results, errors, stat_checks = self._generate_dataframe( - results, errors, stat_checks - ) - self.outputs["mcnp"] = outputs - self.results["mcnp"] = results - self.errors["mcnp"] = errors - self.stat_checks["mcnp"] = stat_checks - lib_name = self.session.conf.get_lib_name(self.lib) - # Generate DataFrames - # results = pd.DataFrame(results) - # errors = pd.DataFrame(errors) - # stat_checks = pd.DataFrame(stat_checks) - - # Swap Columns and correct zaid sorting - # results - # for df in [results, errors, stat_checks]: - # df['index'] = pd.to_numeric(df['Zaid'].values, errors='coerce') - # df.sort_values('index', inplace=True) - # del df['index'] - - # df.set_index(['Zaid', 'Zaid Name'], inplace=True) - # df.reset_index(inplace=True) - exsupp.sphere_single_excel_writer( - self, outpath, lib_name, results, errors, stat_checks - ) - - if self.serpent: - pass - - if self.openmc: - outfolder_path = self.excel_path - # os.mkdir(outfolder_path) - # outpath = os.path.join(self.excel_path_openmc,'Sphere_single_' + 'OpenMC_' + self.lib+'.xlsx') - outpath = os.path.join( - outfolder_path, "Sphere_single_" + "OpenMC_" + self.lib + ".xlsx" - ) - outputs, results, errors = self._read_openmc_output() - results, errors, stat_checks = self._generate_dataframe(results, errors) - self.outputs["openmc"] = outputs - self.results["openmc"] = results - self.errors["openmc"] = errors - self.stat_checks["openmc"] = stat_checks - - exsupp.sphere_single_excel_writer(self, outpath, self.lib, results, errors) - - if self.d1s: - pass - - def pp_excel_comparison(self): - """ - Compute the data and create the excel for all libraries comparisons. - In the meantime, additional data is stored for future plots. +class SerpentSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): + def _read_output(self): + """Reads all Serpent outputs from a library + NOT YET IMPLEMENTED Returns ------- - None. 
- - """ - - code_outputs = {} - - if self.mcnp: - iteration = 0 - outputs = {} - for reflib, tarlib, name in self.couples: - outfolder_path = self.excel_path - # os.mkdir(outfolder_path) - outpath = os.path.join( - outfolder_path, "Sphere_comparison_" + name + "_mcnp.xlsx" - ) - # outpath = os.path.join(self.excel_path_mcnp, 'Sphere_comparison_' + - # name+'.xlsx') - # Get results - comp_dfs = [] - error_dfs = [] - - for test_path in [ - self.test_path[reflib], - self.test_path[tarlib], - ]: - results = [] - errors = [] - iteration = iteration + 1 - outputs_lib = {} - for folder in os.listdir(test_path): - results_path = os.path.join(test_path, folder, "mcnp") - pieces = folder.split("_") - # Get zaid - zaidnum = pieces[-2] - # Check for material exception - if zaidnum == "Sphere": - zaidnum = pieces[-1].upper() - zaidname = self.mat_settings.loc[zaidnum, "Name"] - else: - zaidname = pieces[-1] - - # Get mfile - for file in os.listdir(results_path): - if file[-1] == "m": - mfile = file - elif file[-1] == "o": - outfile = file - - # Parse output - mfile = os.path.join(results_path, mfile) - outfile = os.path.join(results_path, outfile) - output = SphereMCNPSimOutput(mfile, outfile) - - outputs_lib[zaidnum] = output - res, err, columns = output.get_comparison_data( - ["12", "22", "24", "14", "34", "6", "46"], "mcnp" - ) - try: - zn = int(zaidnum) - except ValueError: # Happens for typical materials - zn = zaidnum - - res.append(zn) - err.append(zn) - res.append(zaidname) - err.append(zaidname) - - results.append(res) - errors.append(err) - - # Add reference library outputs - if iteration == 1: - outputs[reflib] = outputs_lib - - if iteration == 2: - outputs[tarlib] = outputs_lib - - # Generate DataFrames - columns.extend(["Zaid", "Zaid/Mat Name"]) - comp_df = pd.DataFrame(results, columns=columns) - error_df = pd.DataFrame(errors, columns=columns) - comp_df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - error_df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - comp_dfs.append(comp_df) - error_dfs.append(error_df) - - code_outputs["mcnp"] = outputs - self.outputs = code_outputs - # self.results["mcnp"] = results - # self.errors["mcnp"] = errors - - # Consider only common zaids - idx1 = comp_dfs[0].index - idx2 = comp_dfs[1].index - newidx = idx1.intersection(idx2) - - # Build the final excel data - final = (comp_dfs[0].loc[newidx] - comp_dfs[1].loc[newidx]) / comp_dfs[ - 0 - ].loc[newidx] - absdiff = comp_dfs[0].loc[newidx] - comp_dfs[1].loc[newidx] - - # self.diff_data["mcnp"] = final - # self.absdiff["mcnp"] = absdiff - - # Standard deviation - idx1 = absdiff.index - idx2 = error_dfs[0].index - newidx = idx1.intersection(idx2) - - std_dev = absdiff.loc[newidx] / ( - error_dfs[0].loc[newidx] * comp_dfs[0].loc[newidx] - ) - - # self.std_dev["mcnp"] = std_dev - # Correct sorting - for df in [final, absdiff, std_dev]: - df.reset_index(inplace=True) - df["index"] = pd.to_numeric(df["Zaid"].values, errors="coerce") - df.sort_values("index", inplace=True) - del df["index"] - df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - - # Create and concat the summary - old_l = 0 - old_lim = 0 - rows = [] - limits = [0, 0.05, 0.1, 0.2, 0.2] - for i, sup_lim in enumerate(limits[1:]): - if i == len(limits) - 2: - row = {"Range": "% of cells > " + str(sup_lim * 100)} - for column in final.columns: - cleaned = final[column].replace("", np.nan).dropna() - l_range = len(cleaned[abs(cleaned) > sup_lim]) - try: - row[column] = l_range / len(cleaned) - except ZeroDivisionError: - row[column] = np.nan 
- else: - row = { - "Range": str(old_lim * 100) - + " < " - + "% of cells" - + " < " - + str(sup_lim * 100) - } - for column in final.columns: - cleaned = final[column].replace("", np.nan).dropna() - lenght = len(cleaned[abs(cleaned) < sup_lim]) - old_l = len(cleaned[abs(cleaned) < limits[i]]) - l_range = lenght - old_l - try: - row[column] = l_range / len(cleaned) - except ZeroDivisionError: - row[column] = np.nan - - old_lim = sup_lim - rows.append(row) - - summary = pd.DataFrame(rows) - summary.set_index("Range", inplace=True) - # If it is zero the CS are equal! (NaN if both zeros) - for df in [final, absdiff, std_dev]: - # df[df == np.nan] = 'Not Available' - df.astype({col: float for col in df.columns[1:]}) - df.replace(np.nan, "Not Available", inplace=True) - df.replace(float(0), "Identical", inplace=True) - df.replace(-np.inf, "Reference = 0", inplace=True) - df.replace(1, "Target = 0", inplace=True) - - # retrieve single pp files to add as extra tabs to comparison workbook - single_pp_files = [] - # Add single pp sheets - for lib in [reflib, tarlib]: - pp_dir = self.session.state.get_path( - "single", [lib, "Sphere", "mcnp", "Excel"] - ) - pp_file = os.listdir(pp_dir)[0] - single_pp_path = os.path.join(pp_dir, pp_file) - single_pp_files.append(single_pp_path) - - # --- Write excel --- - # Generate the excel - exsupp.sphere_comp_excel_writer( - self, - outpath, - name, - final, - absdiff, - std_dev, - summary, - single_pp_files, - ) - - # # Add single pp sheets - # current_wb = openpyxl.load_workbook(outpath) - # for lib in [reflib, tarlib]: - # cp = self.session.state.get_path( - # "single", [lib, "Sphere", "mcnp", "Excel"] - # ) - # file = os.listdir(cp)[0] - # cp = os.path.join(cp, file) - # # open file - # single_wb = openpyxl.load_workbook(cp) - # for ws in single_wb.worksheets: - # destination = current_wb.create_sheet(ws.title + " " + lib) - # exsupp.copy_sheet(ws, destination) - # single_wb.close() - - # current_wb.save(outpath) - # current_wb.close() - - # ex.save() - # """ - if self.openmc: - iteration = 0 - outputs = {} - for reflib, tarlib, name in self.couples: - outfolder_path = self.excel_path - # os.mkdir(outfolder_path) - outpath = os.path.join( - outfolder_path, "Sphere_comparison_" + name + "_openmc.xlsx" - ) - # outpath = os.path.join(self.excel_path_openmc, 'Sphere_comparison_' + - # name+'openmc.xlsx') - # Get results - comp_dfs = [] - error_dfs = [] - - for test_path in [self.test_path[reflib], self.test_path[tarlib]]: - results = [] - errors = [] - iteration = iteration + 1 - outputs_lib = {} - for folder in os.listdir(test_path): - results_path = os.path.join(test_path, folder, "openmc") - pieces = folder.split("_") - # Get zaid - zaidnum = pieces[-2] - # Check for material exception - if zaidnum == "Sphere": - zaidnum = pieces[-1].upper() - zaidname = self.mat_settings.loc[zaidnum, "Name"] - else: - zaidname = pieces[-1] - - # Get mfile - for file in os.listdir(results_path): - if "tallies.out" in file: - outfile = file - - # Parse output - _, outfile = self._get_output_files(results_path, "openmc") - output = SphereOpenMCSimOutput(outfile) - outputs_lib[zaidnum] = output - res, err, columns = output.get_comparison_data( - ["4", "14"], "openmc" - ) - try: - zn = int(zaidnum) - except ValueError: # Happens for typical materials - zn = zaidnum - - res.append(zn) - err.append(zn) - res.append(zaidname) - err.append(zaidname) - - results.append(res) - errors.append(err) - # Add reference library outputs - if iteration == 1: - outputs[reflib] = outputs_lib - - 
if test_path == os.path.join(self.test_path[tarlib], "openmc"): - outputs[tarlib] = outputs_lib - - # Generate DataFrames - columns.extend(["Zaid", "Zaid/Mat Name"]) - comp_df = pd.DataFrame(results, columns=columns) - error_df = pd.DataFrame(errors, columns=columns) - comp_df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - error_df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - comp_dfs.append(comp_df) - error_dfs.append(error_df) - - # outputs_couple = outputs - # self.results = results - code_outputs["openmc"] = outputs - self.outputs = code_outputs - # self.results["openmc"] = results - # self.errors["openmc"] = errors - # Consider only common zaids - idx1 = comp_dfs[0].index - idx2 = comp_dfs[1].index - newidx = idx1.intersection(idx2) - - # Build the final excel data - final = (comp_dfs[0].loc[newidx] - comp_dfs[1].loc[newidx]) / comp_dfs[ - 0 - ].loc[newidx] - absdiff = comp_dfs[0].loc[newidx] - comp_dfs[1].loc[newidx] - - # self.diff_data["openmc"] = final - # self.absdiff["openmc"] = absdiff - - # Standard deviation - idx1 = absdiff.index - idx2 = error_dfs[0].index - newidx = idx1.intersection(idx2) - - std_dev = absdiff.loc[newidx] / error_dfs[0].loc[newidx] - - # self.std_dev["openmc"] = std_dev - - # Correct sorting - for df in [final, absdiff, std_dev]: - df.reset_index(inplace=True) - df["index"] = pd.to_numeric(df["Zaid"].values, errors="coerce") - df.sort_values("index", inplace=True) - del df["index"] - df.set_index(["Zaid", "Zaid/Mat Name"], inplace=True) - # Create and concat the summary - old_l = 0 - old_lim = 0 - rows = [] - limits = [0, 0.05, 0.1, 0.2, 0.2] - for i, sup_lim in enumerate(limits[1:]): - if i == len(limits) - 2: - row = {"Range": "% of cells > " + str(sup_lim * 100)} - for column in final.columns: - cleaned = final[column].replace("", np.nan).dropna() - l_range = len(cleaned[abs(cleaned) > sup_lim]) - try: - row[column] = l_range / len(cleaned) - except ZeroDivisionError: - row[column] = np.nan - else: - row = { - "Range": str(old_lim * 100) - + " < " - + "% of cells" - + " < " - + str(sup_lim * 100) - } - for column in final.columns: - cleaned = final[column].replace("", np.nan).dropna() - lenght = len(cleaned[abs(cleaned) < sup_lim]) - old_l = len(cleaned[abs(cleaned) < limits[i]]) - l_range = lenght - old_l - try: - row[column] = l_range / len(cleaned) - except ZeroDivisionError: - row[column] = np.nan - - old_lim = sup_lim - rows.append(row) - - summary = pd.DataFrame(rows) - summary.set_index("Range", inplace=True) - # If it is zero the CS are equal! 
(NaN if both zeros) - for df in [final, absdiff, std_dev]: - # df[df == np.nan] = 'Not Available' - df.astype({col: float for col in df.columns[1:]}) - df.replace(np.nan, "Not Available", inplace=True) - df.replace(float(0), "Identical", inplace=True) - df.replace(-np.inf, "Reference = 0", inplace=True) - df.replace(1, "Target = 0", inplace=True) - - # retrieve single pp files to add as extra tabs to comparison workbook - single_pp_files = [] - # Add single pp sheets - for lib in [reflib, tarlib]: - pp_dir = self.session.state.get_path( - "single", [lib, "Sphere", "openmc", "Excel"] - ) - pp_file = os.listdir(pp_dir)[0] - single_pp_path = os.path.join(pp_dir, pp_file) - single_pp_files.append(single_pp_path) - - # --- Write excel --- - # Generate the excel - exsupp.sphere_comp_excel_writer( - self, - outpath, - name, - final, - absdiff, - std_dev, - summary, - single_pp_files, - ) - if self.serpent: - pass - - def print_raw(self): - """ - Assigns a path and prints the post processing data as a .csv - + outputs : dic + Dictionary of Serpent sphere output objects used in plotting, keys are material name or ZAID number + results : dic + Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for + tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding + tally bin if it finds any negative values. Contents of the "Values" worksheet. + errors : dic + Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. """ - if self.mcnp: - for key, data in self.raw_data["mcnp"].items(): - file = os.path.join(self.raw_path, "mcnp" + key + ".csv") - data.to_csv(file, header=True, index=False) - if self.serpent: - for key, data in self.raw_data["serpent"].items(): - file = os.path.join(self.raw_path, "serpent" + key + ".csv") - data.to_csv(file, header=True, index=False) - if self.openmc: - for key, data in self.raw_data["openmc"].items(): - file = os.path.join(self.raw_path, "openmc" + key + ".csv") - data.to_csv(file, header=True, index=False) - if self.d1s: - for key, data in self.raw_data["d1s"].items(): - file = os.path.join(self.raw_path, "d1s" + key + ".csv") - data.to_csv(file, header=True, index=False) - - metadata_file = os.path.join(self.raw_path, "metadata.json") - with open(metadata_file, "w", encoding="utf-8") as outfile: - json.dump(self.metadata, outfile, indent=4) + # Get results + results = [] + errors = [] + stat_checks = [] + outputs = {} + test_path_serpent = os.path.join(self.test_path, "serpent") + for folder in os.listdir(test_path_serpent): + # Call parser here + continue + return outputs, results, errors, stat_checks class SphereTallyOutput: @@ -1339,7 +1294,7 @@ def process_tally(self): return tallydata, totalbin -class SphereSDDROutput(SphereOutput): +class SphereSDDROutput(MCNPSphereBenchmarkOutput): times = ["0s", "2.7h", "24h", "11.6d", "30d", "10y"] timecols = { "0s": "1.0", @@ -1573,7 +1528,6 @@ def _generate_plots(self, allzaids, globalname): title = "Gamma Leakage flux after a {} cooldown".format(time) data = [] for lib in libraries: - try: # Zaid could not be common to the libraries outp = self.outputs["d1s"][zaidnum, mt, lib] except KeyError: @@ -2007,7 +1961,7 @@ def _parserunmcnp(self, test_path, lib): outputs[zaidnum, mt, lib] = output # Adjourn raw Data - self.raw_data["d1s"][zaidnum, mt, lib] = output.tallydata + self.raw_data[zaidnum, mt, lib] = output.tallydata # Recover statistical checks st_ck = output.stat_checks # Recover results and 
precisions @@ -2028,7 +1982,7 @@ def print_raw(self): Assigns a path and prints the post processing data as a .csv """ - for key, data in self.raw_data["d1s"].items(): + for key, data in self.raw_data.items(): # Follow the same structure of other benchmarks for tallynum, df in data.items(): filename = "{}_{}_{}.csv".format(key[0], key[1], tallynum) @@ -2042,9 +1996,7 @@ def print_raw(self): class SphereSDDRMCNPOutput(SphereMCNPSimOutput): - def _get_tallydata(self, mctal): - return self.tallydata, self.totalbin @staticmethod diff --git a/tests/sphereoutput_test.py b/tests/sphereoutput_test.py index 014f88f1..6af02536 100644 --- a/tests/sphereoutput_test.py +++ b/tests/sphereoutput_test.py @@ -21,11 +21,14 @@ You should have received a copy of the GNU General Public License along with JADE. If not, see . """ -import sys + +import json import os +import sys + import pandas as pd import pytest -import json + from jade.__openmc__ import OMC_AVAIL cp = os.path.dirname(os.path.abspath(__file__)) @@ -37,13 +40,19 @@ root = os.path.dirname(cp) ISOTOPES_FILE = os.path.join(root, "jade", "resources", "Isotopes.txt") -from jade.main import Session -from jade.configuration import Configuration from f4enix.input.libmanager import LibManager -from jade.status import Status + +import jade.sphereoutput as sout from jade.__version__ import __version__ +from jade.configuration import Configuration +from jade.main import Session from jade.output import MCNPSimOutput -import jade.sphereoutput as sout +from jade.sphereoutput import ( + MCNPSphereBenchmarkOutput, + OpenMCSphereBenchmarkOutput, + SerpentSphereBenchmarkOutput, +) +from jade.status import Status class MockUpSession(Session): @@ -69,7 +78,7 @@ def __init__(self, tmp_dir: os.PathLike, lm: LibManager): self.state = Status(self) -class TestSphereOutput: +class TestSphereBenchamarkOutput: @pytest.fixture() def session_mock(self, tmpdir, lm: LibManager): session = MockUpSession(tmpdir, lm) @@ -87,7 +96,7 @@ def lm(self): return LibManager(df_lib, isotopes_file=ISOTOPES_FILE) def test_sphereoutput_mcnp(self, session_mock: MockUpSession): - sphere_00c = sout.SphereOutput("00c", "mcnp", "Sphere", session_mock) + sphere_00c = MCNPSphereBenchmarkOutput("00c", "mcnp", "Sphere", session_mock) sphere_00c.single_postprocess() sphere_00c.print_raw() @@ -105,27 +114,33 @@ def test_sphereoutput_mcnp(self, session_mock: MockUpSession): assert metadata["jade_version"] == __version__ assert metadata["code_version"] == "6.2" - sphere_31c = sout.SphereOutput("31c", "mcnp", "Sphere", session_mock) + sphere_31c = MCNPSphereBenchmarkOutput("31c", "mcnp", "Sphere", session_mock) sphere_31c.single_postprocess() - sphere_comp = sout.SphereOutput(["31c", "00c"], "mcnp", "Sphere", session_mock) + sphere_comp = MCNPSphereBenchmarkOutput( + ["31c", "00c"], "mcnp", "Sphere", session_mock + ) sphere_comp.compare() assert True @pytest.mark.skipif(not OMC_AVAIL, reason="OpenMC is not available") def test_sphereoutput_openmc(self, session_mock: MockUpSession): - sphere_00c = sout.SphereOutput("00c", "openmc", "Sphere", session_mock) + sphere_00c = OpenMCSphereBenchmarkOutput( + "00c", "openmc", "Sphere", session_mock + ) sphere_00c.single_postprocess() - sphere_31c = sout.SphereOutput("31c", "openmc", "Sphere", session_mock) + sphere_31c = OpenMCSphereBenchmarkOutput( + "31c", "openmc", "Sphere", session_mock + ) sphere_31c.single_postprocess() - sphere_comp = sout.SphereOutput( + sphere_comp = OpenMCSphereBenchmarkOutput( ["31c", "00c"], "openmc", "Sphere", session_mock ) 
sphere_comp.compare() assert True def test_read_mcnp_output(self, session_mock: MockUpSession): - sphere_00c = sout.SphereOutput("00c", "mcnp", "Sphere", session_mock) - outputs, results, errors, stat_checks = sphere_00c._read_mcnp_output() + sphere_00c = MCNPSphereBenchmarkOutput("00c", "mcnp", "Sphere", session_mock) + outputs, results, errors, stat_checks = sphere_00c._read_output() tally_values = outputs["M10"].tallydata["Value"] tally_errors = outputs["M10"].tallydata["Error"] assert 3.80420e-07 == pytest.approx(tally_values[10]) @@ -140,8 +155,10 @@ def test_read_mcnp_output(self, session_mock: MockUpSession): @pytest.mark.skipif(not OMC_AVAIL, reason="OpenMC is not available") def test_read_openmc_output(self, session_mock: MockUpSession): - sphere_00c = sout.SphereOutput("00c", "openmc", "Sphere", session_mock) - outputs, results, errors = sphere_00c._read_openmc_output() + sphere_00c = OpenMCSphereBenchmarkOutput( + "00c", "openmc", "Sphere", session_mock + ) + outputs, results, errors = sphere_00c._read_output() tally_values = outputs["M10"].tallydata["Value"] tally_errors = outputs["M10"].tallydata["Error"] assert 0.8271037652370498 == pytest.approx(tally_values[10]) @@ -184,7 +201,6 @@ def lm(self): return LibManager(df_lib, isotopes_file=ISOTOPES_FILE) def test_compute_single_results(self): - cols = [ "Parent", "Parent Name", @@ -238,7 +254,6 @@ def test_full_comparison(self, tmpdir, lm: LibManager): class TestSphereSDDRMCNPOutput: - out = sout.SphereSDDRMCNPOutput( os.path.join(resources, "SphereSDDR_11023_Na-23_102_m"), os.path.join(resources, "SphereSDDR_11023_Na-23_102_o"), From 06df2c77881305135e2478a8b701e147f02a9f87 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 08:41:42 +0000 Subject: [PATCH 34/53] Refactored raw_data to remove code layer --- jade/expoutput.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index 271adc72..7dc0b86e 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -287,7 +287,8 @@ def _extract_outputs(self) -> None: code_raw_data = {(self.testname, lib): tallydata} # Adjourn raw Data - self.raw_data[code_tag].update(code_raw_data) + #self.raw_data[code_tag].update(code_raw_data) + self.raw_data.update(code_raw_data) def _read_exp_results(self): """ @@ -353,14 +354,7 @@ def _print_raw(self): ------- None. 
""" - if self.mcnp: - raw_to_print = self.raw_data["mcnp"].items() - if self.openmc: - pass - if self.serpent: - pass - if self.d1s: - raw_to_print = self.raw_data["d1s"].items() + raw_to_print = self.raw_data.items() for (folder, lib), item in raw_to_print: # Create the lib directory if it is not there From e4964182f26b76770d81546661b5163a3ceedf25 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 09:00:34 +0000 Subject: [PATCH 35/53] Started fixing expout tests --- jade/expoutput.py | 30 +++++++++++++++--------------- tests/expoutput_test.py | 16 ++++++++-------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index 7dc0b86e..ab51d314 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -1169,7 +1169,7 @@ def _case_tree_df_build(self): case_tree.loc[cont, "Library"] = self.session.conf.get_lib_name(lib) # Put tally values in dataframe for tally in self.outputs[(case, lib)].mctal.tallies: - temp = (self.raw_data["mcnp"])[(case, lib)] + temp = (self.raw_data)[(case, lib)] val = temp[tally.tallyNumber].iloc[-1]["Value"] err = temp[tally.tallyNumber].iloc[-1]["Error"] case_tree.loc[cont, tally.tallyComment] = val @@ -1810,21 +1810,21 @@ def _pp_excel_comparison(self): t = (mat, lib_names_dict[idx_col[0]]) if idx_col[1] == "Value": if mat != "TLD": - vals = self.raw_data[code][t][4]["Value"].values[ + vals = self.raw_data[t][4]["Value"].values[ : len(x) ] else: - vals = self.raw_data[code][t][6]["Value"].values[ + vals = self.raw_data[t][6]["Value"].values[ : len(x) ] df_tab[idx_col] = vals elif idx_col[1] == "C/E Error": if mat != "TLD": - errs = self.raw_data[code][t][4]["Error"].values[ + errs = self.raw_data[t][4]["Error"].values[ : len(x) ] else: - errs = self.raw_data[code][t][6]["Error"].values[ + errs = self.raw_data[t][6]["Error"].values[ : len(x) ] vals1 = np.square(errs) @@ -1836,11 +1836,11 @@ def _pp_excel_comparison(self): df_tab[idx_col] = ce_err else: if mat != "TLD": - vals1 = self.raw_data[code][t][4]["Value"].values[ + vals1 = self.raw_data[t][4]["Value"].values[ : len(x) ] else: - vals1 = self.raw_data[code][t][6]["Value"].values[ + vals1 = self.raw_data[t][6]["Value"].values[ : len(x) ] vals2 = exp_data_df.loc[:, "Reaction Rate"].to_numpy() @@ -1904,20 +1904,20 @@ def _build_atlas(self, tmp_path, atlas): y = [] err = [] if material != "TLD": - v = self.raw_data[code][(material, lib)][4]["Value"].values[ + v = self.raw_data[(material, lib)][4]["Value"].values[ : len(x) ] else: - v = self.raw_data[code][(material, lib)][6]["Value"].values[ + v = self.raw_data[(material, lib)][6]["Value"].values[ : len(x) ] y.append(v) if material != "TLD": - v = self.raw_data[code][(material, lib)][4]["Error"].values[ + v = self.raw_data[(material, lib)][4]["Error"].values[ : len(x) ] else: - v = self.raw_data[code][(material, lib)][6]["Error"].values[ + v = self.raw_data[(material, lib)][6]["Error"].values[ : len(x) ] err.append(v) @@ -1947,11 +1947,11 @@ def _get_conv_df(self, mat, size): conv_df = pd.DataFrame() for lib in self.lib[1:]: if mat != "TLD": - max = self.raw_data[code][(mat, lib)][4]["Error"].values[:size].max() - avg = self.raw_data[code][(mat, lib)][4]["Error"].values[:size].mean() + max = self.raw_data[(mat, lib)][4]["Error"].values[:size].max() + avg = self.raw_data[(mat, lib)][4]["Error"].values[:size].mean() else: - max = self.raw_data[code][(mat, lib)][6]["Error"].values[:size].max() - avg = self.raw_data[code][(mat, lib)][6]["Error"].values[:size].mean() + max = self.raw_data[(mat, 
lib)][6]["Error"].values[:size].max() + avg = self.raw_data[(mat, lib)][6]["Error"].values[:size].mean() library = self.session.conf.get_lib_name(lib) conv_df.loc["Max Error", library] = max conv_df.loc["Average Error", library] = avg diff --git a/tests/expoutput_test.py b/tests/expoutput_test.py index be0166a3..55b6dc2c 100644 --- a/tests/expoutput_test.py +++ b/tests/expoutput_test.py @@ -122,13 +122,13 @@ def test_spectrumoutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Be-15", "32c")][5]["Energy"].iloc[ + self.benchoutput_comp.raw_data[("Be-15", "32c")][5]["Energy"].iloc[ 10 ] == 0.0196645 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Be-15", "32c")][5]["Value"].iloc[ + self.benchoutput_comp.raw_data[("Be-15", "32c")][5]["Value"].iloc[ 14 ] == 3.93654e-08 @@ -155,11 +155,11 @@ def test_shieldingoutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Au", "31c")][4]["Cells"].iloc[1] + self.benchoutput_comp.raw_data[("Au", "31c")][4]["Cells"].iloc[1] == 643 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Au", "31c")][4]["Error"].iloc[2] + self.benchoutput_comp.raw_data[("Au", "31c")][4]["Error"].iloc[2] == 0.6207 ) assert len(os.listdir(path2raw)) == 3 @@ -176,13 +176,13 @@ def test_tiaraoutput(self, session_mock: MockUpSession): self.benchoutput_comp.compare() # conf = config.iloc[4] assert ( - self.benchoutput_comp.raw_data["mcnp"][("cc-43-25-40", "32c")][14][ + self.benchoutput_comp.raw_data[("cc-43-25-40", "32c")][14][ "Value" ].iloc[0] == 448.291 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("cc-43-25-40", "32c")][24][ + self.benchoutput_comp.raw_data[("cc-43-25-40", "32c")][24][ "Error" ].iloc[0] == 0.5933 @@ -201,13 +201,13 @@ def test_tiaraoutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("cc-43-25-00", "32c")][24][ + self.benchoutput_comp.raw_data[("cc-43-25-00", "32c")][24][ "Value" ].iloc[0] == 6.02648e03 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("cc-43-25-00", "32c")][44][ + self.benchoutput_comp.raw_data[("cc-43-25-00", "32c")][44][ "Value" ].iloc[0] == 4.59843e02 From e01017b4602e0b5ecf0a750c8c638e9f03b61a60 Mon Sep 17 00:00:00 2001 From: Davide Laghi Date: Wed, 6 Nov 2024 10:01:26 +0100 Subject: [PATCH 36/53] fix sphere_output --- jade/sphereoutput.py | 69 ++++++++++++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 25 deletions(-) diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 4810f57d..3562fcdc 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -86,18 +86,18 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): zaid_path = os.path.join(self.cnf_path, "ZaidSettings.csv") self.zaid_settings = pd.read_csv(zaid_path, sep=",").set_index("Z") - # The metadata needs to be re-read since no multitest is foreseen in the - # normal BenchmarkOutput class - # Read the metadata, they should be all equal - try: - results_path = os.path.join( - self.test_path, os.listdir(self.test_path)[0], code - ) - self.metadata = self._read_metadata_run(results_path) - except TypeError: - # means that self.test_path is a dict, hence a comparison. 
No - # metadata involved here - self.metadata = None + # # The metadata needs to be re-read since no multitest is foreseen in the + # # normal BenchmarkOutput class + # # Read the metadata, they should be all equal + # try: + # results_path = os.path.join( + # self.test_path, os.listdir(self.test_path)[0], code + # ) + # self.metadata = self._read_metadata_run(results_path) + # except TypeError: + # # means that self.test_path is a dict, hence a comparison. No + # # metadata involved here + # self.metadata = None def _read_get_output_files(self, results_path: str, code: str): pass @@ -374,11 +374,11 @@ def _get_organized_output(self): libraries = [] outputs = [] zaids = [] - for code, library_outputs in self.outputs.items(): - for libname, outputslib in library_outputs.items(): - libraries.append(libname) - outputs.append(outputslib) - zaids.append(list(outputslib.keys())) + + for libname, outputslib in self.outputs.items(): + libraries.append(libname) + outputs.append(outputslib) + zaids.append(list(outputslib.keys())) # Extend list to all zaids allzaids = zaids[0] for zaidlist in zaids[1:]: @@ -712,13 +712,35 @@ def print_raw(self): with open(metadata_file, "w", encoding="utf-8") as outfile: json.dump(self.metadata, outfile, indent=4) + def _read_metadata_run(self, simulation_folder: os.PathLike) -> dict: + """Retrieve the metadata from the run + + Parameters + ---------- + pathtofile : os.PathLike + path to metadata file + + Returns + ------- + dict + metadata dictionary + """ + # the super can be used, just changing the expected path + dirname = os.path.dirname(simulation_folder) + folder = os.path.join( + dirname, + os.listdir(dirname)[0], + self.code, + ) + + metadata = super()._read_metadata_run(folder) + + return metadata + class MCNPSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None: - # correct the path - lvl1 = os.path.dirname(simulation_folder) - path = os.path.join(lvl1, os.listdir(lvl1)[0]) - output = self._get_output(path) + output = self._get_output(simulation_folder) try: version = output.out.get_code_version() return version @@ -859,10 +881,7 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: class OpenMCSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): def _read_code_version(self, simulation_path: str | os.PathLike) -> str | None: - # correct the path - lvl1 = os.path.dirname(simulation_path) - path = os.path.join(lvl1, os.listdir(lvl1)[0]) - _, spfile = self._get_output_files(path, "openmc") + _, spfile = self._get_output_files(simulation_path) statepoint = omc.OpenMCStatePoint(spfile) version = statepoint.version return version From be19f5aef55354af8a3c684a9b61fb69465cf90b Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 09:28:32 +0000 Subject: [PATCH 37/53] Started fixing sphereout tests --- jade/sphereoutput.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 3562fcdc..e5582220 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -893,7 +893,7 @@ def _get_output(self, results_path: str) -> SphereOpenMCSimOutput: outfile = file # Parse output - _, outfile = self._get_output_files(results_path, "openmc") + _, outfile = self._get_output_files(results_path) output = SphereOpenMCSimOutput(outfile) return output @@ -972,7 +972,7 @@ def _read_output(self): else: zaidname = pieces[-1] # Parse output - _, outfile = self._get_output_files(results_path, "openmc") + _, 
outfile = self._get_output_files(results_path) output = SphereOpenMCSimOutput(outfile) outputs[zaidnum] = output # Adjourn raw Data From 460d877be5fa0a27d4b4847d7cd1df3b0e266304 Mon Sep 17 00:00:00 2001 From: Davide Laghi Date: Wed, 6 Nov 2024 10:28:46 +0100 Subject: [PATCH 38/53] fixed exp tests --- jade/expoutput.py | 66 +++++++++++++---------------------------------- 1 file changed, 18 insertions(+), 48 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index ab51d314..7e1b49fc 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -21,23 +21,23 @@ # along with JADE. If not, see . from __future__ import annotations + +import json import math import os import re import shutil -import json from abc import abstractmethod import numpy as np import pandas as pd from docx.shared import Inches +from f4enix.input.MCNPinput import D1S_Input from scipy.interpolate import interp1d from tqdm import tqdm import jade.atlas as at -from f4enix.input.MCNPinput import D1S_Input -from jade.output import MCNPBenchmarkOutput -from jade.output import MCNPSimOutput +from jade.output import MCNPBenchmarkOutput, MCNPSimOutput from jade.plotter import Plotter from jade.status import EXP_TAG @@ -287,7 +287,7 @@ def _extract_outputs(self) -> None: code_raw_data = {(self.testname, lib): tallydata} # Adjourn raw Data - #self.raw_data[code_tag].update(code_raw_data) + # self.raw_data[code_tag].update(code_raw_data) self.raw_data.update(code_raw_data) def _read_exp_results(self): @@ -450,7 +450,7 @@ class FNGOutput(ExperimentalOutput): ], } - def _processMCNPdata(self, output): + def _processMCNPdata(self, output: MCNPSimOutput): """ Read All tallies and return them as a dictionary of DataFrames. This aslo needs to ovveride the raw data since unfortunately it appears @@ -507,8 +507,8 @@ def _processMCNPdata(self, output): # --- Override the raw data --- # Get the folder and lib - path = mctal.mctalFileName - folderpath = os.path.dirname(path) + path = output.mctal_file + folderpath = os.path.dirname(os.path.dirname(path)) folder = os.path.basename(folderpath) lib = os.path.basename(os.path.dirname(os.path.dirname(folderpath))) self.raw_data[folder, lib] = res @@ -714,7 +714,6 @@ def _read_exp_file(self, filepath): class SpectrumOutput(ExperimentalOutput): - def _build_atlas(self, tmp_path, atlas): """ Fill the atlas with the customized plots. 
Creation and saving of the @@ -827,7 +826,6 @@ def _dump_ce_table(self): skipcol_global = 0 binning_list = ["Energy", "Time"] for x_ax in binning_list: # to update if other binning will be used - x_lab = x_ax[0] col_check = "Max " + x_lab ft = final_table.set_index(["Input"]) @@ -1135,9 +1133,7 @@ def _get_tablevalues( class TiaraOutput(ExperimentalOutput): - def _processMCNPdata(self, output): - return None def _case_tree_df_build(self): @@ -1235,7 +1231,6 @@ def _get_conv_df(self, df): class TiaraFCOutput(TiaraOutput): - def _pp_excel_comparison(self): """ Builds dataframe from computational output comparable to experimental @@ -1284,7 +1279,6 @@ def _pp_excel_comparison(self): # Build ExcelWriter object filepath = os.path.join(self.excel_path, "Tiara_Fission_Cells_CE_tables.xlsx") with pd.ExcelWriter(filepath, engine="xlsxwriter") as writer: - # Create 1 worksheet for each energy/material combination mats = self.case_tree_df.index.unique(level="Shield Material").tolist() ens = self.case_tree_df.index.unique(level="Energy").tolist() @@ -1561,7 +1555,6 @@ def _build_atlas(self, tmp_path, atlas): class TiaraBSOutput(TiaraOutput): - def _pp_excel_comparison(self): """ This method prints Tiara C/E tables for Bonner Spheres detectors @@ -1761,9 +1754,7 @@ def _build_atlas(self, tmp_path, atlas): class ShieldingOutput(ExperimentalOutput): - def _processMCNPdata(self, output): - return None def _pp_excel_comparison(self): @@ -1810,23 +1801,15 @@ def _pp_excel_comparison(self): t = (mat, lib_names_dict[idx_col[0]]) if idx_col[1] == "Value": if mat != "TLD": - vals = self.raw_data[t][4]["Value"].values[ - : len(x) - ] + vals = self.raw_data[t][4]["Value"].values[: len(x)] else: - vals = self.raw_data[t][6]["Value"].values[ - : len(x) - ] + vals = self.raw_data[t][6]["Value"].values[: len(x)] df_tab[idx_col] = vals elif idx_col[1] == "C/E Error": if mat != "TLD": - errs = self.raw_data[t][4]["Error"].values[ - : len(x) - ] + errs = self.raw_data[t][4]["Error"].values[: len(x)] else: - errs = self.raw_data[t][6]["Error"].values[ - : len(x) - ] + errs = self.raw_data[t][6]["Error"].values[: len(x)] vals1 = np.square(errs) vals2 = np.square( exp_data_df.loc[:, "Error"].to_numpy() / 100 @@ -1836,13 +1819,9 @@ def _pp_excel_comparison(self): df_tab[idx_col] = ce_err else: if mat != "TLD": - vals1 = self.raw_data[t][4]["Value"].values[ - : len(x) - ] + vals1 = self.raw_data[t][4]["Value"].values[: len(x)] else: - vals1 = self.raw_data[t][6]["Value"].values[ - : len(x) - ] + vals1 = self.raw_data[t][6]["Value"].values[: len(x)] vals2 = exp_data_df.loc[:, "Reaction Rate"].to_numpy() ratio = vals1 / vals2 ratio = ratio.tolist() @@ -1904,22 +1883,14 @@ def _build_atlas(self, tmp_path, atlas): y = [] err = [] if material != "TLD": - v = self.raw_data[(material, lib)][4]["Value"].values[ - : len(x) - ] + v = self.raw_data[(material, lib)][4]["Value"].values[: len(x)] else: - v = self.raw_data[(material, lib)][6]["Value"].values[ - : len(x) - ] + v = self.raw_data[(material, lib)][6]["Value"].values[: len(x)] y.append(v) if material != "TLD": - v = self.raw_data[(material, lib)][4]["Error"].values[ - : len(x) - ] + v = self.raw_data[(material, lib)][4]["Error"].values[: len(x)] else: - v = self.raw_data[(material, lib)][6]["Error"].values[ - : len(x) - ] + v = self.raw_data[(material, lib)][6]["Error"].values[: len(x)] err.append(v) # Append computational data to data list(to be sent to plotter) data_comp = {"x": x, "y": y, "err": err, "ylabel": ylabel} @@ -1959,7 +1930,6 @@ def _get_conv_df(self, mat, size): 
class MultipleSpectrumOutput(SpectrumOutput): - def _build_atlas(self, tmp_path, atlas): """ Fill the atlas with the customized plots. Creation and saving of the From aeb697123a4bcf280dacf7e0bcbeb62e42c4ba4b Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 09:37:46 +0000 Subject: [PATCH 39/53] Fixed test_read_openmc_output test --- tests/sphereoutput_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/sphereoutput_test.py b/tests/sphereoutput_test.py index 6af02536..dffa48b8 100644 --- a/tests/sphereoutput_test.py +++ b/tests/sphereoutput_test.py @@ -158,7 +158,7 @@ def test_read_openmc_output(self, session_mock: MockUpSession): sphere_00c = OpenMCSphereBenchmarkOutput( "00c", "openmc", "Sphere", session_mock ) - outputs, results, errors = sphere_00c._read_output() + outputs, results, errors, stat_checks = sphere_00c._read_output() tally_values = outputs["M10"].tallydata["Value"] tally_errors = outputs["M10"].tallydata["Error"] assert 0.8271037652370498 == pytest.approx(tally_values[10]) From 31a2d228a1c50990aed0b33752f40fa4f03daa52 Mon Sep 17 00:00:00 2001 From: Davide Laghi Date: Wed, 6 Nov 2024 10:59:19 +0100 Subject: [PATCH 40/53] fix sphereSDDR --- jade/sphereoutput.py | 26 +++++++++++++------------- tests/sphereoutput_test.py | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 3562fcdc..39018a4d 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -1344,10 +1344,10 @@ def pp_excel_single(self): ) # compute the results outputs, results, errors, stat_checks = self._compute_single_results() - self.outputs["d1s"] = outputs - self.results["d1s"] = results - self.errors["d1s"] = errors - self.stat_checks["d1s"] = stat_checks + self.outputs = outputs + self.results = results + self.errors = errors + self.stat_checks = stat_checks lib_name = self.session.conf.get_lib_name(self.lib) # Write excel # ex = SphereExcelOutputSheet(template, outpath) @@ -1415,9 +1415,9 @@ def _get_organized_output(self): Simply recover a list of the zaids and libraries involved """ zaids = [] - for code, library_outputs in self.outputs.items(): - for (zaidnum, mt, lib), outputslib in library_outputs.items(): - zaids.append((zaidnum, mt)) + + for (zaidnum, mt, lib), outputslib in self.outputs.items(): + zaids.append((zaidnum, mt)) zaids = list(set(zaids)) libs = [] # Not used @@ -1490,7 +1490,7 @@ def _generate_plots(self, allzaids, globalname): if material: for lib in libraries: try: # Zaid could not be common to the libraries - outp = self.outputs["d1s"][zaidnum, mt, lib] + outp = self.outputs[zaidnum, mt, lib] except KeyError: # It is ok, simply nothing to plot here since zaid was # not in library @@ -1548,7 +1548,7 @@ def _generate_plots(self, allzaids, globalname): data = [] for lib in libraries: try: # Zaid could not be common to the libraries - outp = self.outputs["d1s"][zaidnum, mt, lib] + outp = self.outputs[zaidnum, mt, lib] except KeyError: # It is ok, simply nothing to plot here since zaid was # not in library @@ -1597,7 +1597,7 @@ def _generate_plots(self, allzaids, globalname): # that appears on the reference + at least one lib # Build a df will all possible zaid, mt, lib combination if self.d1s: - allkeys = list(self.outputs["d1s"].keys()) + allkeys = list(self.outputs.keys()) else: raise NotImplementedError("Only d1s is implemented") df = pd.DataFrame(allkeys) @@ -1751,7 +1751,7 @@ def _extract_data4plots(self, zaid, mt, lib, time): pflux (float): proton flux sddr (float): 
shut down dose rate """ - tallies = self.outputs["d1s"][zaid, mt, lib].tallydata + tallies = self.outputs[zaid, mt, lib].tallydata # Extract values nflux = tallies[12].set_index("Energy") # .drop("total") nflux = nflux.sum().loc["Value"] @@ -1801,7 +1801,7 @@ def _compute_single_results( self.test_path, self.lib ) - self.outputs["d1s"] = outputs + self.outputs = outputs # Generate DataFrames results = pd.concat(results, axis=1).T @@ -1864,7 +1864,7 @@ def _compute_compare_result(self, reflib, tarlib): lib_dics.append(outputs) for dic in lib_dics: code_outputs.update(dic) - self.outputs["d1s"].update(code_outputs) + self.outputs.update(code_outputs) # Consider only common zaids idx1 = comp_dfs[0].index idx2 = comp_dfs[1].index diff --git a/tests/sphereoutput_test.py b/tests/sphereoutput_test.py index 6af02536..908471ee 100644 --- a/tests/sphereoutput_test.py +++ b/tests/sphereoutput_test.py @@ -179,7 +179,7 @@ def __init__(self): {"num": "dummy", "Name": "dummy", "dummy": 1}, ] self.mat_settings = pd.DataFrame(mat_settings).set_index("num") - self.raw_data = {"d1s": {}} + self.raw_data = {} self.outputs = {} self.d1s = True From c13f6b4a466d7a8b43a9ff1f2426e25cc4b2caee Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 10:18:38 +0000 Subject: [PATCH 41/53] Expout docstrings --- jade/expoutput.py | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index 7e1b49fc..ddaa0131 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -76,7 +76,7 @@ class ExperimentalOutput(MCNPBenchmarkOutput): - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: """ This extends the Benchmark Output and creates an abstract class for all experimental outputs. @@ -131,7 +131,7 @@ def __init__(self, *args, **kwargs): metadata[lib] = metadata_lib self.metadata = metadata - def single_postprocess(self): + def single_postprocess(self) -> None: """ Always raise an Attribute Error since no single post-processing is foreseen for experimental benchmarks @@ -145,7 +145,7 @@ def single_postprocess(self): """ raise AttributeError("\n No single pp is foreseen for exp benchmark") - def compare(self): + def compare(self) -> None: """ Complete the routines that perform the comparison of one or more libraries results with the experimental ones. @@ -168,7 +168,7 @@ def compare(self): print(" Creating Atlas...") self.build_atlas() - def pp_excel_comparison(self): + def pp_excel_comparison(self) -> None: """ At the moment everything is handled by _pp_excel_comparison that needs to be implemented in each child class. Some standard procedures may be @@ -180,7 +180,7 @@ def pp_excel_comparison(self): """ self._pp_excel_comparison() - def build_atlas(self): + def build_atlas(self) -> None: """ Creation and saving of the atlas are handled by this function while the actual filling of the atlas is left to _build_atlas which needs @@ -211,8 +211,26 @@ def build_atlas(self): shutil.rmtree(tmp_path) def _extract_single_output( - self, results_path: os.PathLike, folder: str, lib: str + self, results_path: str | os.PathLike, folder: str, lib: str ) -> tuple[pd.DataFrame, str]: + """Method to extract single output data from MCNP files + + Parameters + ---------- + results_path : str | os.PathLike + Path to simulations results. + folder : str + Sub-folder for multiple run case. + lib : str + Test library. + + Returns + ------- + tallydata : pd.DataFrame + Pandas dataframe containing tally data. 
+ input : str + Test name. + """ mfile, ofile, meshtalfile = self._get_output_files(results_path) # Parse output output = MCNPSimOutput(mfile, ofile, meshtalfile) @@ -290,7 +308,7 @@ def _extract_outputs(self) -> None: # self.raw_data[code_tag].update(code_raw_data) self.raw_data.update(code_raw_data) - def _read_exp_results(self): + def _read_exp_results(self) -> None: """ Read all experimental results and organize it in the self.exp_results dictionary. @@ -333,7 +351,7 @@ def _read_exp_results(self): self.exp_results = exp_results @staticmethod - def _read_exp_file(filepath): + def _read_exp_file(filepath : str | os.PathLike) -> pd.DataFrame: """ Default way of reading a csv file Parameters @@ -347,7 +365,7 @@ def _read_exp_file(filepath): """ return pd.read_csv(filepath) - def _print_raw(self): + def _print_raw(self) -> None: """ Dump all the raw data Returns @@ -376,7 +394,7 @@ def _print_raw(self): data.to_csv(file, header=True, index=False) @abstractmethod - def _processMCNPdata(self, output): + def _processMCNPdata(self, output : MCNPSimOutput) -> dict[int, pd.DataFrame]: """ Given an mctal file object return the meaningful data extracted. Some post-processing on the data may be foreseen at this stage. @@ -395,7 +413,7 @@ def _processMCNPdata(self, output): return item @abstractmethod - def _pp_excel_comparison(self): + def _pp_excel_comparison(self) -> None: """ Responsible for producing excel outputs Returns @@ -404,7 +422,7 @@ def _pp_excel_comparison(self): pass @abstractmethod - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. From 4c6b8666b90b077ce7ef8819be4dcfa563a8e9c5 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 10:25:45 +0000 Subject: [PATCH 42/53] Docstrings --- jade/expoutput.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index ddaa0131..8c5f586a 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -394,7 +394,7 @@ def _print_raw(self) -> None: data.to_csv(file, header=True, index=False) @abstractmethod - def _processMCNPdata(self, output : MCNPSimOutput) -> dict[int, pd.DataFrame]: + def _processMCNPdata(self, output : MCNPSimOutput) -> dict: """ Given an mctal file object return the meaningful data extracted. Some post-processing on the data may be foreseen at this stage. 
@@ -533,7 +533,7 @@ def _processMCNPdata(self, output: MCNPSimOutput): return res - def _pp_excel_comparison(self): + def _pp_excel_comparison(self) -> None: """ Responsible for producing excel outputs """ From 587a1357ca550ccfae1db21207481d0e8c6c6e58 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 10:44:02 +0000 Subject: [PATCH 43/53] Fixed expout tests after merge --- tests/expoutput_test.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/expoutput_test.py b/tests/expoutput_test.py index e5d07ed9..b38f358f 100644 --- a/tests/expoutput_test.py +++ b/tests/expoutput_test.py @@ -149,13 +149,13 @@ def test_spectrumoutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("spectra", "32c")][914][ + self.benchoutput_comp.raw_data[("spectra", "32c")][914][ "Energy" ].iloc[10] == 1.83000e-01 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("spectra", "00c")][924][ + self.benchoutput_comp.raw_data[("spectra", "00c")][924][ "Error" ].iloc[0] == 0.3742 @@ -206,11 +206,11 @@ def test_shieldingoutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Al", "31c")][4]["Cells"].iloc[1] + self.benchoutput_comp.raw_data[("Al", "31c")][4]["Cells"].iloc[1] == 501 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Au", "00c")][4]["Error"].iloc[2] + self.benchoutput_comp.raw_data[("Au", "00c")][4]["Error"].iloc[2] == 0.0194 ) @@ -229,11 +229,11 @@ def test_shieldingoutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Al", "31c")][4]["Cells"].iloc[1] + self.benchoutput_comp.raw_data[("Al", "31c")][4]["Cells"].iloc[1] == 612 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("TLD", "00c")][16]["Error"].iloc[2] + self.benchoutput_comp.raw_data[("TLD", "00c")][16]["Error"].iloc[2] == 0.0089 ) @@ -252,11 +252,11 @@ def test_shieldingoutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("In", "31c")][4]["Cells"].iloc[1] + self.benchoutput_comp.raw_data[("In", "31c")][4]["Cells"].iloc[1] == 924 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Rh", "00c")][4]["Error"].iloc[2] + self.benchoutput_comp.raw_data[("Rh", "00c")][4]["Error"].iloc[2] == 0.0049 ) @@ -352,11 +352,11 @@ def test_fnghcpboutput(self, session_mock: MockUpSession): "Raw_Data", ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("Al", "31c")][4]["Cells"].iloc[1] + self.benchoutput_comp.raw_data[("Al", "31c")][4]["Cells"].iloc[1] == 48002 ) assert ( - self.benchoutput_comp.raw_data["mcnp"][("H3", "00c")][84]["Error"].iloc[2] + self.benchoutput_comp.raw_data[("H3", "00c")][84]["Error"].iloc[2] == 0.0165 ) assert len(os.listdir(path2raw)) == 3 From 27d7a1d9798c3b59563da171330ac85ec746358b Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 11:41:39 +0000 Subject: [PATCH 44/53] More docstrings --- jade/expoutput.py | 85 +++++++++++++++++++++++++++++++---------------- 1 file changed, 57 insertions(+), 28 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index 96cc3fa5..098d53a5 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -33,6 +33,7 @@ import pandas as pd from docx.shared import Inches from f4enix.input.MCNPinput import D1S_Input +from f4enix.output.mctal import Tally from scipy.interpolate import interp1d from tqdm import tqdm @@ -395,7 +396,7 @@ def _print_raw(self) -> None: data.to_csv(file, header=True, index=False) 
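[Editor's aside, illustrative only and not part of the patch above: PATCH 34/53 "Refactored raw_data to remove code layer" drops the per-code ("mcnp"/"d1s") layer, so raw_data is keyed directly by (case, lib) tuples and each value maps tally numbers to DataFrames. A minimal standalone sketch of that layout, and of a dump loop in the style of _print_raw, follows; the case name, library tags, tally numbers and the "Raw_Data" output tree are made up for illustration, and only pandas is assumed.]

    import os
    import pandas as pd

    # Flattened layout after PATCH 34: no "mcnp"/"d1s" layer, keys are (case, lib) tuples,
    # values map tally numbers to their tally DataFrames.
    raw_data = {
        ("Sphere", "32c"): {
            4: pd.DataFrame({"Cells": [501, 601], "Value": [1.2e-5, 3.4e-6], "Error": [0.01, 0.02]}),
        },
        ("Sphere", "31c"): {
            4: pd.DataFrame({"Cells": [501, 601], "Value": [1.1e-5, 3.0e-6], "Error": [0.01, 0.03]}),
        },
    }

    # Dump loop in the style of _print_raw: one folder per (library, case), one CSV per tally.
    for (folder, lib), tallies in raw_data.items():
        outdir = os.path.join("Raw_Data", lib, folder)  # hypothetical output tree
        os.makedirs(outdir, exist_ok=True)
        for tally_num, df in tallies.items():
            df.to_csv(os.path.join(outdir, f"{tally_num}.csv"), header=True, index=False)

[End of aside; the patch series continues below.]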
@abstractmethod - def _processMCNPdata(self, output : MCNPSimOutput) -> dict: + def _processMCNPdata(self, output : MCNPSimOutput): """ Given an mctal file object return the meaningful data extracted. Some post-processing on the data may be foreseen at this stage. @@ -469,7 +470,7 @@ class FNGOutput(ExperimentalOutput): ], } - def _processMCNPdata(self, output: MCNPSimOutput): + def _processMCNPdata(self, output: MCNPSimOutput) -> pd.DataFrame: """ Read All tallies and return them as a dictionary of DataFrames. This aslo needs to ovveride the raw data since unfortunately it appears @@ -537,6 +538,10 @@ def _processMCNPdata(self, output: MCNPSimOutput): def _pp_excel_comparison(self) -> None: """ Responsible for producing excel outputs + + Returns + ------- + None. """ # Dump the global C/E table ex_outpath = os.path.join(self.excel_path, self.testname + "_CE_tables.xlsx") @@ -570,7 +575,7 @@ def _pp_excel_comparison(self) -> None: ws = writer.sheets[folder] ws.write_string(0, 0, '"C/E (mean +/- σ)"') - def _get_collected_data(self, folder): + def _get_collected_data(self, folder : str) -> pd.DataFrame: """ Given a campaign it builds a single table containing all experimental and computational data available for the total SDDR tally. @@ -597,13 +602,13 @@ def _get_collected_data(self, folder): return df - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. Parameters ---------- - tmp_path : path + tmp_path : str or os.PathLike path to the temporary folder where to dump images. atlas : Atlas Object representing the plot Atlas. @@ -718,33 +723,42 @@ def _build_atlas(self, tmp_path, atlas): return atlas - def _read_exp_file(self, filepath): + def _read_exp_file(self, filepath : str | os.PathLike) -> pd.DataFrame: """ Override parent method since the separator for these experimental files is ";" + Parameters ---------- - filepath : str - string containing the path to the experimental file to be read + filepath : str | os.PathLike + string or os.PathLike containing the path to the experimental file to be read for comparison + Returns + ------- + pd.DataFrame + Pandas data frame containing experimental data """ return pd.read_csv(filepath, sep=";") class SpectrumOutput(ExperimentalOutput): - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. Parameters ---------- - tmp_path : str + tmp_path : str | os.PathLike path to the temporary folder containing the plots for the atlas - atlas : Atlas + atlas : at.Atlas Object representing the plot Atlas. + Returns + ------- + atlas : at.Atlas + Object representing the plot Atlas. 
""" self.tables = [] self.bench_conf = pd.read_excel(self.cnf_path) @@ -794,19 +808,24 @@ def _build_atlas(self, tmp_path, atlas): return atlas - def _get_tally_info(self, tally): + def _get_tally_info(self, tally : Tally) -> tuple[int, str, str]: """ Extracts and assigns information from the tally object, as well as information from the benchmark config variable - Args: - tally (Tally): JADE tally object - - Returns: - tallynum (int): Tally number of the tally being plotted - particle (str): Type of quantity being plotted on the X axis - quant + unit (str): Unit of quantity being plotted on the X axis + Parameters + ---------- + tally : Tally + F4Enix tally object + Returns + ------- + tallynum : int + Tally number of the tally being plotted + particle : str + Type of quantity being plotted on the X axis + quant + unit : str + Unit of quantity being plotted on the X axis """ tallynum = tally.tallyNumber particle = tally.particleList[np.where(tally.tallyParticles == 1)[0][0]] @@ -814,7 +833,7 @@ def _get_tally_info(self, tally): unit = self.bench_conf.loc[tallynum, "X Unit"] return tallynum, particle, quant + " [" + unit + "]" - def _define_title(self, input, quantity_CE): + def _define_title(self, input : str, quantity_CE : str) -> str: """Assigns the title for atlas plot Parameters @@ -837,9 +856,13 @@ def _define_title(self, input, quantity_CE): title = self.testname + " " + input + ", " + quantity_CE return title - def _dump_ce_table(self): + def _dump_ce_table(self) -> None: """ Generates the C/E table and dumps them as an .xlsx file + + Returns + ------- + None """ final_table = pd.concat(self.tables) skipcol_global = 0 @@ -930,7 +953,7 @@ def _dump_ce_table(self): return - def _data_collect(self, input, tallynum, quantity_CE, e_intervals): + def _data_collect(self, input : str, tallynum : str, quantity_CE : str, e_intervals : list) -> tuple[list, str]: """Collect data for C/E tables Parameters @@ -999,26 +1022,32 @@ def _data_collect(self, input, tallynum, quantity_CE, e_intervals): pass return data, x_lab - def _pp_excel_comparison(self): + def _pp_excel_comparison(self) -> None: + """ + Excel is actually printed by the build atlas in this case + + Returns + ------- + None + """ # Excel is actually printed by the build atlas in this case pass - def _processMCNPdata(self, output): + def _processMCNPdata(self, output : MCNPSimOutput) -> dict: """ given the mctal file the lethargy flux and energies are returned both for the neutron and photon tally Parameters ---------- - output : MCNPoutput + output : MCNPSimOutput object representing the MCNP output. - Returns ------- - res : dic + res : dict contains the extracted lethargy flux and energies. - """ + res = {} # Read tally energy binned fluxes for tallynum, data in output.tallydata.items(): From 950778f1ca6e7eed9d46461ee1db8e23eecf27c0 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 12:04:47 +0000 Subject: [PATCH 45/53] More docstrings --- jade/expoutput.py | 72 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 19 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index 098d53a5..e2a4ebcc 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -1047,7 +1047,7 @@ def _processMCNPdata(self, output : MCNPSimOutput) -> dict: res : dict contains the extracted lethargy flux and energies. 
""" - + res = {} # Read tally energy binned fluxes for tallynum, data in output.tallydata.items(): @@ -1067,14 +1067,15 @@ def _processMCNPdata(self, output : MCNPSimOutput) -> dict: return res - def _parse_data_df(self, data, output, x_axis, tallynum): - """Read information from data DataFrame + def _parse_data_df(self, data : pd.DataFrame, output : MCNPSimOutput, x_axis : str, tallynum : str) -> tuple[list, list, list]: + """ + Read information from data DataFrame Parameters ---------- - data : DataFrame + data : pd.DataFrame DataFrame containing all tally data for an output - output : JADE MCNPoutput + output : MCNPSimOutput MCNP output object generated by MCNP parser x_axis : str X axis title @@ -1127,7 +1128,7 @@ def _parse_data_df(self, data, output, x_axis, tallynum): def _get_tablevalues( - df, interpolator, x="Energy [MeV]", y="C", e_intervals=[0.1, 1, 5, 10, 20] + df : pd.DataFrame, interpolator : function, x : str = "Energy [MeV]", y : str = "C", e_intervals : list = [0.1, 1, 5, 10, 20] ): """ Given the benchmark and experimental results returns a df to compile the @@ -1181,10 +1182,22 @@ def _get_tablevalues( class TiaraOutput(ExperimentalOutput): - def _processMCNPdata(self, output): + def _processMCNPdata(self, output : MCNPSimOutput) -> None: + """ + Used to override parent function as this is not required. + + Parameters + ---------- + output : MCNPSimOutput + MCNP simulation output object + + Returns + ------- + None + """ return None - def _case_tree_df_build(self): + def _case_tree_df_build(self) -> pd.DataFrame: """ Builds a dataframe containing library, source energy, shield material and thickness for each benchmark case, with all tallies for each case @@ -1228,7 +1241,7 @@ def _case_tree_df_build(self): # Return complete dataframe return pd.concat(to_concat) - def _exp_comp_case_check(self, indexes): + def _exp_comp_case_check(self, indexes : list) -> None: """ Removes from mcnp case dataframe experimental data which don't have correspondent mcnp outputs and removes mcnp outputs without @@ -1249,7 +1262,7 @@ def _exp_comp_case_check(self, indexes): self.case_tree_df = self.case_tree_df.set_index(indexes) return - def _get_conv_df(self, df): + def _get_conv_df(self, df : pd.DataFrame) -> pd.DataFrame: """ Adds extra columns to the dataframe containing the maximum and average errors of the tallies @@ -1279,10 +1292,14 @@ def _get_conv_df(self, df): class TiaraFCOutput(TiaraOutput): - def _pp_excel_comparison(self): + def _pp_excel_comparison(self) -> None: """ Builds dataframe from computational output comparable to experimental data and generates the excel comparison + + Returns + ------- + None """ # Get computational data structure for each library @@ -1403,9 +1420,13 @@ def _pp_excel_comparison(self): new_dataframe.to_excel(writer, sheet_name=sheet_name) conv_df.to_excel(writer, sheet_name=sheet_name, startrow=18) - def _read_exp_results(self): + def _read_exp_results(self) -> None: """ Reads and manipulates conderc Excel file + + Returns + ------- + None """ # Read experimental data from CONDERC Excel file @@ -1462,18 +1483,24 @@ def _read_exp_results(self): # Assign exp data variable self.exp_data = exp_data - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the - atlas are handled elsewhere. + atlas are handled elsewhere. 
Parameters ---------- - tmp_path : str + tmp_path : str | os.PathLike path to the temporary folder containing the plots for the atlas - atlas : Atlas + atlas : at.Atlas + Object representing the plot Atlas. + + Returns + ------- + atlas : at.Atlas Object representing the plot Atlas. """ + # Set plot and axes details unit = "-" quantity = ["On-axis reaction rate", "Off-axis 20 cm reaction rate"] @@ -1603,9 +1630,13 @@ def _build_atlas(self, tmp_path, atlas): class TiaraBSOutput(TiaraOutput): - def _pp_excel_comparison(self): + def _pp_excel_comparison(self) -> None: """ This method prints Tiara C/E tables for Bonner Spheres detectors + + Returns + ------- + None """ # Get main dataframe with computational data of all cases @@ -1687,10 +1718,13 @@ def _pp_excel_comparison(self): new_dataframe.to_excel(writer, sheet_name=sheet_name) conv_df.to_excel(writer, sheet_name=sheet_name, startrow=12) - def _read_exp_results(self): + def _read_exp_results(self) -> None: """ Reads and manipulates conderc Excel file + Returns + ------- + None """ # Get experimental data filepath filepath = os.path.join( @@ -1733,7 +1767,7 @@ def _read_exp_results(self): # Save experimental data self.exp_data = exp_data - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. From 33da2c856959380fef73dbcb2e698751d505f908 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 13:52:50 +0000 Subject: [PATCH 46/53] Black format and docstring expout --- jade/expoutput.py | 289 +++++++++++++++++++++++----------------- jade/postprocess.py | 2 +- tests/expoutput_test.py | 2 +- 3 files changed, 171 insertions(+), 122 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index e2a4ebcc..02ae9a38 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -222,7 +222,7 @@ def _extract_single_output( results_path : str | os.PathLike Path to simulations results. folder : str - Sub-folder for multiple run case. + Sub-folder for multiple run case. lib : str Test library. @@ -353,7 +353,7 @@ def _read_exp_results(self) -> None: self.exp_results = exp_results @staticmethod - def _read_exp_file(filepath : str | os.PathLike) -> pd.DataFrame: + def _read_exp_file(filepath: str | os.PathLike) -> pd.DataFrame: """ Default way of reading a csv file Parameters @@ -396,7 +396,7 @@ def _print_raw(self) -> None: data.to_csv(file, header=True, index=False) @abstractmethod - def _processMCNPdata(self, output : MCNPSimOutput): + def _processMCNPdata(self, output: MCNPSimOutput): """ Given an mctal file object return the meaningful data extracted. Some post-processing on the data may be foreseen at this stage. @@ -424,7 +424,7 @@ def _pp_excel_comparison(self) -> None: pass @abstractmethod - def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. @@ -575,7 +575,7 @@ def _pp_excel_comparison(self) -> None: ws = writer.sheets[folder] ws.write_string(0, 0, '"C/E (mean +/- σ)"') - def _get_collected_data(self, folder : str) -> pd.DataFrame: + def _get_collected_data(self, folder: str) -> pd.DataFrame: """ Given a campaign it builds a single table containing all experimental and computational data available for the total SDDR tally. 
@@ -602,7 +602,7 @@ def _get_collected_data(self, folder : str) -> pd.DataFrame: return df - def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. @@ -723,7 +723,7 @@ def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atl return atlas - def _read_exp_file(self, filepath : str | os.PathLike) -> pd.DataFrame: + def _read_exp_file(self, filepath: str | os.PathLike) -> pd.DataFrame: """ Override parent method since the separator for these experimental files is ";" @@ -743,7 +743,7 @@ def _read_exp_file(self, filepath : str | os.PathLike) -> pd.DataFrame: class SpectrumOutput(ExperimentalOutput): - def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. @@ -808,7 +808,7 @@ def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atl return atlas - def _get_tally_info(self, tally : Tally) -> tuple[int, str, str]: + def _get_tally_info(self, tally: Tally) -> tuple[int, str, str]: """ Extracts and assigns information from the tally object, as well as information from the benchmark config variable @@ -833,7 +833,7 @@ def _get_tally_info(self, tally : Tally) -> tuple[int, str, str]: unit = self.bench_conf.loc[tallynum, "X Unit"] return tallynum, particle, quant + " [" + unit + "]" - def _define_title(self, input : str, quantity_CE : str) -> str: + def _define_title(self, input: str, quantity_CE: str) -> str: """Assigns the title for atlas plot Parameters @@ -953,7 +953,9 @@ def _dump_ce_table(self) -> None: return - def _data_collect(self, input : str, tallynum : str, quantity_CE : str, e_intervals : list) -> tuple[list, str]: + def _data_collect( + self, input: str, tallynum: str, quantity_CE: str, e_intervals: list + ) -> tuple[list, str]: """Collect data for C/E tables Parameters @@ -1033,7 +1035,7 @@ def _pp_excel_comparison(self) -> None: # Excel is actually printed by the build atlas in this case pass - def _processMCNPdata(self, output : MCNPSimOutput) -> dict: + def _processMCNPdata(self, output: MCNPSimOutput) -> dict: """ given the mctal file the lethargy flux and energies are returned both for the neutron and photon tally @@ -1067,7 +1069,9 @@ def _processMCNPdata(self, output : MCNPSimOutput) -> dict: return res - def _parse_data_df(self, data : pd.DataFrame, output : MCNPSimOutput, x_axis : str, tallynum : str) -> tuple[list, list, list]: + def _parse_data_df( + self, data: pd.DataFrame, output: MCNPSimOutput, x_axis: str, tallynum: str + ) -> tuple[list, list, list]: """ Read information from data DataFrame @@ -1128,7 +1132,11 @@ def _parse_data_df(self, data : pd.DataFrame, output : MCNPSimOutput, x_axis : s def _get_tablevalues( - df : pd.DataFrame, interpolator : function, x : str = "Energy [MeV]", y : str = "C", e_intervals : list = [0.1, 1, 5, 10, 20] + df: pd.DataFrame, + interpolator: function, + x: str = "Energy [MeV]", + y: str = "C", + e_intervals: list = [0.1, 1, 5, 10, 20], ): """ Given the benchmark and experimental results returns a df to compile the @@ -1182,7 +1190,7 @@ def _get_tablevalues( class TiaraOutput(ExperimentalOutput): - def _processMCNPdata(self, output : MCNPSimOutput) -> None: + def 
_processMCNPdata(self, output: MCNPSimOutput) -> None: """ Used to override parent function as this is not required. @@ -1241,7 +1249,7 @@ def _case_tree_df_build(self) -> pd.DataFrame: # Return complete dataframe return pd.concat(to_concat) - def _exp_comp_case_check(self, indexes : list) -> None: + def _exp_comp_case_check(self, indexes: list) -> None: """ Removes from mcnp case dataframe experimental data which don't have correspondent mcnp outputs and removes mcnp outputs without @@ -1262,7 +1270,7 @@ def _exp_comp_case_check(self, indexes : list) -> None: self.case_tree_df = self.case_tree_df.set_index(indexes) return - def _get_conv_df(self, df : pd.DataFrame) -> pd.DataFrame: + def _get_conv_df(self, df: pd.DataFrame) -> pd.DataFrame: """ Adds extra columns to the dataframe containing the maximum and average errors of the tallies @@ -1483,10 +1491,10 @@ def _read_exp_results(self) -> None: # Assign exp data variable self.exp_data = exp_data - def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the - atlas are handled elsewhere. + atlas are handled elsewhere. Parameters ---------- @@ -1767,16 +1775,21 @@ def _read_exp_results(self) -> None: # Save experimental data self.exp_data = exp_data - def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atlas: + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. Parameters ---------- - tmp_path : str + tmp_path : str | os.PathLike path to the temporary folder containing the plots for the atlas - atlas : Atlas + atlas : at.Atlas + Object representing the plot Atlas. + + Returns + ------- + atlas : at.Atlas Object representing the plot Atlas. """ # Set plot axes @@ -1836,12 +1849,28 @@ def _build_atlas(self, tmp_path : str | os.PathLike, atlas : at.Atlas) -> at.Atl class ShieldingOutput(ExperimentalOutput): - def _processMCNPdata(self, output): + def _processMCNPdata(self, output: MCNPSimOutput) -> None: + """ + Used to override parent function as this is not required. + + Parameters + ---------- + output : MCNPSimOutput + MCNP simulation output object + + Returns + ------- + None + """ return None - def _pp_excel_comparison(self): + def _pp_excel_comparison(self) -> None: """ This method prints C/E tables for shielding benchmark comparisons + + Returns + ------- + None. 
""" # FNG SiC specific corrections/normalisations fngsic_k = [0.212, 0.204, 0.202, 0.202] # Neutron sensitivity of TL detectors @@ -1892,9 +1921,7 @@ def _pp_excel_comparison(self): if self.testname == "FNG-SiC": # Neutron dose Dn = ( - self.raw_data[t][16]["Value"].values[ - : len(x) - ] + self.raw_data[t][16]["Value"].values[: len(x)] ) * fngsic_norm Dn_multiplied = [ value * constant @@ -1902,17 +1929,13 @@ def _pp_excel_comparison(self): ] # Photon dose Dp = ( - self.raw_data[t][26]["Value"].values[ - : len(x) - ] + self.raw_data[t][26]["Value"].values[: len(x)] ) * fngsic_norm # Sum neutron and photon dose with neutron sensitivity as a function of depth Dt = [sum(pair) for pair in zip(Dn_multiplied, Dp)] vals = Dt else: - vals = self.raw_data[t][6]["Value"].values[ - : len(x) - ] + vals = self.raw_data[t][6]["Value"].values[: len(x)] df_tab[idx_col] = vals elif idx_col[1] == "C/E Error": if mat != "TLD": @@ -1932,9 +1955,7 @@ def _pp_excel_comparison(self): ) ) else: - errs = self.raw_data[t][6]["Error"].values[ - : len(x) - ] + errs = self.raw_data[t][6]["Error"].values[: len(x)] vals1 = np.square(errs) vals2 = np.square( exp_data_df.loc[:, "Error"].to_numpy() / 100 @@ -1949,9 +1970,7 @@ def _pp_excel_comparison(self): if self.testname == "FNG-SiC": # Neutron dose Dn = ( - self.raw_data[t][16]["Value"].values[ - : len(x) - ] + self.raw_data[t][16]["Value"].values[: len(x)] ) * fngsic_norm Dn_multiplied = [ value * constant @@ -1959,9 +1978,7 @@ def _pp_excel_comparison(self): ] # Photon dose Dp = ( - self.raw_data[t][26]["Value"].values[ - : len(x) - ] + self.raw_data[t][26]["Value"].values[: len(x)] ) * fngsic_norm # Sum neutron and photon dose with neutron sensitivity as a function of depth Dt = [sum(pair) for pair in zip(Dn_multiplied, Dp)] @@ -1982,16 +1999,21 @@ def _pp_excel_comparison(self): df_tab.to_excel(writer, sheet_name=sheet_name) conv_df.to_excel(writer, sheet_name=sheet_name, startrow=18) - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. Parameters ---------- - tmp_path : str + tmp_path : str | os.PathLike path to the temporary folder containing the plots for the atlas - atlas : Atlas + atlas : at.Atlas + Object representing the plot Atlas. + + Returns + ------- + atlas : at.Atlas Object representing the plot Atlas. 
""" # FNG SiC specific corrections/normalisations @@ -2046,25 +2068,19 @@ def _build_atlas(self, tmp_path, atlas): if self.testname == "FNG-SiC": # Neutron dose Dn = ( - self.raw_data[(material, lib)][16]["Value"].values[ - : len(x) - ] + self.raw_data[(material, lib)][16]["Value"].values[: len(x)] ) * fngsic_norm Dn_multiplied = [ value * constant for value, constant in zip(Dn, fngsic_k) ] # Photon dose Dp = ( - self.raw_data[(material, lib)][26]["Value"].values[ - : len(x) - ] + self.raw_data[(material, lib)][26]["Value"].values[: len(x)] ) * fngsic_norm # Sum neutron and photon dose with neutron sensitivity as a function of depth v = [sum(pair) for pair in zip(Dn_multiplied, Dp)] else: - v = self.raw_data[(material, lib)][6]["Value"].values[ - : len(x) - ] + v = self.raw_data[(material, lib)][6]["Value"].values[: len(x)] y.append(v) if material != "TLD": v = self.raw_data[(material, lib)][4]["Error"].values[: len(x)] @@ -2072,20 +2088,18 @@ def _build_atlas(self, tmp_path, atlas): if self.testname == "FNG-SiC": v = np.sqrt( np.square( - self.raw_data[(material, lib)][16][ - "Error" - ].values[: len(x)] + self.raw_data[(material, lib)][16]["Error"].values[ + : len(x) + ] ) + np.square( - self.raw_data[(material, lib)][26][ - "Error" - ].values[: len(x)] + self.raw_data[(material, lib)][26]["Error"].values[ + : len(x) + ] ) ) else: - v = self.raw_data[(material, lib)][6]["Error"].values[ - : len(x) - ] + v = self.raw_data[(material, lib)][6]["Error"].values[: len(x)] err.append(v) # Append computational data to data list(to be sent to plotter) data_comp = {"x": x, "y": y, "err": err, "ylabel": ylabel} @@ -2107,9 +2121,22 @@ def _build_atlas(self, tmp_path, atlas): return atlas - def _get_conv_df(self, mat, size): - # TODO Replace when other transport codes implemented. - code = "mcnp" + def _get_conv_df(self, mat: str, size: int) -> pd.DataFrame: + """ + Method to calculate average and maximum uncertainties + + Parameters + ---------- + mat : str + String denoting material + size : int + Integer denoting size of array + + Returns + ------- + conv_df : pd.DataFrame + Dataframe containing Max Error and Average Error columns + """ conv_df = pd.DataFrame() for lib in self.lib[1:]: if mat != "TLD": @@ -2118,9 +2145,7 @@ def _get_conv_df(self, mat, size): else: if self.testname == "FNG-SiC": v = np.sqrt( - np.square( - self.raw_data[(mat, lib)][16]["Error"].values[:size] - ) + np.square(self.raw_data[(mat, lib)][16]["Error"].values[:size]) + np.square( self.raw_data[(mat, lib)][26]["Error"].values[:size] ) @@ -2128,12 +2153,8 @@ def _get_conv_df(self, mat, size): max = np.max(v) avg = np.mean(v) else: - max = ( - self.raw_data[(mat, lib)][6]["Error"].values[:size].max() - ) - avg = ( - self.raw_data[(mat, lib)][6]["Error"].values[:size].mean() - ) + max = self.raw_data[(mat, lib)][6]["Error"].values[:size].max() + avg = self.raw_data[(mat, lib)][6]["Error"].values[:size].mean() library = self.session.conf.get_lib_name(lib) conv_df.loc["Max Error", library] = max conv_df.loc["Average Error", library] = avg @@ -2141,18 +2162,22 @@ def _get_conv_df(self, mat, size): class MultipleSpectrumOutput(SpectrumOutput): - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ Fill the atlas with the customized plots. Creation and saving of the atlas are handled elsewhere. 
Parameters ---------- - tmp_path : str + tmp_path : str | os.PathLike path to the temporary folder containing the plots for the atlas - atlas : Atlas + atlas : at.Atlas Object representing the plot Atlas. + Returns + ------- + atlas : at.Atlas + Object representing the plot Atlas. """ self.tables = [] self.groups = pd.read_excel(self.cnf_path) @@ -2166,7 +2191,9 @@ def _build_atlas(self, tmp_path, atlas): return atlas - def _plot_tally_group(self, group, tmp_path, atlas): + def _plot_tally_group( + self, group: list, tmp_path: str | os.PathLike, atlas: at.Atlas + ) -> at.Atlas: """ Plots tallies for a given group of outputs and add to Atlas object @@ -2175,7 +2202,7 @@ def _plot_tally_group(self, group, tmp_path, atlas): group : list list of groups in the experimental benchmark object, outputs are grouped by material, several tallies for each material/group - tmp_path : str + tmp_path : str or os.PathLike path to temporary atlas plot folder atlas : JADE Atlas Atlas object @@ -2237,9 +2264,9 @@ def _plot_tally_group(self, group, tmp_path, atlas): atlas.insert_img(img_path, width=Inches(9)) return atlas - def _define_title(self, input, particle, quantity): + def _define_title(self, input: str, particle: str, quantity: str) -> str: """ - determines which benchmark is being compared and assigns title + Determines which benchmark is being compared and assigns title accordinly Parameters @@ -2289,14 +2316,30 @@ def _define_title(self, input, particle, quantity): return title -class fnghcpboutput(ExperimentalOutput): +class FNGCPBOutput(ExperimentalOutput): + def _processMCNPdata(self, output: MCNPSimOutput) -> None: + """ + Used to override parent function as this is not required. - def _processMCNPdata(self, output): + Parameters + ---------- + output : MCNPSimOutput + MCNP simulation output object + Returns + ------- + None + """ return None - def _pp_excel_comparison(self): - """Produces the Excel document for comparison to experiment.""" + def _pp_excel_comparison(self) -> None: + """ + This method prints C/E tables for shielding benchmark comparisons + + Returns + ------- + None. + """ lib_names_dict = {} column_names = [] @@ -2345,34 +2388,24 @@ def _pp_excel_comparison(self): t = (mat, lib_names_dict[idx_col[0]]) if idx_col[1] == "Value": if mat != "H3": - vals = self.raw_data[t][4]["Value"].values[ - : len(x) - ] + vals = self.raw_data[t][4]["Value"].values[: len(x)] else: # Total activity vals = [] for i in range(4): vals.extend( - ( - self.raw_data[t][84]["Value"].values[ - i::4 - ] - ) + (self.raw_data[t][84]["Value"].values[i::4]) ) df_tab[idx_col] = vals elif idx_col[1] == "C/E Error": if mat != "H3": - errs = self.raw_data[t][4]["Error"].values[ - : len(x) - ] + errs = self.raw_data[t][4]["Error"].values[: len(x)] else: errs = [] for i in range(4): - yerr = self.raw_data[t][84]["Error"].values[ - i::4 - ] + yerr = self.raw_data[t][84]["Error"].values[i::4] errs.extend(yerr) vals1 = np.square(errs) @@ -2385,9 +2418,7 @@ def _pp_excel_comparison(self): # Calculate C/E value else: if mat != "H3": - vals1 = self.raw_data[t][4]["Value"].values[ - : len(x) - ] + vals1 = self.raw_data[t][4]["Value"].values[: len(x)] else: vals1 = [] for i in range(4): @@ -2414,11 +2445,23 @@ def _pp_excel_comparison(self): conv_df.to_excel(writer, sheet_name=sheet_name, startrow=55) # Close the Pandas Excel writer object and output the Excel file - def _build_atlas(self, tmp_path, atlas): + def _build_atlas(self, tmp_path: str | os.PathLike, atlas: at.Atlas) -> at.Atlas: """ - Build the Atlas (PDF) plots. 
See ExperimentalOutput documentation + Fill the atlas with the customized plots. Creation and saving of the + atlas are handled elsewhere. + + Parameters + ---------- + tmp_path : str | os.PathLike + path to the temporary folder containing the plots for the atlas + atlas : at.Atlas + Object representing the plot Atlas. + + Returns + ------- + atlas : at.Atlas + Object representing the plot Atlas. """ - code = "mcnp" for material in tqdm(self.inputs): # Tritium Activity if material == "H3": @@ -2447,14 +2490,10 @@ def _build_atlas(self, tmp_path, atlas): # y = [] # err = [] # Total tritium production Li6 + Li7 - ycalc = self.raw_data[(material, lib)][84][ - "Value" - ].values[i::4] + ycalc = self.raw_data[(material, lib)][84]["Value"].values[i::4] yerr = np.square( - self.raw_data[(material, lib)][84]["Error"].values[ - i::4 - ] + self.raw_data[(material, lib)][84]["Error"].values[i::4] ) y = ycalc @@ -2509,14 +2548,10 @@ def _build_atlas(self, tmp_path, atlas): y = [] err = [] - ycalc = self.raw_data[(material, lib)][4]["Value"].values[ - : len(x) - ] + ycalc = self.raw_data[(material, lib)][4]["Value"].values[: len(x)] y.append(ycalc) - yerr = self.raw_data[(material, lib)][4]["Error"].values[ - : len(x) - ] + yerr = self.raw_data[(material, lib)][4]["Error"].values[: len(x)] err.append(yerr) # Append computational data to data list(to be sent to plotter) @@ -2538,9 +2573,23 @@ def _build_atlas(self, tmp_path, atlas): atlas.insert_img(img_path) return atlas - def _get_conv_df(self, mat, size): + def _get_conv_df(self, mat: str, size: int) -> pd.DataFrame: + """ + Method to calculate average and maximum uncertainties + + Parameters + ---------- + mat : str + String denoting material + size : int + Integer denoting size of array + + Returns + ------- + conv_df : pd.DataFrame + Dataframe containing Max Error and Average Error columns + """ conv_df = pd.DataFrame() - code = "mcnp" for lib in self.lib[1:]: if mat != "H3": max = self.raw_data[(mat, lib)][4]["Error"].values[:size].max() diff --git a/jade/postprocess.py b/jade/postprocess.py index 7acc50ba..af9e8a32 100644 --- a/jade/postprocess.py +++ b/jade/postprocess.py @@ -187,7 +187,7 @@ def _get_output(action, code, testname, lib, session): elif testname == "FNG-HCPB": if action == "compare": - out = expo.fnghcpboutput(lib, testname, session, multiplerun=True) + out = expo.FNGCPBOutput(lib, testname, session, multiplerun=True) elif action == "pp": print(exp_pp_message) return False diff --git a/tests/expoutput_test.py b/tests/expoutput_test.py index b38f358f..9d30e7e3 100644 --- a/tests/expoutput_test.py +++ b/tests/expoutput_test.py @@ -338,7 +338,7 @@ def test_fnghcpboutput(self, session_mock: MockUpSession): testname = "FNG-HCPB" os.makedirs(session_mock.path_comparison) os.makedirs(session_mock.path_single) - self.benchoutput_comp = expoutput.fnghcpboutput( + self.benchoutput_comp = expoutput.FNGCPBOutput( ["32c", "31c", "00c"], code, testname, session_mock, multiplerun=True ) self.benchoutput_comp.compare() From b7e1e1d5e604762d5eeaf4fef17139650c41abaa Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 14:59:58 +0000 Subject: [PATCH 47/53] Docstrings for output.py, removed ExcelOutputSheet object --- jade/output.py | 419 +++++++++---------------------------------------- 1 file changed, 77 insertions(+), 342 deletions(-) diff --git a/jade/output.py b/jade/output.py index 92601cef..6cf8955b 100644 --- a/jade/output.py +++ b/jade/output.py @@ -214,15 +214,29 @@ def _get_output_files(self, results_path: str | os.PathLike) 
-> list: List of simulation results files. """ - #TODO Output types @abc.abstractmethod - def parse_output_data(self, results_path : str | os.PathLike): + def parse_output_data(self, results_path : str | os.PathLike) -> tuple[AbstractSimOutput, list, list]: """ - To be executed when a comparison is requested + Abstract function for retrieving simulation output data, tally numbers and tally comments. + + Parameters + ---------- + results_path : str | os.PathLike + Path to simulation results + + Returns + ------- + sim_output : AbstractSimOutput + Simulation output object, specific to the code being proccessed. This should inherit from AbstractSimOutput. + tally_numbers : list + List containing all tally numbers in the simulation. + tally_comments: list + List containing all tally comments in the simulation. """ def _read_metadata_run(self, simulation_folder: os.PathLike) -> dict: - """Retrieve the metadata from the run + """ + Retrieve the metadata from the run Parameters ---------- @@ -250,7 +264,7 @@ def _read_metadata_run(self, simulation_folder: os.PathLike) -> dict: return metadata - def single_postprocess(self): + def single_postprocess(self) -> None: """ Execute the full post-processing of a single library (i.e. excel, raw data and atlas) @@ -541,7 +555,8 @@ def _reorder_df(df : pd.DataFrame, x_set : list) -> pd.DataFrame: return df def _print_raw(self) -> None: - """Method to print raw data to json. + """ + Method to print raw data to json. Returns ------- @@ -556,7 +571,13 @@ def _print_raw(self) -> None: json.dump(self.metadata, outfile, indent=4) def _generate_single_excel_output(self) -> None: - # Get excel configuration + """ + Generation of single Excel sheets + + Returns + ------- + None + """ self.outputs = {} self.results = {} self.errors = {} @@ -736,7 +757,13 @@ def _generate_single_excel_output(self) -> None: exsupp.single_excel_writer(outpath, self.lib, self.testname, outputs, stats) def _generate_comparison_excel_output(self) -> None: - # Get excel configuration + """ + Generation of comparsion Excel sheets + + Returns + ------- + None + """ self.outputs = {} self.results = {} self.errors = {} @@ -998,7 +1025,7 @@ def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: ) return None - def _get_output_files(self, results_path: str | os.PathLike) -> tuple: + def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.PathLike, str | os.PathLike, str | os.PathLike]: """ Recover the output files from a directory @@ -1006,8 +1033,6 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: ---------- results_path : str or path path where the results are contained. - code : str - code that generated the output ('mcnp' or 'openmc') Raises ------ @@ -1020,7 +1045,7 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: path to the first file file2 : path path to the second file - file2 : path + file3 : path path to the third file (only for mcnp meshtal) """ @@ -1047,7 +1072,24 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: return file1, file2, file3 - def parse_output_data(self, results_path): + def parse_output_data(self, results_path : str | os.PathLike) -> tuple[MCNPSimOutput, list, list]: + """ + Function for retrieving MCNP simulation output data, tally numbers and tally comments. 
+ + Parameters + ---------- + results_path : str | os.PathLike + Path to simulation results + + Returns + ------- + sim_output : MCNPSimOutput + Simulation output object, specific to the code being proccessed. This should inherit from AbstractSimOutput. + tally_numbers : list + List containing all tally numbers in the simulation. + tally_comments: list + List containing all tally comments in the simulation. + """ mfile, ofile, meshtalfile = self._get_output_files(results_path) sim_output = MCNPSimOutput(mfile, ofile, meshtal_file=meshtalfile) tally_numbers = sim_output.tally_numbers @@ -1077,7 +1119,7 @@ def _read_code_version(self, simulation_path: os.PathLike) -> str | None: version = statepoint.version return version - def _get_output_files(self, results_path: str | os.PathLike) -> tuple: + def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.PathLike, str | os.PathLike]: """ Recover the output files from a directory @@ -1096,9 +1138,9 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: Returns ------- file1 : path - path to the first file + path to the output file file2 : path - path to the second file (only for mcnp) + path to the StatePoint file """ file1 = None @@ -1148,7 +1190,17 @@ def parse_output_data( class AbstractSimOutput: tallydata = None totalbin = None - def __init__(self): + def __init__(self) -> None: + """ + Abstract class to enforce inclusion of tallydata and totalbin dictionaries in children simulation output classes + + Raises + ------ + NotImplementedError + if tallydata dictionary is not set in child class + NotImplementedError + if totalbin dictionary is not set in child class + """ if not isinstance(self.tallydata, dict): raise NotImplementedError if not isinstance(self.totalbin, dict): @@ -1160,7 +1212,7 @@ def __init__( mctal_file: str | os.PathLike, output_file: str | os.PathLike, meshtal_file: str | os.PathLike | None = None, - ): + ) -> None: """ Class representing all outputs coming from MCNP run @@ -1263,7 +1315,8 @@ def __init__( class OpenMCSimOutput(AbstractSimOutput): def __init__(self, output_path: str | os.PathLike) -> None: - """Class representing all outputs coming from OpenMC run + """ + Class representing all outputs coming from OpenMC run Parameters ---------- @@ -1284,7 +1337,8 @@ def __init__(self, output_path: str | os.PathLike) -> None: def _create_dataframes( self, tallies: dict ) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame]]: - """Function to create dataframes in JADE format from OpenMC dataframes. + """ + Function to create dataframes in JADE format from OpenMC dataframes. Parameters ---------- @@ -1350,7 +1404,8 @@ def _create_dataframes( return tallydata, totalbin def process_tally(self) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame]]: - """Function to retrieve OpenMC tally dataframes, and re-format for JADE. + """ + Function to retrieve OpenMC tally dataframes, and re-format for JADE. Returns ------- @@ -1363,327 +1418,7 @@ def process_tally(self) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame tallydata, totalbin = self._create_dataframes(tallies) return tallydata, totalbin - -class ExcelOutputSheet: - # Common variables - _starting_free_row = 10 - - def __init__(self, template, outpath): - """ - Excel workbook containing the post-processed results - - Parameters - ---------- - template : path like object - path to the sheet template. - outpath : path like object - dump path for the excel. - - Returns - ------- - None. 
- - """ - self.outpath = outpath # Path to the excel file - # Open template - shutil.copy(template, outpath) - # self.app = xw.App(visible=False) - self.wb = self.app.books.open(outpath) - # The first open row in current ws - self.free_row = self._starting_free_row - self.ws_free_rows = {} - self.current_ws = None - - def _switch_ws(self, ws_name): - """ - Change active worksheet without loosing parameters informations. - - Parameters - ---------- - ws_name : str - Worksheet name. - - Returns - ------- - ws : xlwings.Sheet - Excel worksheet. - - """ - # Adjourn free row sheet - if self.current_ws is not None: - self.ws_free_rows[self.current_ws.name] = self.free_row - - # Select new sheet - ws = self.wb.sheets[ws_name] - self.current_ws = ws - try: - self.free_row = self.ws_free_rows[ws_name] - except KeyError: - self.free_row = self._starting_free_row - - return ws - - def insert_df( - self, - startcolumn, - df, - ws, - startrow=None, - header=None, - print_index=True, - idx_format="0", - cols_head_size=12, - values_format=None, - ): - """ - Insert a DataFrame (df) into a Worksheet (ws) using xlwings. - - Parameters - ---------- - startcolumn : int or str - Starting column where to insert the DataFrame. It can be expressed - both as an integer as a letter in Excel fashion. - df : pandas.DataFrame - DataFrame to insert in the excel sheet - ws : str - name of the Excel worksheet where to put the DataFrame. - startrow : int - starting row where to put the DataFrame. Default is None that - triggers the use of the memorized first free row in the excel sheet - header : tuple (str, value) - contains the tag of the header and the header value. DEAFAULT is - None - print_index : bool - if True the DataFrame index is printed. DEAFAULT is True. - idx_format : str - how to format the index values. DEAFAULT is '0' (integer) - cols_head_size : int - Font size for columns header. DEAFAULT is 12 - values_format : str - how to format the values. 
DEAFAULT is None - - Returns - ------- - None - - """ - # Select the worksheet as first thing in order to have the correct - # Free rows computed - ws = self._switch_ws(ws) - - if startrow is None: - startrow = self.free_row - # adjourn free row - add_space = 3 # Includes header - self.free_row = self.free_row + len(df) + add_space - - # Start column can be provided as a letter or number (up to Z) - if isinstance(startcolumn, str): - startcolumn = ord(startcolumn.lower()) - 96 - - anchor = (startrow, startcolumn) - header_anchor_tag = (startrow, 1) - header_anchor = (startrow + 1, 1) - - try: - ws.range(anchor).options(index=print_index, header=True).value = df - rng = ((startrow + 1, startcolumn), (startrow + 1 + len(df), startcolumn)) - # Format values if requested - if values_format is not None: - rng_values = ( - (startrow + 1, startcolumn + 1), - (startrow + 1 + len(df), startcolumn + 1 + len(df.columns)), - ) - ws.range(*rng_values).number_format = values_format - - # Formatting - ws.range(*rng).number_format = idx_format # idx formatting - # Columns headers - anchor_columns = (anchor, (startrow, startcolumn + len(df.columns))) - ws.range(*anchor_columns).api.Font.Size = cols_head_size - ws.range(*anchor_columns).api.Font.Bold = True - ws.range(*anchor_columns).color = (236, 236, 236) - - if header is not None: - ws.range(header_anchor_tag).value = header[0] - ws.range(header_anchor_tag).api.Font.Size = cols_head_size - ws.range(header_anchor_tag).api.Font.Bold = True - ws.range(header_anchor_tag).color = (236, 236, 236) - - ws.range(header_anchor).value = header[1] - ws.range(header_anchor).api.Font.Size = cols_head_size - ws.range(header_anchor_tag).api.Font.Bold = True - ws.range(header_anchor_tag).color = (236, 236, 236) - - except Exception as e: - print(vars(e)) - print(header) - print(df) - - def insert_cutted_df( - self, - startcolumn, - df, - ws, - ylim, - startrow=None, - header=None, - index_name=None, - cols_name=None, - index_num_format="0", - values_format=None, - ): - """ - Insert a DataFrame in the excel cutting its columns - - Parameters - ---------- - startcolumn : str/int - Excel column where to put the first DF column. - df : pd.DataFrame - global DF to insert. - ws : str - Excel worksheet where to insert the DF. - ylim : int - limit of columns to use to cut the DF. - startrow : int, optional - initial Excel row. The default is None, - the first available is used. - header : tuple (str, value) - contains the tag of the header and the header value. DEAFAULT is - None - index_name : str - Name of the Index. DEAFAULT is None - cols_name : str - Name of the columns. DEFAULT is None - index_num_format: str - format of index numbers - values_format : str - how to format the values. DEAFAULT is None - - Returns - ------- - None. 
- - """ - # First of all we need to switch ws or all calculation of free row - # will be wrongly affected - self._switch_ws(ws) - - res_len = len(df.columns) - start_col = 0 - ylim = int(ylim) - # ws = self.wb.sheets[ws] - # Decode columns for index and columns names - if isinstance(startcolumn, int): - index_col = string.ascii_uppercase[startcolumn] - columns_col = string.ascii_uppercase[startcolumn + 1] - elif isinstance(startcolumn, str): - index_col = startcolumn - columns_col = chr(ord(startcolumn) + 1) - - # Add each DataFrame piece - new_ylim = ylim - while res_len > ylim: - curr_df = df.iloc[:, start_col:new_ylim] - # Memorize anchors for headers name - anchor_index = index_col + str(self.free_row) - anchor_cols = columns_col + str(self.free_row - 1) - end_anchor_cols = chr(ord(columns_col) + len(curr_df.columns) - 1) + str( - self.free_row - 1 - ) - # Insert cutted df - self.insert_df( - startcolumn, - curr_df, - ws, - header=header, - idx_format=index_num_format, - values_format=values_format, - ) - # Insert columns name and index name - self.current_ws.range(anchor_index).value = index_name - self.current_ws.range(anchor_index).api.Font.Size = 12 - self.current_ws.range(anchor_index).api.Font.Bold = True - self.current_ws.range(anchor_index).color = (236, 236, 236) - - self.current_ws.range(anchor_cols).value = cols_name - self.current_ws.range(anchor_cols).api.Font.Size = 12 - self.current_ws.range(anchor_cols).api.Font.Bold = True - self.current_ws.range(anchor_cols).color = (236, 236, 236) - self.current_ws.range(anchor_cols + ":" + end_anchor_cols).merge() - # Adjourn parameters - start_col = start_col + ylim - new_ylim = new_ylim + ylim - res_len = res_len - ylim - - # Add the remaining piece - if res_len != 0: - curr_df = df.iloc[:, -res_len:] - # Memorize anchors for headers name - anchor_index = index_col + str(self.free_row) - anchor_cols = columns_col + str(self.free_row - 1) - end_anchor_cols = chr(ord(columns_col) + len(curr_df.columns) - 1) + str( - self.free_row - 1 - ) - - self.insert_df( - startcolumn, - curr_df, - ws, - header=header, - idx_format=index_num_format, - values_format=values_format, - ) - # Insert columns name and index name - self.current_ws.range(anchor_index).value = index_name - self.current_ws.range(anchor_cols).value = cols_name - # Merge the cols name - self.current_ws.range(anchor_cols + ":" + end_anchor_cols).merge() - - # Adjust lenght - self.current_ws.range(index_col + ":AAA").autofit() - - def copy_sheets(self, wb_origin_path): - """ - Copy all sheets of the selected excel file into the current one - - Parameters - ---------- - wb_origin_path : str/path - Path to excel file containing sheets to add. - - Returns - ------- - None. 
- - """ - wb = self.app.books.open(wb_origin_path) - for sheet in wb.sheets: - # copy to a new workbook - sheet.api.Copy() - - # copy to an existing workbook by putting it in front of a - # worksheet object - sheet.api.Copy(Before=self.wb.sheets[0].api) - - def save(self): - """ - Save Excel - """ - self.app.calculate() - try: - self.wb.save() - except FileNotFoundError as e: - print(" The following is the original exception:") - print(e) - print("\n it may be due to invalid characters in the file name") - - self.wb.close() - self.app.quit() - - -def fatal_exception(message=None): +def fatal_exception(message : str | None = None) -> None: """ Use this function to exit with a code error from a handled exception From 9acf14beacea650f386af6587013f06800010470 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 15:28:03 +0000 Subject: [PATCH 48/53] sphereoutputs docstrings --- jade/output.py | 6 +- jade/sphereoutput.py | 154 +++++++++++++++++++++++++++++-------------- 2 files changed, 110 insertions(+), 50 deletions(-) diff --git a/jade/output.py b/jade/output.py index 6cf8955b..4f12d0c7 100644 --- a/jade/output.py +++ b/jade/output.py @@ -186,7 +186,8 @@ def __init__(self, lib: str, code: str, testname: str, session: Session) -> None @abc.abstractmethod def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None: - """Abstract function to retrieve code version. Implimentation should be added to child classes for each code. + """ + Abstract function to retrieve code version. Implimentation should be added to child classes for each code. Parameters ---------- @@ -201,7 +202,8 @@ def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None @abc.abstractmethod def _get_output_files(self, results_path: str | os.PathLike) -> list: - """Abstract function to retrieve code output files. Implimentation should be added to child classes for each code. + """ + Abstract function to retrieve code output files. Implimentation should be added to child classes for each code. Parameters ---------- diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 5a3baba8..c326c4b3 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -54,7 +54,7 @@ class AbstractSphereBenchmarkOutput(AbstractBenchmarkOutput): - def __init__(self, lib: str, code: str, testname: str, session: Session): + def __init__(self, lib: str, code: str, testname: str, session: Session) -> None: """ Initialises the SphereOutput class from the general BenchmarkOutput class, see output.py for details on how self variables are assigned @@ -99,13 +99,38 @@ def __init__(self, lib: str, code: str, testname: str, session: Session): # # metadata involved here # self.metadata = None - def _read_get_output_files(self, results_path: str, code: str): - pass + def _get_output_files(self, results_path: str | os.PathLike, code: str) -> list: + """ + Enforced method from inheritance of AbstractBenchmarkOutput, not used in Sphere. + + Parameters + ---------- + results_path : str | os.PathLike + Path to simulation results folder. - def parse_output_data(self, results_path: str): + Returns + ------- + list + List of simulation results files. + """ pass - def single_postprocess(self): + def parse_output_data(self, results_path : str | os.PathLike): + """ + Abstract function for retrieving simulation output data, tally numbers and tally comments. + Not used in Sphere. 
+ + Parameters + ---------- + results_path : str | os.PathLike + Path to simulation results + + Returns + ------- + None + """ + + def single_postprocess(self) -> None: """ Execute the full post-processing of a single library (i.e. excel, raw data and atlas) @@ -124,25 +149,26 @@ def single_postprocess(self): @abc.abstractmethod def _read_output(self) -> tuple[dict, list, list, list | None]: - """Reads all outputs for a library. To be implemented for each different code. + """ + Reads all outputs for a library. To be implemented for each different code. Returns ------- - outputs : dic - Dictionary of sphere output objects used in plotting, keys are material name or ZAID number - results : dic - Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for - tallies with postive values only, all Values = 0 for empty tallies, and returns the corresponding - tally bin if it finds any negative values. Contents of the "Values" worksheet. - errors : dic - Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. - stat_checks : dic - Dictionary the MCNP statistical check results for each material/ZAID. Contents of the "Statistical - Checks" Worksheet. + outputs : dic + Dictionary of sphere output objects used in plotting, keys are material name or ZAID number + results : dic + Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for + tallies with postive values only, all Values = 0 for empty tallies, and returns the corresponding + tally bin if it finds any negative values. Contents of the "Values" worksheet. + errors : dic + Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. + stat_checks : dic + Dictionary the MCNP statistical check results for each material/ZAID. Contents of the "Statistical + Checks" Worksheet. """ pass - def _generate_single_plots(self): + def _generate_single_plots(self) -> None: """ Generate all the requested plots in a temporary folder @@ -207,7 +233,7 @@ def _generate_single_plots(self): self._build_atlas(outpath) - def _build_atlas(self, outpath): + def _build_atlas(self, outpath : str | os.PathLike) -> None: """ Build the atlas using all plots contained in directory @@ -235,7 +261,7 @@ def _build_atlas(self, outpath): # Remove tmp images shutil.rmtree(outpath) - def compare(self): + def compare(self) -> None: """ Execute the full post-processing of a comparison of libraries (i.e. excel, and atlas) @@ -263,7 +289,7 @@ def compare(self): print(" Generating Plots Atlas...") self._generate_plots(allzaids, globalname) - def _generate_plots(self, allzaids, globalname): + def _generate_plots(self, allzaids : list, globalname :str) -> None: """ Generate all the plots requested by the Sphere leakage benchmark @@ -272,13 +298,6 @@ def _generate_plots(self, allzaids, globalname): allzaids : list list of all zaids resulting from the union of the results from both libraries. - outputs : dic - dictionary containing the outputs for each library, for each code - format: { - code1:{library1:[outputs], library2:[outputs], ...}, - code2:{library1:[outputs], library2:[outputs], ...}, - ... - } globalname : str name for the output. 
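
An illustrative, standalone sketch of the four-dictionary contract that _read_output
documents above (outputs, results, errors, stat_checks, all keyed by material or ZAID).
The helper name, the dummy ZAID "1001" and the summary values are hypothetical; only the
shape of the return value and the "Value"/"Error" column names come from this patch series.

    import pandas as pd

    def read_outputs_sketch(per_zaid: dict[str, pd.DataFrame]) -> tuple[dict, dict, dict, dict]:
        # Assemble the four dictionaries that a _read_output implementation returns.
        outputs, results, errors, stat_checks = {}, {}, {}, {}
        for zaid, tallydata in per_zaid.items():
            outputs[zaid] = tallydata                                      # per-zaid output object
            results[zaid] = {"Zaid": zaid, "Leakage": "All values >= 0"}   # summary row ("Values" sheet)
            errors[zaid] = {"Zaid": zaid, "Average error": tallydata["Error"].mean()}
            stat_checks[zaid] = {"Zaid": zaid, "Passed": True}             # MCNP statistical checks only
        return outputs, results, errors, stat_checks

    dummy = pd.DataFrame({"Energy": [1.0, 14.0], "Value": [0.1, 0.2], "Error": [0.01, 0.02]})
    print(read_outputs_sketch({"1001": dummy})[2])
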
@@ -357,12 +376,13 @@ def _generate_plots(self, allzaids, globalname): self._build_atlas(outpath) - def _get_organized_output(self): + def _get_organized_output(self) -> tuple[list, list, list]: """ Organizes the outputs for each library in each code in the outputs object Returns: + -------- libraries: list list of all libraries to be post processed allzaids: list @@ -387,22 +407,29 @@ def _get_organized_output(self): return libraries, allzaids, outputs - def _generate_dataframe(self, results, errors, stat_checks=None): - """Function to turn the output of the read_{code}_output functions into DataFrames - for use with xlsxwriter + def _generate_dataframe(self, results : dict, errors : dict, stat_checks : dict | None = None) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: + """ + Function to turn the output of the read_{code}_output functions into DataFrames + for use with xlsxwriter - Arguments - ------ - results (dic): dictionary of tally summaries for each material/ZAID. - errors (dic): dictionaty of average tally errors across all energy bins. - stat_checks (dic, optional): dictionary containing results of MCNP statistical checks + Parameters + ---------- + results : dict + dictionary of tally summaries for each material/ZAID. + errors : dict + dictionaty of average tally errors across all energy bins. + stat_checks : dict or None + dictionary containing results of MCNP statistical checks (MCNP only). Defaults to None. Returns ------- - results (DataFrame): previous dictionary but in DataFrame form - errors (DataFrame): previous dictionary but in DataFrame form - stat_checks (DataFrame): previous dictionary but in DataFrame form + results : pd.DataFrame + previous dictionary but in DataFrame form + errors : pd.DataFrame + previous dictionary but in DataFrame form + stat_checks : pd.DataFrame + previous dictionary but in DataFrame form """ # Generate DataFrames results = pd.DataFrame(results) @@ -430,7 +457,7 @@ def _generate_dataframe(self, results, errors, stat_checks=None): stat_checks.reset_index(inplace=True) return results, errors, stat_checks - def pp_excel_single(self): + def pp_excel_single(self) -> None: """ Generate the single library results excel @@ -474,7 +501,8 @@ def pp_excel_single(self): @abc.abstractmethod def _get_output(self, results_path: str) -> SphereTallyOutput: - """Get the output files for the code being post-processed. + """ + Get the output files for the code being post-processed. Returns ------- @@ -482,7 +510,7 @@ def _get_output(self, results_path: str) -> SphereTallyOutput: """ pass - def pp_excel_comparison(self): + def pp_excel_comparison(self) -> None: """ Compute the data and create the excel for all libraries comparisons. In the meantime, additional data is stored for future plots. 
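
A minimal sketch of the kind of library-vs-library comparison that pp_excel_comparison
and _compute_compare_result build (a relative table and an absolute-difference table).
The merge key and the example numbers are hypothetical; only the "final"/"absdiff" naming
follows the docstrings later in this series.

    import pandas as pd

    def compare_libs_sketch(ref: pd.DataFrame, tar: pd.DataFrame) -> tuple[pd.DataFrame, pd.DataFrame]:
        # Align the two libraries on energy, then take the ratio and the difference of the values.
        merged = ref.merge(tar, on="Energy", suffixes=("_ref", "_tar"))
        final = merged[["Energy"]].assign(Ratio=merged["Value_tar"] / merged["Value_ref"])
        absdiff = merged[["Energy"]].assign(Difference=merged["Value_tar"] - merged["Value_ref"])
        return final, absdiff

    ref = pd.DataFrame({"Energy": [1.0, 14.0], "Value": [0.10, 0.20]})
    tar = pd.DataFrame({"Energy": [1.0, 14.0], "Value": [0.11, 0.18]})
    final, absdiff = compare_libs_sketch(ref, tar)
    print(final)
    print(absdiff)
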
@@ -699,10 +727,14 @@ def pp_excel_comparison(self): # ex.save() # "" - def print_raw(self): + def print_raw(self) -> None: """ Assigns a path and prints the post processing data as a .csv + Returns + ------- + None + """ for key, data in self.raw_data.items(): file = os.path.join(self.raw_path, self.code + key + ".csv") @@ -713,7 +745,8 @@ def print_raw(self): json.dump(self.metadata, outfile, indent=4) def _read_metadata_run(self, simulation_folder: os.PathLike) -> dict: - """Retrieve the metadata from the run + """ + Retrieve the metadata from the run Parameters ---------- @@ -722,7 +755,7 @@ def _read_metadata_run(self, simulation_folder: os.PathLike) -> dict: Returns ------- - dict + metadata : dict metadata dictionary """ # the super can be used, just changing the expected path @@ -740,6 +773,19 @@ def _read_metadata_run(self, simulation_folder: os.PathLike) -> dict: class MCNPSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None: + """ + Function to retrieve MCNP code version. Implimentation should be added to child classes for each code. + + Parameters + ---------- + simulation_folder : str | os.PathLike + Path to simulation results folder. + + Returns + ------- + str | None + Returns the code version, except for sphere benchmark, which returns None + """ output = self._get_output(simulation_folder) try: version = output.out.get_code_version() @@ -755,8 +801,20 @@ def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None ) return None - def _get_output(self, results_path) -> SphereMCNPSimOutput: - # Get mfile + def _get_output(self, results_path : str | os.PathLike) -> SphereMCNPSimOutput: + """ + Method to retrieve output data from MCNP as a SphereMCNPSimOutput + + Parameters + ---------- + results_path : str | os.PathLike + Path to simulation results + + Returns + ------- + output : SphereMCNPSimOutput + SphereMCNPSimOutput output object + """ for file in os.listdir(results_path): if file[-1] == "m": mfile = file From 059b2e3cf1d8a2de35a3803e1a1c471158f64836 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 21:16:15 +0000 Subject: [PATCH 49/53] Docstrings sphereoutput --- jade/output.py | 3 +- jade/sphereoutput.py | 359 +++++++++++++++++++++++++++++-------------- 2 files changed, 242 insertions(+), 120 deletions(-) diff --git a/jade/output.py b/jade/output.py index 4f12d0c7..88ce44f7 100644 --- a/jade/output.py +++ b/jade/output.py @@ -995,7 +995,8 @@ def _generate_comparison_excel_output(self) -> None: class MCNPBenchmarkOutput(AbstractBenchmarkOutput): def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: - """Read MCNP code version from the output file + """ + Read MCNP code version from the output file Parameters ---------- diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index c326c4b3..bf6e8ae3 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -43,6 +43,7 @@ import jade.excelsupport as exsupp import jade.plotter as plotter from jade.output import AbstractBenchmarkOutput, MCNPSimOutput, OpenMCSimOutput +from f4enix.output.mctal import Mctal if TYPE_CHECKING: from jade.main import Session @@ -827,22 +828,22 @@ def _get_output(self, results_path : str | os.PathLike) -> SphereMCNPSimOutput: output = SphereMCNPSimOutput(mfile, outfile) return output - def _read_output(self): + def _read_output(self) -> tuple[dict, dict, dict, dict]: """Reads all MCNP outputs from a library Returns ------- - outputs : dic - 
Dictionary of MCNP sphere output objects used in plotting, keys are material name or ZAID number - results : dic - Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for - tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding - tally bin if it finds any negative values. Contents of the "Values" worksheet. - errors : dic - Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. - stat_checks : dic - Dictionary the MCNP statistical check results for each material/ZAID. Contents of the "Statistical - Checks" Worksheet. + outputs : dic + Dictionary of MCNP sphere output objects used in plotting, keys are material name or ZAID number + results : dic + Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for + tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding + tally bin if it finds any negative values. Contents of the "Values" worksheet. + errors : dic + Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. + stat_checks : dic + Dictionary the MCNP statistical check results for each material/ZAID. Contents of the "Statistical + Checks" Worksheet. """ # Get results results = [] @@ -889,7 +890,7 @@ def _read_output(self): stat_checks.append(st_ck) return outputs, results, errors, stat_checks - def _get_output_files(self, results_path: str | os.PathLike) -> tuple: + def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.PathLike, str | os.PathLike, str | os.PathLike]: """ Recover the output files from a directory @@ -897,8 +898,6 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: ---------- results_path : str or path path where the results are contained. 
- code : str - code that generated the output ('mcnp' or 'openmc') Raises ------ @@ -907,10 +906,11 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: Returns ------- - file1 : path + file1 : str or os.PathLike path to the first file - file2 : path - path to the second file (only for mcnp) + file2 : str or os.PathLike + path to the second file + file3 : str or os.PathLike """ file1 = None @@ -939,13 +939,38 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: class OpenMCSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): def _read_code_version(self, simulation_path: str | os.PathLike) -> str | None: + """ + Read OpenMC code version from the output file + + Parameters + ---------- + simulation_path : str | os.PathLike + Path to OpenMC simulations + + Returns + ------- + str | None + OpenMC code version + """ _, spfile = self._get_output_files(simulation_path) statepoint = omc.OpenMCStatePoint(spfile) version = statepoint.version return version def _get_output(self, results_path: str) -> SphereOpenMCSimOutput: - # Get mfile + """ + Returns SphereOpenMCSimOutput object conating OpenMC simulation output data + + Parameters + ---------- + results_path : str + Path to OpenMC simulation ouputs + + Returns + ------- + output : SphereOpenMCSimOutput + OpenMC simulation data object + """ for file in os.listdir(results_path): if "tallies.out" in file: outfile = file @@ -998,19 +1023,20 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple: return file1, file2 - def _read_output(self): - """Reads all OpenMC outputs from a library + def _read_output(self) -> tuple[dict, dict, dict]: + """ + Reads all OpenMC outputs from a library Returns ------- - outputs : dic - Dictionary of OpenMC sphere output objects used for plotting, keys are material name or ZAID number - results : dic - Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for - tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding - tally bin if it finds any negative values. Contents of the "Values" worksheet. - errors : dic - Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. + outputs : dic + Dictionary of OpenMC sphere output objects used for plotting, keys are material name or ZAID number + results : dic + Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for + tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding + tally bin if it finds any negative values. Contents of the "Values" worksheet. + errors : dic + Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. """ # Get results results = [] @@ -1049,21 +1075,22 @@ def _read_output(self): class SerpentSphereBenchmarkOutput(AbstractSphereBenchmarkOutput): - def _read_output(self): - """Reads all Serpent outputs from a library + def _read_output(self) -> tuple[dict, dict, dict]: + """ + Reads all Serpent outputs from a library NOT YET IMPLEMENTED Returns ------- - outputs : dic - Dictionary of Serpent sphere output objects used in plotting, keys are material name or ZAID number - results : dic - Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for - tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding - tally bin if it finds any negative values. 
Contents of the "Values" worksheet. - errors : dic - Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. + outputs : dic + Dictionary of Serpent sphere output objects used in plotting, keys are material name or ZAID number + results : dic + Dictionary of overview of Tally values for each material/ZAID, returns either all values > 0 for + tallies with postiive values only, all Values = 0 for empty tallies, and returns the corresponding + tally bin if it finds any negative values. Contents of the "Values" worksheet. + errors : dic + Dictionary of average errors for each tally for each material/Zaid. Contents of the "Errors" worksheet. """ # Get results results = [] @@ -1078,22 +1105,31 @@ def _read_output(self): class SphereTallyOutput: - def __init__(self): + def __init__(self) -> None: + """_summary_ + + Raises + ------ + RuntimeError + If SphereTallyObject is initialised + """ raise RuntimeError("SphereTallyOutput cannot be instantiated") - def get_single_excel_data(self, tallies2pp): + def get_single_excel_data(self, tallies2pp : list) -> tuple[dict, dict]: """ Get the excel data of a single MCNP output + Parameters + ---------- + tallies2pp : list + list of tally numbers to post proccess + Returns ------- - results : dic - Excel result for different tallies - errors : dic - Error average in all tallies - + tuple[dict, dict] + _description_ """ - + #TODO this doesn't seem like it will work now... data = self.tallydata.set_index(["Energy"]) results = {} # Store excel results of different tallies errors = {} # Store average error in different tallies @@ -1151,10 +1187,17 @@ def get_single_excel_data(self, tallies2pp): return results, errors - def get_comparison_data(self, tallies2pp, code): + def get_comparison_data(self, tallies2pp : list, code : str) -> tuple[list, list]: """ Get Data for single zaid to be used in comparison. + Parameters + ---------- + talies2pp : list + List of tally numbers to postproccess + code : str + Either 'mcnp' or 'openmc' to select which tally numbers to use + Returns ------- results : list @@ -1215,16 +1258,37 @@ def get_comparison_data(self, tallies2pp, code): class SphereMCNPSimOutput(MCNPSimOutput, SphereTallyOutput): - def __init__(self, mfile, outfile): + def __init__(self, mfile : str | os.PathLike, outfile : str | os.PathLike) -> None: + """ + Initialisation function for SphereMCNPSimOutput to create tallydata and totalbin dictionaries. + + Parameters + ---------- + mfile : str | os.PathLike + path to mctal file + outfile : str | os.PathLike + path to output file + """ super().__init__(mfile, outfile) self.tallydata, self.totalbin = self._get_tallydata(self.mctal) - def _get_tallydata(self, mctal): + def _get_tallydata(self, mctal : Mctal) -> tuple[pd.DataFrame, pd.DataFrame]: """ Retrieve and organize mctal data. 
Simplified for sphere leakage case - Returns: DataFrame containing the organized data + Parameters + ---------- + mctal : Mctal + F4Eninx Mctal object + + Returns + ------- + tallydata : pd.DataFrame + Pandas dataframe containing organised tally data + totalbin : pd.DataFrame + Pandas dataframe containing total tally data """ + # Extract data rows = [] rowstotal = [] @@ -1296,26 +1360,41 @@ def _get_tallydata(self, mctal): class SphereOpenMCSimOutput(OpenMCSimOutput, SphereTallyOutput): - def __init__(self, output_path): + def __init__(self, output_path : str | os.PathLike) -> None: + """ + Initialisation function for SphereOpenMCSimOutput class + + Parameters + ---------- + output_path : str | os.PathLike + Path to OpenC simulation output files + + Returns + ------- + None + """ self.output = omc.OpenMCSphereStatePoint(output_path) self.tallydata, self.totalbin = self.process_tally() self.stat_checks = None - def _create_dataframe(self, rows): - """Creates dataframe from the data in each output passed through as + def _create_dataframe(self, rows : list) -> tuple[pd.DataFrame, pd.DataFrame]: + """ + Creates dataframe from the data in each output passed through as a list of lists from the process_tally function - Args: - rows: list - list of list containing the rows of information from an output file - - Returns: - df: DataFrame - dataframe containing the information from each output + Parameters + ---------- + rows : list + list of list containing the rows of information from an output file - dftotal: DataFrame - dataframe containing the sum of all values and errors for each output + Returns + ------- + df : pd.DataFrame + dataframe containing the information from each output + dftotal : pd.DataFrame + dataframe containing the sum of all values and errors for each output """ + df = pd.DataFrame( rows, columns=["Tally N.", "Tally Description", "Energy", "Value", "Error"] ) @@ -1336,35 +1415,17 @@ def _create_dataframe(self, rows): ) return df, dftotal - def process_tally(self): + def process_tally(self) -> tuple[pd.DataFrame, pd.DataFrame]: """ - Reads data from output file and stores it as a list of lists - to be turned into a dataframe - - Returns: - tallydata: Dataframe - see df in _create_dataframe() + Creates dataframe from the data in each output passed through as + a list of lists from the process_tally function - totalbin: Dataframe - see dftotal in _create_dataframe() - """ - """ - rows = [] - for line in self.output_file_data: - if "tally" in line.lower(): - parts = line.split() - tally_n = int(parts[2].replace(":", "")) - tally_description = " ".join([parts[3].title(), parts[4].title()]) - if "incoming energy" in line.lower(): - parts = line.split() - energy = 1e-6 * float(parts[3].replace(")", "")) - if "flux" in line.lower(): - if ":" in line: - continue - else: - parts = line.split() - value, error = float(parts[1]), float(parts[3]) - rows.append([tally_n, tally_description, energy, value, error]) + Returns + ------- + df : pd.DataFrame + dataframe containing the information from each output + dftotal : pd.DataFrame + dataframe containing the sum of all values and errors for each output """ rows = self.output.tally_to_rows() tallydata, totalbin = self._create_dataframe(rows) @@ -1382,7 +1443,7 @@ class SphereSDDROutput(MCNPSphereBenchmarkOutput): "10y": "6.0", } - def pp_excel_single(self): + def pp_excel_single(self) -> None: """ Generate the single library results excel @@ -1420,7 +1481,7 @@ def pp_excel_single(self): outpath, lib_name, results, errors, stat_checks ) - def 
pp_excel_comparison(self): + def pp_excel_comparison(self) -> None: """ Generate the excel comparison output @@ -1468,9 +1529,18 @@ def pp_excel_comparison(self): outpath, name, final, absdiff, std_dev, single_pp_files ) - def _get_organized_output(self): + def _get_organized_output(self) -> tuple[list, list, list]: """ Simply recover a list of the zaids and libraries involved + + Returns + ------- + libs : list + list of libraries + zaids : list + list of zaids + outputs : list + list of outputs """ zaids = [] @@ -1483,12 +1553,19 @@ def _get_organized_output(self): return libs, zaids, outputs - def _generate_single_plots(self): + def _generate_single_plots(self) -> None: + """ + Method to generate single plots + + Returns + ------- + None + """ libs, allzaids, outputs = self._get_organized_output() globalname = self.lib self._generate_plots(allzaids, globalname) - def _generate_plots(self, allzaids, globalname): + def _generate_plots(self, allzaids : list, globalname : str) -> None: """ Generate all the plots requested by the Sphere SDDR benchmark @@ -1795,19 +1872,29 @@ def _generate_plots(self, allzaids, globalname): # Remove tmp images shutil.rmtree(outpath) - def _extract_data4plots(self, zaid, mt, lib, time): - """_summary_ + def _extract_data4plots(self, zaid : str, mt : str, lib : str, time : float) -> tuple[float, float, float]: + """ + Method to extract data for plots - Args: - zaid (str): zaid of output - mt (str): mt - lib (str): library being postprocessed - time (float): timestep + Parameters + ---------- + zaid : str + zaid + mt : str + mt reaction number + lib : str + library + time : float + timestep - Returns: - nflux (float): neutron flux - pflux (float): proton flux - sddr (float): shut down dose rate + Returns + ------- + nflux : float + neutron flux + pflux : float + proton flux + sddr : float + shut down dose rate """ tallies = self.outputs[zaid, mt, lib].tallydata # Extract values @@ -1876,7 +1963,7 @@ def _compute_single_results( return outputs, results, errors, stat_checks - def _compute_compare_result(self, reflib, tarlib): + def _compute_compare_result(self, reflib : str, tarlib : str) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: """ Given a reference lib and a target lib, both absolute and relative comparison are computed @@ -1894,7 +1981,7 @@ def _compute_compare_result(self, reflib, tarlib): relative comparison table. absdiff : pd.DataFrame absolute comparison table. - std_dev: + std_dev: pd.DataFrame comparison in std. dev. 
from mean table """ @@ -1948,13 +2035,15 @@ def _compute_compare_result(self, reflib, tarlib): return final, absdiff, std_dev @staticmethod - def _sort_df(df): + def _sort_df(df : pd.DataFrame) -> None: """ Sorts the values in the passed dataframe by the Parent column, then sets 3 index columns - Args: - df (DataFrame): Dataframe containing output data + Parameters + ---------- + df : pd.DataFrame + Dataframe containing output data """ df["index"] = pd.to_numeric(df["Parent"].values, errors="coerce") df.sort_values("index", inplace=True) @@ -1964,7 +2053,20 @@ def _sort_df(df): df.reset_index(inplace=True) @staticmethod - def _sortfunc_zaidMTcouples(item): + def _sortfunc_zaidMTcouples(item : tuple | list) -> tuple[bool, str | int, str | int]: + """ + Function to sort zaid couples + + Parameters + ---------- + item : tuple | list + list of zaid couples + + Returns + ------- + tuple[bool, str | int, str | int] + (flag, zaid, mt) + """ try: zaid = int(item[0]) except ValueError: @@ -1981,7 +2083,7 @@ def _sortfunc_zaidMTcouples(item): return (flag, zaid, mt) - def _parserunmcnp(self, test_path, lib): + def _parserunmcnp(self, test_path : str | os.PathLike, lib : str) -> tuple[dict, list, list, list]: """ given a MCNP run folder the parsing of the different outputs is performed @@ -1990,8 +2092,6 @@ def _parserunmcnp(self, test_path, lib): ---------- test_path : path or str path to the test. - folder : str - name of the folder to parse inside test_path. lib : str library. @@ -2054,10 +2154,14 @@ def _parserunmcnp(self, test_path, lib): return outputs, results, errors, stat_checks - def print_raw(self): + def print_raw(self) -> None: """ Assigns a path and prints the post processing data as a .csv + Returns + ------- + None + """ for key, data in self.raw_data.items(): # Follow the same structure of other benchmarks @@ -2073,11 +2177,28 @@ def print_raw(self): class SphereSDDRMCNPOutput(SphereMCNPSimOutput): - def _get_tallydata(self, mctal): + def _get_tallydata(self) -> tuple[dict, dict]: + """_summary_ + + Returns + ------- + self.tallydata : dict + dictionary of pandas dataframes containing tally data + self.totalbin : dict + dictionary of pandas dataframes containing tally total data + """ return self.tallydata, self.totalbin @staticmethod - def _drop_total_rows(df: pd.DataFrame): + def _drop_total_rows(df: pd.DataFrame) -> None: + """ + Method to drop total rows fro dataframes + + Parameters + ---------- + df : pd.DataFrame + Dataframe to be sorted + """ # drop all total rows for key in ["User", "Time", "Energy"]: try: @@ -2085,7 +2206,7 @@ def _drop_total_rows(df: pd.DataFrame): except KeyError: pass - def get_single_excel_data(self): + def get_single_excel_data(self) -> tuple[pd.Series, pd.Series]: """ Return the data that will be used in the single post-processing excel output for a single reaction From 48c054fc181ceb8ada40db3816c0ae2b6ff843c1 Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Wed, 6 Nov 2024 21:16:49 +0000 Subject: [PATCH 50/53] Black format output and sphereoutput --- jade/output.py | 26 ++++++++++++++----- jade/sphereoutput.py | 62 ++++++++++++++++++++++++++------------------ 2 files changed, 56 insertions(+), 32 deletions(-) diff --git a/jade/output.py b/jade/output.py index 88ce44f7..6bf5638e 100644 --- a/jade/output.py +++ b/jade/output.py @@ -217,7 +217,9 @@ def _get_output_files(self, results_path: str | os.PathLike) -> list: """ @abc.abstractmethod - def parse_output_data(self, results_path : str | os.PathLike) -> tuple[AbstractSimOutput, list, list]: + def 
parse_output_data( + self, results_path: str | os.PathLike + ) -> tuple[AbstractSimOutput, list, list]: """ Abstract function for retrieving simulation output data, tally numbers and tally comments. @@ -518,7 +520,7 @@ def compare(self) -> None: shutil.rmtree(outpath) @staticmethod - def _reorder_df(df : pd.DataFrame, x_set : list) -> pd.DataFrame: + def _reorder_df(df: pd.DataFrame, x_set: list) -> pd.DataFrame: """Method to re-organise pandas data frame. Parameters @@ -579,7 +581,7 @@ def _generate_single_excel_output(self) -> None: Returns ------- None - """ + """ self.outputs = {} self.results = {} self.errors = {} @@ -1028,7 +1030,9 @@ def _read_code_version(self, simulation_folder: os.PathLike) -> str | None: ) return None - def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.PathLike, str | os.PathLike, str | os.PathLike]: + def _get_output_files( + self, results_path: str | os.PathLike + ) -> tuple[str | os.PathLike, str | os.PathLike, str | os.PathLike]: """ Recover the output files from a directory @@ -1075,7 +1079,9 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.P return file1, file2, file3 - def parse_output_data(self, results_path : str | os.PathLike) -> tuple[MCNPSimOutput, list, list]: + def parse_output_data( + self, results_path: str | os.PathLike + ) -> tuple[MCNPSimOutput, list, list]: """ Function for retrieving MCNP simulation output data, tally numbers and tally comments. @@ -1122,7 +1128,9 @@ def _read_code_version(self, simulation_path: os.PathLike) -> str | None: version = statepoint.version return version - def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.PathLike, str | os.PathLike]: + def _get_output_files( + self, results_path: str | os.PathLike + ) -> tuple[str | os.PathLike, str | os.PathLike]: """ Recover the output files from a directory @@ -1190,9 +1198,11 @@ def parse_output_data( tally_comments = sim_output.output.tally_comments return sim_output, tally_numbers, tally_comments + class AbstractSimOutput: tallydata = None totalbin = None + def __init__(self) -> None: """ Abstract class to enforce inclusion of tallydata and totalbin dictionaries in children simulation output classes @@ -1209,6 +1219,7 @@ def __init__(self) -> None: if not isinstance(self.totalbin, dict): raise NotImplementedError + class MCNPSimOutput(AbstractSimOutput): def __init__( self, @@ -1421,7 +1432,8 @@ def process_tally(self) -> tuple[dict[int, pd.DataFrame], dict[int, pd.DataFrame tallydata, totalbin = self._create_dataframes(tallies) return tallydata, totalbin -def fatal_exception(message : str | None = None) -> None: + +def fatal_exception(message: str | None = None) -> None: """ Use this function to exit with a code error from a handled exception diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index bf6e8ae3..a5897a0a 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -116,7 +116,7 @@ def _get_output_files(self, results_path: str | os.PathLike, code: str) -> list: """ pass - def parse_output_data(self, results_path : str | os.PathLike): + def parse_output_data(self, results_path: str | os.PathLike): """ Abstract function for retrieving simulation output data, tally numbers and tally comments. Not used in Sphere. 
@@ -234,7 +234,7 @@ def _generate_single_plots(self) -> None: self._build_atlas(outpath) - def _build_atlas(self, outpath : str | os.PathLike) -> None: + def _build_atlas(self, outpath: str | os.PathLike) -> None: """ Build the atlas using all plots contained in directory @@ -290,7 +290,7 @@ def compare(self) -> None: print(" Generating Plots Atlas...") self._generate_plots(allzaids, globalname) - def _generate_plots(self, allzaids : list, globalname :str) -> None: + def _generate_plots(self, allzaids: list, globalname: str) -> None: """ Generate all the plots requested by the Sphere leakage benchmark @@ -408,7 +408,9 @@ def _get_organized_output(self) -> tuple[list, list, list]: return libraries, allzaids, outputs - def _generate_dataframe(self, results : dict, errors : dict, stat_checks : dict | None = None) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: + def _generate_dataframe( + self, results: dict, errors: dict, stat_checks: dict | None = None + ) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: """ Function to turn the output of the read_{code}_output functions into DataFrames for use with xlsxwriter @@ -419,7 +421,7 @@ def _generate_dataframe(self, results : dict, errors : dict, stat_checks : dict dictionary of tally summaries for each material/ZAID. errors : dict dictionaty of average tally errors across all energy bins. - stat_checks : dict or None + stat_checks : dict or None dictionary containing results of MCNP statistical checks (MCNP only). Defaults to None. @@ -802,7 +804,7 @@ def _read_code_version(self, simulation_folder: str | os.PathLike) -> str | None ) return None - def _get_output(self, results_path : str | os.PathLike) -> SphereMCNPSimOutput: + def _get_output(self, results_path: str | os.PathLike) -> SphereMCNPSimOutput: """ Method to retrieve output data from MCNP as a SphereMCNPSimOutput @@ -814,7 +816,7 @@ def _get_output(self, results_path : str | os.PathLike) -> SphereMCNPSimOutput: Returns ------- output : SphereMCNPSimOutput - SphereMCNPSimOutput output object + SphereMCNPSimOutput output object """ for file in os.listdir(results_path): if file[-1] == "m": @@ -890,7 +892,9 @@ def _read_output(self) -> tuple[dict, dict, dict, dict]: stat_checks.append(st_ck) return outputs, results, errors, stat_checks - def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.PathLike, str | os.PathLike, str | os.PathLike]: + def _get_output_files( + self, results_path: str | os.PathLike + ) -> tuple[str | os.PathLike, str | os.PathLike, str | os.PathLike]: """ Recover the output files from a directory @@ -910,7 +914,7 @@ def _get_output_files(self, results_path: str | os.PathLike) -> tuple[str | os.P path to the first file file2 : str or os.PathLike path to the second file - file3 : str or os.PathLike + file3 : str or os.PathLike """ file1 = None @@ -1115,7 +1119,7 @@ def __init__(self) -> None: """ raise RuntimeError("SphereTallyOutput cannot be instantiated") - def get_single_excel_data(self, tallies2pp : list) -> tuple[dict, dict]: + def get_single_excel_data(self, tallies2pp: list) -> tuple[dict, dict]: """ Get the excel data of a single MCNP output @@ -1129,7 +1133,7 @@ def get_single_excel_data(self, tallies2pp : list) -> tuple[dict, dict]: tuple[dict, dict] _description_ """ - #TODO this doesn't seem like it will work now... + # TODO this doesn't seem like it will work now... 
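        # Note (assumption based on _get_tallydata/_create_dataframe later in this file):
        # self.tallydata is expected to carry the columns "Tally N.", "Tally Description",
        # "Energy", "Value" and "Error", so indexing by "Energy" here relies on that layout.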
data = self.tallydata.set_index(["Energy"]) results = {} # Store excel results of different tallies errors = {} # Store average error in different tallies @@ -1187,7 +1191,7 @@ def get_single_excel_data(self, tallies2pp : list) -> tuple[dict, dict]: return results, errors - def get_comparison_data(self, tallies2pp : list, code : str) -> tuple[list, list]: + def get_comparison_data(self, tallies2pp: list, code: str) -> tuple[list, list]: """ Get Data for single zaid to be used in comparison. @@ -1258,7 +1262,7 @@ def get_comparison_data(self, tallies2pp : list, code : str) -> tuple[list, list class SphereMCNPSimOutput(MCNPSimOutput, SphereTallyOutput): - def __init__(self, mfile : str | os.PathLike, outfile : str | os.PathLike) -> None: + def __init__(self, mfile: str | os.PathLike, outfile: str | os.PathLike) -> None: """ Initialisation function for SphereMCNPSimOutput to create tallydata and totalbin dictionaries. @@ -1272,7 +1276,7 @@ def __init__(self, mfile : str | os.PathLike, outfile : str | os.PathLike) -> No super().__init__(mfile, outfile) self.tallydata, self.totalbin = self._get_tallydata(self.mctal) - def _get_tallydata(self, mctal : Mctal) -> tuple[pd.DataFrame, pd.DataFrame]: + def _get_tallydata(self, mctal: Mctal) -> tuple[pd.DataFrame, pd.DataFrame]: """ Retrieve and organize mctal data. Simplified for sphere leakage case @@ -1360,7 +1364,7 @@ def _get_tallydata(self, mctal : Mctal) -> tuple[pd.DataFrame, pd.DataFrame]: class SphereOpenMCSimOutput(OpenMCSimOutput, SphereTallyOutput): - def __init__(self, output_path : str | os.PathLike) -> None: + def __init__(self, output_path: str | os.PathLike) -> None: """ Initialisation function for SphereOpenMCSimOutput class @@ -1368,7 +1372,7 @@ def __init__(self, output_path : str | os.PathLike) -> None: ---------- output_path : str | os.PathLike Path to OpenC simulation output files - + Returns ------- None @@ -1377,7 +1381,7 @@ def __init__(self, output_path : str | os.PathLike) -> None: self.tallydata, self.totalbin = self.process_tally() self.stat_checks = None - def _create_dataframe(self, rows : list) -> tuple[pd.DataFrame, pd.DataFrame]: + def _create_dataframe(self, rows: list) -> tuple[pd.DataFrame, pd.DataFrame]: """ Creates dataframe from the data in each output passed through as a list of lists from the process_tally function @@ -1392,7 +1396,7 @@ def _create_dataframe(self, rows : list) -> tuple[pd.DataFrame, pd.DataFrame]: df : pd.DataFrame dataframe containing the information from each output dftotal : pd.DataFrame - dataframe containing the sum of all values and errors for each output + dataframe containing the sum of all values and errors for each output """ df = pd.DataFrame( @@ -1425,7 +1429,7 @@ def process_tally(self) -> tuple[pd.DataFrame, pd.DataFrame]: df : pd.DataFrame dataframe containing the information from each output dftotal : pd.DataFrame - dataframe containing the sum of all values and errors for each output + dataframe containing the sum of all values and errors for each output """ rows = self.output.tally_to_rows() tallydata, totalbin = self._create_dataframe(rows) @@ -1565,7 +1569,7 @@ def _generate_single_plots(self) -> None: globalname = self.lib self._generate_plots(allzaids, globalname) - def _generate_plots(self, allzaids : list, globalname : str) -> None: + def _generate_plots(self, allzaids: list, globalname: str) -> None: """ Generate all the plots requested by the Sphere SDDR benchmark @@ -1872,7 +1876,9 @@ def _generate_plots(self, allzaids : list, globalname : str) -> None: # Remove 
tmp images shutil.rmtree(outpath) - def _extract_data4plots(self, zaid : str, mt : str, lib : str, time : float) -> tuple[float, float, float]: + def _extract_data4plots( + self, zaid: str, mt: str, lib: str, time: float + ) -> tuple[float, float, float]: """ Method to extract data for plots @@ -1963,7 +1969,9 @@ def _compute_single_results( return outputs, results, errors, stat_checks - def _compute_compare_result(self, reflib : str, tarlib : str) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: + def _compute_compare_result( + self, reflib: str, tarlib: str + ) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: """ Given a reference lib and a target lib, both absolute and relative comparison are computed @@ -2035,7 +2043,7 @@ def _compute_compare_result(self, reflib : str, tarlib : str) -> tuple[pd.DataFr return final, absdiff, std_dev @staticmethod - def _sort_df(df : pd.DataFrame) -> None: + def _sort_df(df: pd.DataFrame) -> None: """ Sorts the values in the passed dataframe by the Parent column, then sets 3 index columns @@ -2053,7 +2061,9 @@ def _sort_df(df : pd.DataFrame) -> None: df.reset_index(inplace=True) @staticmethod - def _sortfunc_zaidMTcouples(item : tuple | list) -> tuple[bool, str | int, str | int]: + def _sortfunc_zaidMTcouples( + item: tuple | list, + ) -> tuple[bool, str | int, str | int]: """ Function to sort zaid couples @@ -2083,7 +2093,9 @@ def _sortfunc_zaidMTcouples(item : tuple | list) -> tuple[bool, str | int, str | return (flag, zaid, mt) - def _parserunmcnp(self, test_path : str | os.PathLike, lib : str) -> tuple[dict, list, list, list]: + def _parserunmcnp( + self, test_path: str | os.PathLike, lib: str + ) -> tuple[dict, list, list, list]: """ given a MCNP run folder the parsing of the different outputs is performed From 96ba4cb5c6b6de929292fa4d4bf9b7aad63cce7a Mon Sep 17 00:00:00 2001 From: Steven Bradnam Date: Thu, 7 Nov 2024 16:44:54 +0000 Subject: [PATCH 51/53] Fixed bug in SphereSDDROutput --- jade/sphereoutput.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index a5897a0a..4ce51810 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -2189,6 +2189,7 @@ def print_raw(self) -> None: class SphereSDDRMCNPOutput(SphereMCNPSimOutput): + ''' def _get_tallydata(self) -> tuple[dict, dict]: """_summary_ @@ -2200,6 +2201,7 @@ def _get_tallydata(self) -> tuple[dict, dict]: dictionary of pandas dataframes containing tally total data """ return self.tallydata, self.totalbin + ''' @staticmethod def _drop_total_rows(df: pd.DataFrame) -> None: From 90a7c92d1f8fe17b26a579f5384f2e8141bf6b2d Mon Sep 17 00:00:00 2001 From: Alex Valentine Date: Mon, 18 Nov 2024 14:12:52 +0000 Subject: [PATCH 52/53] fix spheresddr tests --- jade/sphereoutput.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/jade/sphereoutput.py b/jade/sphereoutput.py index 4ce51810..0e8e2cc0 100644 --- a/jade/sphereoutput.py +++ b/jade/sphereoutput.py @@ -2189,8 +2189,8 @@ def print_raw(self) -> None: class SphereSDDRMCNPOutput(SphereMCNPSimOutput): - ''' - def _get_tallydata(self) -> tuple[dict, dict]: + + def _get_tallydata(self, mctal: Mctal) -> tuple[dict, dict]: """_summary_ Returns @@ -2201,7 +2201,6 @@ def _get_tallydata(self) -> tuple[dict, dict]: dictionary of pandas dataframes containing tally total data """ return self.tallydata, self.totalbin - ''' @staticmethod def _drop_total_rows(df: pd.DataFrame) -> None: From 0549c65270f303e7126073fa886ba3a4f870c64c Mon Sep 17 00:00:00 2001 From: Alex Valentine Date: 
Mon, 18 Nov 2024 14:31:57 +0000 Subject: [PATCH 53/53] naming fix --- jade/expoutput.py | 2 +- jade/postprocess.py | 2 +- tests/expoutput_test.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/jade/expoutput.py b/jade/expoutput.py index 02ae9a38..c3f77345 100644 --- a/jade/expoutput.py +++ b/jade/expoutput.py @@ -2316,7 +2316,7 @@ def _define_title(self, input: str, particle: str, quantity: str) -> str: return title -class FNGCPBOutput(ExperimentalOutput): +class FNGHCPBOutput(ExperimentalOutput): def _processMCNPdata(self, output: MCNPSimOutput) -> None: """ Used to override parent function as this is not required. diff --git a/jade/postprocess.py b/jade/postprocess.py index af9e8a32..b610c3a6 100644 --- a/jade/postprocess.py +++ b/jade/postprocess.py @@ -187,7 +187,7 @@ def _get_output(action, code, testname, lib, session): elif testname == "FNG-HCPB": if action == "compare": - out = expo.FNGCPBOutput(lib, testname, session, multiplerun=True) + out = expo.FNGHCPBOutput(lib, testname, session, multiplerun=True) elif action == "pp": print(exp_pp_message) return False diff --git a/tests/expoutput_test.py b/tests/expoutput_test.py index 9d30e7e3..5911f28d 100644 --- a/tests/expoutput_test.py +++ b/tests/expoutput_test.py @@ -338,7 +338,7 @@ def test_fnghcpboutput(self, session_mock: MockUpSession): testname = "FNG-HCPB" os.makedirs(session_mock.path_comparison) os.makedirs(session_mock.path_single) - self.benchoutput_comp = expoutput.FNGCPBOutput( + self.benchoutput_comp = expoutput.FNGHCPBOutput( ["32c", "31c", "00c"], code, testname, session_mock, multiplerun=True ) self.benchoutput_comp.compare()
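
To close, a standalone sketch of the tallydata/totalbin enforcement pattern that the
AbstractSimOutput class introduced earlier in this series documents: a child class must
assign both dictionaries before calling the base initialiser, otherwise NotImplementedError
is raised. The class names and error messages below are illustrative, not the library's own.

    import pandas as pd

    class SimOutputBase:
        tallydata = None
        totalbin = None

        def __init__(self) -> None:
            # Mirror the enforcement described in the patch: both attributes must be dicts.
            if not isinstance(self.tallydata, dict):
                raise NotImplementedError("child class must define a tallydata dict")
            if not isinstance(self.totalbin, dict):
                raise NotImplementedError("child class must define a totalbin dict")

    class GoodOutput(SimOutputBase):
        def __init__(self) -> None:
            self.tallydata = {4: pd.DataFrame({"Value": [1.0], "Error": [0.05]})}
            self.totalbin = {4: None}
            super().__init__()  # passes: both dictionaries were set first

    class BadOutput(SimOutputBase):
        pass  # never defines the two dictionaries

    GoodOutput()
    try:
        BadOutput()
    except NotImplementedError as err:
        print("enforced:", err)
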