Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Combining changes between edits done 4 weeks ago and 2 weeks ago with latest versions of LiAISON ReEDS HIPSTER #25

Merged
merged 14 commits into from
Jan 9, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,5 @@
*.pyc
*.7z
*.bak
/LiAISON-Scope123/export
/LiAISON-Scope123/data/output
102 changes: 57 additions & 45 deletions LiAISON-ReEDS/code/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,16 +7,15 @@
#import the config file
import argparse
parser = argparse.ArgumentParser(description='Execute LIAISON model')
parser.add_argument('--database', help='Name of database to be created.')
parser.add_argument('--datapath', help='Path to the input and output data folder.')
parser.add_argument('--envpath', help='Path to the environments folder.')
parser.add_argument('--lca_config_file', help='Name of life cycle information config file in data folder.')
args = parser.parse_args()
tim0 = time.time()
print('Starting the Code',flush=True)
# YAML filenameß
# YAML filename
config_yaml_filename = os.path.join(args.datapath, args.lca_config_file)

data_yaml_filename = os.path.join(args.datapath, 'data_dir.yaml')
try:
with open(config_yaml_filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
Expand All @@ -25,7 +24,11 @@
data_dirs = config.get('data_directories', {})
inputs = config.get('input_filenames', {})
outputs = config.get('output_filenames', {})

options = config.get('additional_options', {})

with open(data_yaml_filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
data_dirs = config.get('data_directories', {})

except IOError as err:
print(f'Could not open {config_yaml_filename} for configuration. Exiting with status code 1.')
Expand All @@ -43,40 +46,53 @@
from reeds_ecoinvent_updater.main_database_editor import reeds_updater
from liaison.liaison_model import main_run

# scenario parameters from the yaml file
lca_project_name = scenario_params.get('lca_project_name')
primary_process_under_study = scenario_params.get('primary_process_to_study')
updated_database = scenario_params.get('updated_database')
updated_project_name = scenario_params.get('updated_project_name')
mc_runs = int(scenario_params.get('mc_runs'))
functional_unit = float(scenario_params.get('functional_unit'))
base_database = scenario_params.get('base_database')
base_project = scenario_params.get('base_project')
location_under_study = scenario_params.get('location_under_study')
initial_year = scenario_params.get('initial_year')
year_of_study = scenario_params.get('year')
unit_under_study = scenario_params.get('unit')
reeds_yaml_data_filename = os.path.join(args.datapath,inputs.get('reeds_yaml_data'))
region = location_under_study

#Hardcoding these project names to reduce yaml file complexity
base_database = "premise_base"
base_project = "premise_base"
# These modified projects and databases are used to save the large modified ecoinvent databases for future LCA calculations.
updated_database = "premise_updated_ecoinvent_"+str(year_of_study) # These project name is used to create a new project after major modifications to original ecoinvent. These can include premise updates or ReEDS grid mix updates
updated_project_name = "premise_updated_ecoinvent_"+str(year_of_study) # These project name is used to create a new project after major modifications to original ecoinvent. These can include premise updates or ReEDS grid mix updates

# Reading the ReEDS yaml file name
try:
with open(reeds_yaml_data_filename, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
reeds_inputs = config.get('input_filenames', {})
except IOError as err:
print(f'Could not open {reeds_yaml_data_filename} for configuration. Exiting with status code 1.')
exit(1)

# Filenames for inventories
creation_inventory_filename = os.path.join(args.datapath,
data_dirs.get('liaisondata'),
data_dirs.get('reeds_data'),
inputs.get('creation_inventory'))
reeds_inputs.get('creation_inventory'))
foreground_inventory_filename = os.path.join(args.datapath,
data_dirs.get('liaisondata'),
inputs.get('foreground_inventory'))
modification_inventory_filename = os.path.join(args.datapath,
data_dirs.get('liaisondata'),
data_dirs.get('reeds_data'),
inputs.get('modification_inventory'))
reeds_inputs.get('modification_inventory'))
modification_inventory_filename_us = os.path.join(args.datapath,
data_dirs.get('liaisondata'),
data_dirs.get('reeds_data'),
inputs.get('modification_inventory_us'))
reeds_inputs.get('modification_inventory_us'))
ecoinvent_file = os.path.join(args.datapath,
data_dirs.get('ecoinvent_data'))


# ## Fix this
# Fix this
ecoinvent_file = "/Users/tghosh/Library/CloudStorage/OneDrive-NREL/work_NREL/liaison/hipster_hpc_files/ecoinvent/ecoinvent 3.8_cutoff_ecoSpold02/datasets"


Expand All @@ -85,20 +101,17 @@
data_dirs.get('output'))
data_dir = os.path.join(args.datapath,
data_dirs.get('liaisondata'))
## Fix this
creation_inventory_filename = os.path.join("/Users/tghosh/Library/CloudStorage/OneDrive-NREL/work_NREL/liaison/hipster_hpc_files/reeds_to_hipster_dev/reedsdata/",inputs.get('creation_inventory'))


run_database_reader = flags.get('database_reader')
run_database_editor = flags.get('database_editor')

run_database_reader = flags.get('ecoinvent_reader')
uncertainty_corrections = flags.get('correct uncertainty')
mc_foreground_flag = flags.get('mc_foreground')
mc_foreground_flag = options.get('mc_foreground')
lca_flag=flags.get('lca')
lca_activity_modification=flags.get('modify_ecoinvent_grid_mix')
regional_sensitivity_flag=flags.get('regional_sensitivity')
create_new_database=flags.get('create_new_database')
premise_editor= flags.get('premise_editor')
reeds_grid_mix_creator = flags.get('create_reeds_grid_mix')
premise_editor= flags.get('update_ecoinvent_using_premise')
reeds_grid_mix_creator = flags.get('reeds_us_electricity_grid_mix')
region_sensitivity_flag = options.get('region_sensitivity_flag')
edit_ecoinvent_user_controlled = options.get('edit_ecoinvent_user_controlled')

print('All input data parameters read', flush = True)

Expand All @@ -113,24 +126,23 @@
)

#Running database editor for modifying base databases with IMAGE information and future scenario
if run_database_editor:
print('Running db editor', flush = True)
reeds_updater(
initial_year=initial_year,
results_filename=results_filename,
reeds_grid_mix_creator = reeds_grid_mix_creator,
lca_activity_modification=lca_activity_modification,
create_new_database=create_new_database,
data_dir=data_dir,
inventory_filename = creation_inventory_filename,
modification_inventory_filename = modification_inventory_filename,
modification_inventory_filename_us = modification_inventory_filename_us,
premise_editor=premise_editor,
base_database=base_database,
base_project = base_project,
database_new = updated_database,
project_new = updated_project_name,
bw=bw)

print('Running db editor', flush = True)
reeds_updater(
year_of_study=year_of_study,
results_filename=results_filename,
reeds_grid_mix_creator = reeds_grid_mix_creator,
data_dir=data_dir,
inventory_filename = creation_inventory_filename,
modification_inventory_filename = modification_inventory_filename,
modification_inventory_filename_us = modification_inventory_filename_us,
premise_editor=premise_editor,
base_database=base_database,
base_project = base_project,
database_new = updated_database,
project_new = updated_project_name,
bw=bw
)


#Create results directory
Expand All @@ -152,12 +164,12 @@

main_run(lca_project=lca_project_name,
updated_project_name=updated_project_name,
initial_year=initial_year,
year_of_study=year_of_study,
results_filename=results_filename,
mc_foreground_flag=mc_foreground_flag,
lca_flag=lca_flag,
lca_activity_modification=lca_activity_modification,
regional_sensitivity_flag=regional_sensitivity_flag,
region_sensitivity_flag=region_sensitivity_flag,
edit_ecoinvent_user_controlled = edit_ecoinvent_user_controlled,
region=region,
data_dir=data_dir,
primary_process=primary_process_under_study,
Expand Down
114 changes: 114 additions & 0 deletions LiAISON-ReEDS/code/liaison/edit_activity_ecoinvent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
import sys
import pandas as pd




def user_controlled_editing_ecoinvent_activity(process_selected_as_foreground,year_of_study,data_dir):
    """
    Convert selected ecoinvent activities into a LiAISON-style foreground inventory.

    Every exchange of every chosen activity is flattened into one inventory
    row.  The process location is rewritten to "USA" so the activity can be
    re-created as a foreground process there; technosphere inputs are marked
    to be re-searched in that location and their rows are then dropped from
    the returned inventory (only production and biosphere rows are kept).

    Parameters
    ----------
    process_selected_as_foreground : dict
        Mapping of key -> ecoinvent/Brightway activity. Each activity must
        support ``activity['name']`` and ``activity.exchanges()``; each
        exchange behaves like a dict with at least ``name``, ``amount``,
        ``unit``, ``type`` and ``input`` (a ``(database, code)`` pair).
        Production exchanges additionally carry ``location``.
    year_of_study : int
        Year stamped on every inventory row.
    data_dir : str
        Directory where a sanity-check CSV copy of the inventory is written.

    Returns
    -------
    pandas.DataFrame
        Inventory dataframe containing production and biosphere rows only,
        with columns: process, flow, value, unit, input, year, comments,
        type, process_location, supplying_location, code.
    """
    import os

    new_location = "USA"

    rows = []
    for key in process_selected_as_foreground.keys():
        activity_name = process_selected_as_foreground[key]['name']
        for exch in process_selected_as_foreground[key].exchanges():
            row = {
                'process': activity_name,
                'flow': exch['name'],
                'value': exch['amount'],
                'unit': exch['unit'],
                'year': year_of_study,
                'comments': 'None',
                'process_location': new_location,
            }
            exch_type = exch['type']
            if exch_type == 'production':
                row.update(input=False, type='production',
                           supplying_location=exch['location'],
                           code=exch['input'][1])
            elif exch_type == 'technosphere':
                # Technosphere inputs must be re-searched in the new
                # location later, so their code is zeroed out here.
                row.update(input=True, type='technosphere',
                           supplying_location=new_location,
                           code=0)
            elif exch_type == 'biosphere':
                row.update(input=False, type='biosphere',
                           supplying_location='None',
                           code=exch['input'][1])
            else:
                # Unknown exchange types (e.g. 'substitution') previously
                # desynchronized the parallel lists and crashed the
                # DataFrame construction; record them defensively instead.
                row.update(input=False, type=exch_type,
                           supplying_location='None', code=0)
            rows.append(row)

    # Building from per-exchange dicts guarantees equal column lengths.
    example = pd.DataFrame(rows, columns=['process', 'flow', 'value', 'unit',
                                          'input', 'year', 'comments', 'type',
                                          'process_location',
                                          'supplying_location', 'code'])

    # Technosphere rows are intentionally dropped: those inputs are linked
    # later by searching the database in the new location.
    example = example[example['type'] != "technosphere"]

    # Sanity check to write the dataframe. Can be deleted later.
    # (os.path.join fixes the missing-separator bug of the previous
    # string concatenation data_dir + filename.)
    example.to_csv(os.path.join(data_dir, 'example_user_edited_process.csv'),
                   index=False)

    return example
12 changes: 3 additions & 9 deletions LiAISON-ReEDS/code/liaison/lci_calculator.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ def search_dictionary(db,bw):

ei_cf_36_db = bw.Database(db)
database_dict,process_database_dict = search_index_creator(ei_cf_36_db)
return database_dict
return database_dict,process_database_dict

def liaison_calc(db,run_filename,bw):

Expand Down Expand Up @@ -187,14 +187,8 @@ def liaison_calc(db,run_filename,bw):
print('creating inventory withing the database---',db,flush=True)
database_dict,process_database_dict = search_index_creator(ei_cf_36_db)

# Reading from the REEDS output csv files

if type(run_filename) == str:
print('Reading from ' + run_filename,flush = True)
inventory = pd.read_csv(run_filename)
else:
inventory = run_filename
inventory = inventory.sort_values(by=['process','process_location'])

inventory = run_filename.sort_values(by=['process','process_location'])
# Step 1 is to create new processes or datasets
# The new processes and their information should be in the filtered product dataset
processes = inventory[inventory['type'] == 'production']
Expand Down
Loading