diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 00086e1..4c5f0a9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ ci: autoupdate_schedule: monthly repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer @@ -12,7 +12,7 @@ repos: - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: "v0.5.6" + rev: "v0.9.4" hooks: # Run the linter. - id: ruff @@ -23,7 +23,7 @@ repos: - repo: https://github.com/nbQA-dev/nbQA - rev: 1.8.5 + rev: 1.9.1 hooks: - id: nbqa-mypy args: ["--ignore-missing-imports"] diff --git a/cesm2/process/create-scepter-clim_ts.ipynb b/cesm2/process/create-scepter-clim_ts.ipynb index 4b8ac62..627e8be 100644 --- a/cesm2/process/create-scepter-clim_ts.ipynb +++ b/cesm2/process/create-scepter-clim_ts.ipynb @@ -101,10 +101,15 @@ "outputs": [], "source": [ "# --- function to generate synthetic runoff and soil moisture data\n", - "def generate_synthetic_data(mean_runoff, mean_soil_moisture, \n", - " peak_day_runoff, peak_day_soil_moisture, \n", - " amplitude_runoff=1, amplitude_soil_moisture=1, \n", - " years=1):\n", + "def generate_synthetic_data(\n", + " mean_runoff,\n", + " mean_soil_moisture,\n", + " peak_day_runoff,\n", + " peak_day_soil_moisture,\n", + " amplitude_runoff=1,\n", + " amplitude_soil_moisture=1,\n", + " years=1,\n", + "):\n", " \"\"\"\n", " Generate synthetic monthly runoff and soil moisture data using sine waves.\n", "\n", @@ -124,35 +129,47 @@ "\n", " # Generate timestamps for each month\n", " dates = pd.date_range(start=\"1/1/2000\", periods=12 * years, freq=\"MS\")\n", - " \n", + "\n", " # Calculate day of year for each month midpoint\n", " day_of_year = dates.day_of_year + (dates.days_in_month / 2 - 0.5)\n", "\n", " # Normalize day of year to radians\n", - " radians_runoff = (2 * np.pi * (day_of_year - peak_day_runoff) / days_per_year)\n", - " radians_soil_moisture = (2 * np.pi * (day_of_year - peak_day_soil_moisture) / days_per_year)\n", + " radians_runoff = 2 * np.pi * (day_of_year - peak_day_runoff) / days_per_year\n", + " radians_soil_moisture = (\n", + " 2 * np.pi * (day_of_year - peak_day_soil_moisture) / days_per_year\n", + " )\n", "\n", " # Calculate sine wave values\n", " runoff = mean_runoff + amplitude_runoff * np.sin(radians_runoff)\n", - " soil_moisture = mean_soil_moisture + amplitude_soil_moisture * np.sin(radians_soil_moisture)\n", - " \n", + " soil_moisture = mean_soil_moisture + amplitude_soil_moisture * np.sin(\n", + " radians_soil_moisture\n", + " )\n", + "\n", " # Create DataFrame\n", - " df = pd.DataFrame({\n", - " \"year\": dates.year,\n", - " \"month\": dates.month,\n", - " \"runoff\": runoff,\n", - " \"soil_moisture\": soil_moisture\n", - " })\n", - " \n", + " df = pd.DataFrame(\n", + " {\n", + " \"year\": dates.year,\n", + " \"month\": dates.month,\n", + " \"runoff\": runoff,\n", + " \"soil_moisture\": soil_moisture,\n", + " }\n", + " )\n", + "\n", " return df\n", "\n", + "\n", "# same as above, but can pass it a 1d array of times as the fraction through the year\n", - "def generate_synthetic_data_v2(mean_runoff, mean_soil_moisture, \n", - " peak_day_runoff, peak_day_soil_moisture, \n", - " amplitude_runoff=1, amplitude_soil_moisture=1, \n", - " time_fraction=None):\n", + "def generate_synthetic_data_v2(\n", + " mean_runoff,\n", + " mean_soil_moisture,\n", + " peak_day_runoff,\n", + " peak_day_soil_moisture,\n", + " amplitude_runoff=1,\n", + " 
amplitude_soil_moisture=1,\n", + " time_fraction=None,\n", + "):\n", " \"\"\"\n", - " Generate synthetic runoff and soil moisture data using sine waves, \n", + " Generate synthetic runoff and soil moisture data using sine waves,\n", " supporting a 1D array of time as the fraction of the year.\n", "\n", " Parameters:\n", @@ -178,22 +195,27 @@ " day_of_year = (time_fraction % 1) * days_per_year\n", "\n", " # Normalize day of year to radians\n", - " radians_runoff = (2 * np.pi * (day_of_year - peak_day_runoff) / days_per_year)\n", - " radians_soil_moisture = (2 * np.pi * (day_of_year - peak_day_soil_moisture) / days_per_year)\n", + " radians_runoff = 2 * np.pi * (day_of_year - peak_day_runoff) / days_per_year\n", + " radians_soil_moisture = (\n", + " 2 * np.pi * (day_of_year - peak_day_soil_moisture) / days_per_year\n", + " )\n", "\n", " # Calculate sine wave values\n", " runoff = mean_runoff + amplitude_runoff * np.sin(radians_runoff)\n", - " soil_moisture = mean_soil_moisture + amplitude_soil_moisture * np.sin(radians_soil_moisture)\n", + " soil_moisture = mean_soil_moisture + amplitude_soil_moisture * np.sin(\n", + " radians_soil_moisture\n", + " )\n", "\n", " # Create DataFrame\n", - " df = pd.DataFrame({\n", - " \"time_fraction\": time_fraction,\n", - " \"runoff\": runoff,\n", - " \"soil_moisture\": soil_moisture\n", - " })\n", - " \n", - " return df\n", - "\n" + " df = pd.DataFrame(\n", + " {\n", + " \"time_fraction\": time_fraction,\n", + " \"runoff\": runoff,\n", + " \"soil_moisture\": soil_moisture,\n", + " }\n", + " )\n", + "\n", + " return df" ] }, { @@ -227,12 +249,12 @@ "# (must be the same length as len(df))\n", "mean_runoff = [12, 12, 12, 12]\n", "mean_soil_moisture = [165, 165, 165, 165]\n", - "peak_day_runoff = [300,15,200,100]\n", - "peak_day_soil_moisture = [300,15,200,100]\n", - "amplitude_runoff = [11,1,1,11]\n", + "peak_day_runoff = [300, 15, 200, 100]\n", + "peak_day_soil_moisture = [300, 15, 200, 100]\n", + "amplitude_runoff = [11, 1, 1, 11]\n", "amplitude_soil_moisture = [160, 10, 10, 160]\n", "\n", - "# print(df.head())\n" + "# print(df.head())" ] }, { @@ -300,15 +322,24 @@ " # set yr array and save\n", " yrs = np.linspace(yrmin, yrmax, nsteps)\n", " # update arr if synthetic\n", - " if use_synthetic_hydroclim and thisvar.colname_var in ['moisture(mm/m)', 'runoff(mm/month)']:\n", + " if use_synthetic_hydroclim and thisvar.colname_var in [\n", + " \"moisture(mm/m)\",\n", + " \"runoff(mm/month)\",\n", + " ]:\n", " # get climate ts\n", - " dfclim_tmp = generate_synthetic_data_v2(mean_runoff[idx], mean_soil_moisture[idx], \n", - " peak_day_runoff[idx], peak_day_soil_moisture[idx], amplitude_runoff[idx], \n", - " amplitude_soil_moisture[idx], time_fraction=yrs)\n", + " dfclim_tmp = generate_synthetic_data_v2(\n", + " mean_runoff[idx],\n", + " mean_soil_moisture[idx],\n", + " peak_day_runoff[idx],\n", + " peak_day_soil_moisture[idx],\n", + " amplitude_runoff[idx],\n", + " amplitude_soil_moisture[idx],\n", + " time_fraction=yrs,\n", + " )\n", " if thisvar.colname_var == \"moisture(mm/m)\":\n", - " tmp = np.array(dfclim_tmp['soil_moisture'])\n", - " elif thisvar.colname_var == 'runoff(mm/month)':\n", - " tmp = np.array(dfclim_tmp['runoff'])\n", + " tmp = np.array(dfclim_tmp[\"soil_moisture\"])\n", + " elif thisvar.colname_var == \"runoff(mm/month)\":\n", + " tmp = np.array(dfclim_tmp[\"runoff\"])\n", " # name the output directory\n", " if lterm_mean:\n", " yr_string = (\n", @@ -518,9 +549,9 @@ "source": [ "mean_runoff = [12, 12, 12, 12]\n", "mean_soil_moisture = [165, 165, 
165, 165]\n", - "peak_day_runoff = [300,15,200,100]\n", - "peak_day_soil_moisture = [300,15,200,100]\n", - "amplitude_runoff = [11,1,1,11]\n", + "peak_day_runoff = [300, 15, 200, 100]\n", + "peak_day_soil_moisture = [300, 15, 200, 100]\n", + "amplitude_runoff = [11, 1, 1, 11]\n", "amplitude_soil_moisture = [160, 10, 10, 160]" ] }, @@ -546,12 +577,18 @@ " max_yr = 1\n", " pvar = \"soil_moisture\"\n", "\n", - " dfclim_tmp = generate_synthetic_data_v2(mean_runoff[idx], mean_soil_moisture[idx], \n", - " peak_day_runoff[idx], peak_day_soil_moisture[idx], amplitude_runoff[idx], \n", - " amplitude_soil_moisture[idx], time_fraction=yrs)\n", + " dfclim_tmp = generate_synthetic_data_v2(\n", + " mean_runoff[idx],\n", + " mean_soil_moisture[idx],\n", + " peak_day_runoff[idx],\n", + " peak_day_soil_moisture[idx],\n", + " amplitude_runoff[idx],\n", + " amplitude_soil_moisture[idx],\n", + " time_fraction=yrs,\n", + " )\n", "\n", - " dfc = dfclim_tmp[dfclim_tmp['time_fraction'] < max_yr]\n", - " plt.plot(dfc['time_fraction'], dfc[pvar], label=idx)\n", + " dfc = dfclim_tmp[dfclim_tmp[\"time_fraction\"] < max_yr]\n", + " plt.plot(dfc[\"time_fraction\"], dfc[pvar], label=idx)\n", " plt.legend()" ] }, diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/dir_notes.txt b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/dir_notes.txt index 6763502..fdfe9d7 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/dir_notes.txt +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/dir_notes.txt @@ -68,4 +68,3 @@ - cc vs gbas - counterfactual cc (~0.4 ton/ha/yr) - same, low upstream emissions for each - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__001/_calc_inputs.res index 1bfa806..ebc83af 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__001/_calc_inputs.res @@ -17,4 +17,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__002/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__002/_calc_inputs.res index f099e1d..2935ae8 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__002/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__002/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__003/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__003/_calc_inputs.res index 3672e80..ef47c52 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__003/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__003/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 200.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git 
a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__004/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__004/_calc_inputs.res index 3c36dc2..d155e16 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__004/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__004/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__005/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__005/_calc_inputs.res index b7b526e..2f6a283 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__005/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__005/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__006/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__006/_calc_inputs.res index 5d9dce2..de2cd60 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__006/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__006/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__007/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__007/_calc_inputs.res index 2a39bb9..e644470 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__007/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__007/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__008/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__008/_calc_inputs.res index 5d9dce2..de2cd60 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__008/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__008/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__009/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__009/_calc_inputs.res index 5d9dce2..de2cd60 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__009/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__009/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git 
a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__010/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__010/_calc_inputs.res index 402734b..846d6a3 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__010/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__010/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__011/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__011/_calc_inputs.res index 96f41e4..de623d3 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__011/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__011/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__012/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__012/_calc_inputs.res index f60b3bf..ba7dcca 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__012/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__012/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 200.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__multiyear_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__multiyear_001/_calc_inputs.res index 661c99e..7a031e3 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__multiyear_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert__multiyear_001/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_cdrpot_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_cdrpot_001/_calc_inputs.res index f5c09c3..75b3fd7 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_cdrpot_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_cdrpot_001/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_multiyear_multiyear_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_multiyear_multiyear_001/_calc_inputs.res index 661c99e..7a031e3 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_multiyear_multiyear_001/_calc_inputs.res +++ 
b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_hiFert_multiyear_multiyear_001/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__001/_calc_inputs.res index 893a251..233be02 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__001/_calc_inputs.res @@ -17,4 +17,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__002/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__002/_calc_inputs.res index aea2a1e..8324a72 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__002/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__002/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__003/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__003/_calc_inputs.res index 41c070b..7f7476f 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__003/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__003/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 200.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__004/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__004/_calc_inputs.res index 71a24cf..8b37433 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__004/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__004/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__005/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__005/_calc_inputs.res index e2402de..6406a58 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__005/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__005/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__006/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__006/_calc_inputs.res index f1848ab..c2afc1a 100644 --- 
a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__006/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__006/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__007/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__007/_calc_inputs.res index 2043169..8c42615 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__007/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__007/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__008/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__008/_calc_inputs.res index f1848ab..c2afc1a 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__008/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__008/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__009/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__009/_calc_inputs.res index f1848ab..c2afc1a 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__009/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__009/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__010/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__010/_calc_inputs.res index 92a6559..f72dee5 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__010/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__010/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__011/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__011/_calc_inputs.res index 483edd6..075dac8 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__011/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__011/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__012/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__012/_calc_inputs.res index e86eeab..245afd3 
100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__012/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert__012/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 200.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_001/_calc_inputs.res index 99dcb57..0a1aa29 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_001/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_002/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_002/_calc_inputs.res index 99dcb57..0a1aa29 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_002/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_002/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_003/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_003/_calc_inputs.res index ee7203f..4c72f14 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_003/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_003/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_004/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_004/_calc_inputs.res index 1717489..ffcc7c0 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_004/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_004/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_005/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_005/_calc_inputs.res index ae9cd07..033ae26 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_005/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_005/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_006/_calc_inputs.res 
b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_006/_calc_inputs.res index a0affdb..b79a7c0 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_006/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_006/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 250.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_007/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_007/_calc_inputs.res index a9410a5..cd73408 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_007/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_007/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_008/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_008/_calc_inputs.res index f3de259..158e955 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_008/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_008/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_009/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_009/_calc_inputs.res index ae9cd07..033ae26 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_009/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_lowFert_cdrpot_009/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 500.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__001/_calc_inputs.res index 71dd4d1..849d551 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__001/_calc_inputs.res @@ -17,4 +17,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__002/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__002/_calc_inputs.res index ff0a33d..e6cbad8 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__002/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__002/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git 
a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__multiyear_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__multiyear_001/_calc_inputs.res index 220d5f6..f955752 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__multiyear_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert__multiyear_001/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert_multiyear_multiyear_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert_multiyear_multiyear_001/_calc_inputs.res index d125ca2..8745a8a 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert_multiyear_multiyear_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/meanAnn_shortRun_noFert_multiyear_multiyear_001/_calc_inputs.res @@ -24,4 +24,3 @@ Emissions (calcite): barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_001/_calc_inputs.res index bdc9d6d..45f7e66 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_001/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_002/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_002/_calc_inputs.res index 14b038b..2de96c4 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_002/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_002/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 0.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_003/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_003/_calc_inputs.res index f3e6582..52744d1 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_003/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_003/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_004/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_004/_calc_inputs.res index 5d8bf07..6d2e2ef 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_004/_calc_inputs.res +++ 
b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_004/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_005/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_005/_calc_inputs.res index d48e249..696a7fb 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_005/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_005/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_006/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_006/_calc_inputs.res index 9f85648..e833c02 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_006/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_006/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_007/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_007/_calc_inputs.res index 7f28442..36141b7 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_007/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_007/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 200.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_008/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_008/_calc_inputs.res index 356272e..0626969 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_008/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_008/_calc_inputs.res @@ -19,4 +19,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_wet_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_wet_001/_calc_inputs.res index 45faeed..0cabc4d 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_wet_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_hiFert_wet_001/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_001/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_001/_calc_inputs.res index b87c69f..08ce68a 100644 --- 
a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_001/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_001/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 200.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_002/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_002/_calc_inputs.res index 7c8d4f6..e4fc3b0 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_002/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_002/_calc_inputs.res @@ -16,4 +16,3 @@ Emissions: barge_km: 200.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_003/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_003/_calc_inputs.res index ea8e151..164a0ae 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_003/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_003/_calc_inputs.res @@ -18,4 +18,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_004/_calc_inputs.res b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_004/_calc_inputs.res index 630f045..3e47400 100644 --- a/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_004/_calc_inputs.res +++ b/scepter/process/runs/batch_postprocResults/cc-sil_psize_apprate/old/meanAnn_shortRun_lowFert_004/_calc_inputs.res @@ -19,4 +19,3 @@ Emissions: barge_km: 100.0 barge_diesel_km: 0 Efactor_org: MRO - diff --git a/scepter/process/runs/generate-overview-plots-noBatchFile.ipynb b/scepter/process/runs/generate-overview-plots-noBatchFile.ipynb index 59614d6..57c8ab3 100644 --- a/scepter/process/runs/generate-overview-plots-noBatchFile.ipynb +++ b/scepter/process/runs/generate-overview-plots-noBatchFile.ipynb @@ -306,7 +306,6 @@ " save_transparent=False,\n", " plot_prefix=\"PROF_\",\n", "):\n", - "\n", " # --- turn off interactive mode\n", " plt.ioff()\n", "\n", diff --git a/scepter/process/runs/generate-overview-plots.ipynb b/scepter/process/runs/generate-overview-plots.ipynb index be53899..50b7d87 100644 --- a/scepter/process/runs/generate-overview-plots.ipynb +++ b/scepter/process/runs/generate-overview-plots.ipynb @@ -680,7 +680,6 @@ " save_transparent=False,\n", " plot_prefix=\"PROF_\",\n", "):\n", - "\n", " # --- turn off interactive mode\n", " plt.ioff()\n", "\n", @@ -1117,8 +1116,9 @@ "runname_in = dfin[\"newrun_id_full\"][0]\n", "dirname = runname_in # + \"_\" + domain_in\n", "results_path = os.path.join(resdir, dirname)\n", - "flx_path, prof_path = os.path.join(results_path, \"flx\"), os.path.join(\n", - " results_path, \"prof\"\n", + "flx_path, prof_path = (\n", + " os.path.join(results_path, \"flx\"),\n", + " os.path.join(results_path, \"prof\"),\n", ")\n", "\n", "# create an output list\n", diff --git a/scepter/process/spinups/generate-overview-plots.ipynb b/scepter/process/spinups/generate-overview-plots.ipynb 
index 2afaee0..f401d84 100644 --- a/scepter/process/spinups/generate-overview-plots.ipynb +++ b/scepter/process/spinups/generate-overview-plots.ipynb @@ -353,7 +353,6 @@ " save_transparent=False,\n", " plot_prefix=\"PROF_\",\n", "):\n", - "\n", " # --- turn off interactive mode\n", " plt.ioff()\n", "\n", diff --git a/scepter/process/spinups/spinup_basicplot.ipynb b/scepter/process/spinups/spinup_basicplot.ipynb index e07c6e4..1d4429c 100644 --- a/scepter/process/spinups/spinup_basicplot.ipynb +++ b/scepter/process/spinups/spinup_basicplot.ipynb @@ -330,8 +330,9 @@ "# where results are stored\n", "resx = \"/home/tykukla/SCEPTER/scepter_output\"\n", "results_path = os.path.join(resx, thissite)\n", - "flx_path, prof_path = os.path.join(results_path, \"flx\"), os.path.join(\n", - " results_path, \"prof\"\n", + "flx_path, prof_path = (\n", + " os.path.join(results_path, \"flx\"),\n", + " os.path.join(results_path, \"prof\"),\n", ")\n", "\n", "# define file name pattern\n", diff --git a/scepter/setup/batch-setup/batch_helperFxns.py b/scepter/setup/batch-setup/batch_helperFxns.py index e5c9132..45e7bd6 100644 --- a/scepter/setup/batch-setup/batch_helperFxns.py +++ b/scepter/setup/batch-setup/batch_helperFxns.py @@ -1,22 +1,29 @@ # -------------------------------------------- -# +# # helper functions for building the batch # input .csv files -# +# # -------------------------------------------- -import os -import pandas as pd import itertools +import os import warnings +import pandas as pd + ## [1] BUILD DATAFRAME -def build_df(pref: str, const_dict: dict, sites: list, - by_site: dict, all_combinations: dict, add_ctrl: bool) -> pd.DataFrame: +def build_df( + pref: str, + const_dict: dict, + sites: list, + by_site: dict, + all_combinations: dict, + add_ctrl: bool, +) -> pd.DataFrame: """ - Turn dictionaries into a .csv file that serves as a batch input for - running SCEPTER. Vars are either constant for all runs, only vary by - site, or vary from one run to the next (within a site). + Turn dictionaries into a .csv file that serves as a batch input for + running SCEPTER. Vars are either constant for all runs, only vary by + site, or vary from one run to the next (within a site). 
All dicts should be structured such that the key is the column name, the value is a cell value @@ -25,22 +32,22 @@ def build_df(pref: str, const_dict: dict, sites: list, pref : str prefix for each run name const_dict : dict - dictionary for the values that are held constant + dictionary for the values that are held constant across all simulations sites : list list of the site names across which to run the simulations by_site : dict dictionary for all the values that only vary by site - (each value should be a list of len(sites) where the - first value corresponds to the first site indexed, and + (each value should be a list of len(sites) where the + first value corresponds to the first site indexed, and so on) all_combinations : dict dictionary for all the values to vary such that every - unique combination of these values is tested for each + unique combination of these values is tested for each site add_ctrl : bool - [True | False] whether to add a control simulation with zero - dust application for each site (NOTE, you may not want to add + [True | False] whether to add a control simulation with zero + dust application for each site (NOTE, you may not want to add 0 to your dust app rate list because it will needlessly repeat for every other var in all_combinations) @@ -48,7 +55,7 @@ def build_df(pref: str, const_dict: dict, sites: list, ------- pd.DataFrame This is the file that will become the .csv batch input. Each column - should be a variable that the SCEPTER python scripts can recognize + should be a variable that the SCEPTER python scripts can recognize (no typos!) """ # [1] generate all combinations from the dict's vectors @@ -58,40 +65,45 @@ def build_df(pref: str, const_dict: dict, sites: list, # [2] add site-specific vars # add site labels - df['site'] = [site for site in sites for _ in range(len(all_combos_list))] + df["site"] = [site for site in sites for _ in range(len(all_combos_list))] # for each key in by_site, alternate the values for the corresponding site for key, values in by_site.items(): - df[key] = [values[site_idx] for site_idx in range(len(sites)) for _ in range(len(all_combos_list))] + df[key] = [ + values[site_idx] + for site_idx in range(len(sites)) + for _ in range(len(all_combos_list)) + ] # [3] add constant vars for key, value in const_dict.items(): df[key] = value - + # [4] add control cases (no dust application) if add_ctrl is True if add_ctrl: # loop through sites for thissite in reversed(sites): - tmp_row = df[df['site'] == thissite].iloc[0] + tmp_row = df[df["site"] == thissite].iloc[0] # set dust to zero - tmp_row['dustrate'] = 0.0 + tmp_row["dustrate"] = 0.0 # concat to top row df = pd.concat([pd.DataFrame([tmp_row]), df], ignore_index=True) # [5] add the newrun ID df = newrun_id_fxn(df, pref, None) # check that all the run ids are unique and return a warning if not - if not df['newrun_id'].is_unique: - warnings.warn("Column newrun_id contains duplicate entries. The latter run ID will likely overwrite the former") + if not df["newrun_id"].is_unique: + warnings.warn( + "Column newrun_id contains duplicate entries. 
The latter run ID will likely overwrite the former" + ) return df - # [2] MAKE NEWRUN ID def newrun_id_fxn(df: pd.DataFrame, pref: str, clim_tag: str) -> pd.DataFrame: """ Generate an ID string for a given run based on the inputs (especially - the application rate, duration, and dust radius -- put the dust type in + the application rate, duration, and dust radius -- put the dust type in the prefix, though note it can get added in SCEPTER py script anyway.) Parameters @@ -103,47 +115,66 @@ def newrun_id_fxn(df: pd.DataFrame, pref: str, clim_tag: str) -> pd.DataFrame: prefix for the run (this will often include the dust type; e.g., 'gbas') it can be None, but that's not recommended clim_tag : str - the tag for the years of climate model output used (e.g., '1950-2020'). + the tag for the years of climate model output used (e.g., '1950-2020'). If no climate model data is used, set this to None - + Returns ------- pd.DataFrame This is the file that will become the .csv batch input. Each column - should be a variable that the SCEPTER python scripts can recognize + should be a variable that the SCEPTER python scripts can recognize (no typos!) """ # create empty column - df['newrun_id'] = None + df["newrun_id"] = None # loop through each row for index, row in df.iterrows(): # assign the dust flux if it exists - dstflx = "app_" + str(row['dustrate']).replace('.', 'p') if 'dustrate' in row else None + dstflx = ( + "app_" + str(row["dustrate"]).replace(".", "p") + if "dustrate" in row + else None + ) # assign the particle size if it exists - psize = "psize_" + str(row['dustrad']).replace('.', 'p') if 'dustrad' in row else None + psize = ( + "psize_" + str(row["dustrad"]).replace(".", "p") + if "dustrad" in row + else None + ) # pull out other vars - site = row['climatefiles'] - dur = "tau_" + str(row['duration']).replace('.', 'p') if 'duration' in row else None + site = row["climatefiles"] + dur = ( + "tau_" + str(row["duration"]).replace(".", "p") + if "duration" in row + else None + ) # (not using dur for now because the python script in SCEPTER repo adds it in itself) # create the new ID - this_id = '_'.join([s for s in [pref, site, clim_tag, dstflx, psize] if s is not None]) - + this_id = "_".join( + [s for s in [pref, site, clim_tag, dstflx, psize] if s is not None] + ) + # add it to the pandas df - df.at[index, 'newrun_id'] = this_id - + df.at[index, "newrun_id"] = this_id + # return result return df - + # [3] SAVE DATAFRAME AS CSV -def save_df(df: pd.DataFrame, savepath_batch: str, fn: str, - multi_run_split: bool, max_iters_per_set: int=None): +def save_df( + df: pd.DataFrame, + savepath_batch: str, + fn: str, + multi_run_split: bool, + max_iters_per_set: int = None, +): """ Save the pandas DataFrame as a .csv file to use for batch inputs for SCEPTER. If you're filename already exists, the function will - warn you and ask if you want to proceed with overwriting it. + warn you and ask if you want to proceed with overwriting it. 
Parameters ---------- @@ -158,9 +189,9 @@ def save_df(df: pd.DataFrame, savepath_batch: str, fn: str, multi_run_split : bool T/F; whether or not to split the df into multiple CSV files max_iters_per_set : int - if multi_run_split is true, then how many iters should we allow per + if multi_run_split is true, then how many iters should we allow per csv file (default is None, assuming multi_run_split is False) - + Returns ------- @@ -177,7 +208,7 @@ def save_df(df: pd.DataFrame, savepath_batch: str, fn: str, for i, df_chunk in enumerate(dfs): tot_dfs = len(dfs) savefn_nosuff = fn.rstrip(".csv") - savefn_out = f"{savefn_nosuff}_set{i+1}of{tot_dfs}.csv" + savefn_out = f"{savefn_nosuff}_set{i + 1}of{tot_dfs}.csv" # print(os.path.join(savepath_batch, savefn_out)) savepath = os.path.join(savepath_batch, savefn_out) # save and prevent accidentally overwriting another file @@ -188,14 +219,14 @@ def save_df(df: pd.DataFrame, savepath_batch: str, fn: str, else: savepath_all = os.path.join(savepath_batch, fn) prevent_accidental_overwrite(df, savepath_all, fn) - + # FUNCTION check if a file already exists def prevent_accidental_overwrite(df: pd.DataFrame, path_fn: str, fn: str): """ - This function is used within the save_df function to check that we're + This function is used within the save_df function to check that we're not going to accidentally overwrite another file upon save. If we are, - we ask the user to give permission to proceed. + we ask the user to give permission to proceed. Parameters ---------- @@ -209,14 +240,20 @@ def prevent_accidental_overwrite(df: pd.DataFrame, path_fn: str, fn: str): just the name of the file (ex: "file.csv") - Returns + Returns ------- """ # check if the file already exists at this path if os.path.exists(path_fn): # ask the user whether they want to continue / overwrite the file - response = input(f"A file with this name already exists ({fn}). Do you want to overwrite it? (Y/N): ").strip().upper() + response = ( + input( + f"A file with this name already exists ({fn}). Do you want to overwrite it? 
(Y/N): " + ) + .strip() + .upper() + ) if response == "N": warnings.warn("File save canceled to prevent overwrite") return @@ -227,6 +264,6 @@ def prevent_accidental_overwrite(df: pd.DataFrame, path_fn: str, fn: str): df.to_csv(path_fn, index=False) print("Thanks for your response, the file was saved.") # if it doesn't exist, allow the save to happen - else: + else: df.to_csv(path_fn, index=False) - print("File successfully saved") \ No newline at end of file + print("File successfully saved") diff --git a/scepter/setup/batch-setup/make-dustflx-csv.py b/scepter/setup/batch-setup/make-dustflx-csv.py index 52ac527..45bbb4a 100644 --- a/scepter/setup/batch-setup/make-dustflx-csv.py +++ b/scepter/setup/batch-setup/make-dustflx-csv.py @@ -1,14 +1,14 @@ # %% # ------------------------------------------------------- -# -# Script to make a csv for dust flux data over time -# -# Note that as of 10/20/2024 it only works with the +# +# Script to make a csv for dust flux data over time +# +# Note that as of 10/20/2024 it only works with the # SCEPTER/rock_buff_dust-ts_multiyear.py script -# +# # ------------------------------------------------------- import os -import numpy as np + import pandas as pd # %% @@ -18,50 +18,50 @@ savename = "cc_15yr_1app_no2nd_001.csv" # *************************************************************** -# %% +# %% # --- DEFINE TIME STEPS # [1] the total amount of time to split up into sub-runs -max_time = 15 # [years] the end of the batch simulation +max_time = 15 # [years] the end of the batch simulation # [2] list of the years where a new sub-run starts (values # cannot exceed max_time) -start_times = [0, 1] # [years] +start_times = [0, 1] # [years] # --- DEFINE DUST APPLICATION # (note, lists must have same length as start_times) -# [1] define the dust species applied at each timestep -dustsp = ['cc', 'cc'] -# [2] define the dust rates -- note these will override -# the default value from the default dict (or from +# [1] define the dust species applied at each timestep +dustsp = ["cc", "cc"] +# [2] define the dust rates -- note these will override +# the default value from the default dict (or from # the batch .csv) unless the entry is non-numeric ( # (suggest to make those values 'defer' so it's clear # we're deferring to the default) -dustrate = ['defer', 0] -# [3] define dust radius +dustrate = ["defer", 0] +# [3] define dust radius # as above, 'defer' or other non-numeric means the default # entry will be selected -dustrad = ['defer', 'defer'] +dustrad = ["defer", "defer"] # [4] define second dust species -dustsp_2nd = [] # leaving it empty means we use the default (though saying 'defer' should work too) -dustrate_2nd = [] # leaving it empty means we use the defualt (though saying 'defer' should work too) +dustsp_2nd = [] # leaving it empty means we use the default (though saying 'defer' should work too) +dustrate_2nd = [] # leaving it empty means we use the defualt (though saying 'defer' should work too) -# %% -# --- calculate timestep durations +# %% +# --- calculate timestep durations timestep_dur = [] # Loop through the start_times and calculate the difference for i in range(len(start_times)): if i < len(start_times) - 1: # calculate time difference to the next start_time - interval = start_times[i+1] - start_times[i] + interval = start_times[i + 1] - start_times[i] else: # calculate time difference to max_time for the last start_time interval = max_time - start_times[i] - + # Append the interval to the list timestep_dur.append(interval) -# %% -# --- BRING 
TOGETHER +# %% +# --- BRING TOGETHER # [1] bring lists into a dictionary list_dict = { "yr_start": start_times, @@ -70,9 +70,9 @@ "dustrate": dustrate, "dustrad": dustrad, "dustsp_2nd": dustsp_2nd, - "dustrate_2nd": dustrate_2nd + "dustrate_2nd": dustrate_2nd, } -# [2] remove empty lists from the dictionary +# [2] remove empty lists from the dictionary filtered_data = {key: value for key, value in list_dict.items() if value} # [3] create pd.Dataframe df = pd.DataFrame(filtered_data) diff --git a/scepter/setup/batch-setup/make_batch_input_basalt_fixedRate_series.ipynb b/scepter/setup/batch-setup/make_batch_input_basalt_fixedRate_series.ipynb index c84c49e..bbc4166 100644 --- a/scepter/setup/batch-setup/make_batch_input_basalt_fixedRate_series.ipynb +++ b/scepter/setup/batch-setup/make_batch_input_basalt_fixedRate_series.ipynb @@ -921,7 +921,7 @@ " for i, df_chunk in enumerate(outdfs):\n", " tot_dfs = len(outdfs)\n", " savefn_nosuff = savefn.rstrip(\".csv\")\n", - " savefn_out = f\"{savefn_nosuff}_set{i+1}of{tot_dfs}.csv\"\n", + " savefn_out = f\"{savefn_nosuff}_set{i + 1}of{tot_dfs}.csv\"\n", " print(os.path.join(savepath_batch, savefn_out))\n", " df_chunk.to_csv(os.path.join(savepath_batch, savefn_out), index=False)\n", " # also save total\n", diff --git a/scepter/setup/batch-setup/make_batch_input_grainsize+apprate.py b/scepter/setup/batch-setup/make_batch_input_grainsize+apprate.py index 7b14838..0e95402 100644 --- a/scepter/setup/batch-setup/make_batch_input_grainsize+apprate.py +++ b/scepter/setup/batch-setup/make_batch_input_grainsize+apprate.py @@ -1,35 +1,33 @@ # %% # --------------------------------------------------- -# +# # Generate batch input .csv files for SCEPTER run -# -# provide var vectors and assume we want every +# +# provide var vectors and assume we want every # combination of them, or by site -# +# # T Kukla (CarbonPlan, 2024) -# +# # --------------------------------------------------- -import os -import numpy as np -import pandas as pd -import itertools import batch_helperFxns as bhf -# %% +# %% # --- USER INPUTS # [1] vars to update, constant for all runs -fertLevel = "hi" # name for how much fertilizer is applied -dustsp = "cc" # name for dust species to apply (must be from accepted list) +fertLevel = "hi" # name for how much fertilizer is applied +dustsp = "cc" # name for dust species to apply (must be from accepted list) extra_tag = "AWS-TEST" # another distinguishing tag pref = f"{fertLevel}Fert_{dustsp}_{extra_tag}" -clim_tag = None # [string] year-span (e.g., 1950-2020) for clim input if climate files are used - # (if clim files are not used, set to None) +clim_tag = None # [string] year-span (e.g., 1950-2020) for clim input if climate files are used +# (if clim files are not used, set to None) # save vars file_prefix = f"meanAnn_{dustsp}_shortRun_{extra_tag}_{fertLevel}Fert_gs+apprate" # prefix of output run names fn = file_prefix + "_v0.csv" savepath_batch = "/home/tykukla/aglime-swap-cdr/scepter/batch-inputs" -multi_run_split = False # whether to split the csv into multiple files -max_iters_per_set = 20 # [int] the number of runs per csv (only used if multi_run_split is True) +multi_run_split = False # whether to split the csv into multiple files +max_iters_per_set = ( + 20 # [int] the number of runs per csv (only used if multi_run_split is True) +) const_dict = { "duration": 15, # duration of run (starts from earliest year) @@ -42,35 +40,45 @@ "include_psd_full": False, "include_psd_bulk": False, "climatedir": "NA", - # --- compute specific - 'aws_save': 
"move", # ["move", "copy", None] whether to "move" file to aws, just copy it, or nothing at all - 'aws_bucket': "s3://carbonplan-carbon-removal/SCEPTER/scepter_output_scratch/", # where to save at AWS (only used if 'aws_save'=True) + "aws_save": "move", # ["move", "copy", None] whether to "move" file to aws, just copy it, or nothing at all + "aws_bucket": "s3://carbonplan-carbon-removal/SCEPTER/scepter_output_scratch/", # where to save at AWS (only used if 'aws_save'=True) } -# %% +# %% # [2] vars to vary by site -sites = ['site_311a', 'site_311b'] -by_site = { # values must have same order as 'sites' var +sites = ["site_311a", "site_311b"] +by_site = { # values must have same order as 'sites' var "cec": [21.10329, 6.96125], "spinrun": ["site_311a_pr9_spintuneup4", "site_311b_pr9_spintuneup4"], - "climatefiles": ["site_311a", "site_311b"] # these serve as the site name when there is no cliamte file to use + "climatefiles": [ + "site_311a", + "site_311b", + ], # these serve as the site name when there is no cliamte file to use } -# %% +# %% # [3] vars to vary within site (we'll get every combination of these two) -dustrate_ton_ha_yr = [0.3, 3, 30] # [0.3, 0.6, 1, 2, 5, 7, 10, 15, 25, 35, 45, 60, 100] +dustrate_ton_ha_yr = [ + 0.3, + 3, + 30, +] # [0.3, 0.6, 1, 2, 5, 7, 10, 15, 25, 35, 45, 60, 100] all_combinations = { - "dustrate": [x * 100 for x in dustrate_ton_ha_yr], # [ton ha-1 yr-1 * 100 = g m-2] - "dustrad": [10, 100, 1000] # [1, 10, 30, 50, 75, 100, 125, 150, 200] # [diameter, microns] i think this gets applied to gbas and amnt equally (though amnt is fast-reacting so maybe not a big deal? ) + "dustrate": [x * 100 for x in dustrate_ton_ha_yr], # [ton ha-1 yr-1 * 100 = g m-2] + "dustrad": [ + 10, + 100, + 1000, + ], # [1, 10, 30, 50, 75, 100, 125, 150, 200] # [diameter, microns] i think this gets applied to gbas and amnt equally (though amnt is fast-reacting so maybe not a big deal? ) } -# %% +# %% # --- BUILD DATAFRAME AND SAVE df = bhf.build_df(pref, const_dict, sites, by_site, all_combinations, add_ctrl=True) df -# %% -# save +# %% +# save bhf.save_df(df, savepath_batch, fn, multi_run_split, max_iters_per_set) # %% diff --git a/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_cdrPot.py b/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_cdrPot.py index 9a46a8d..bab8b8f 100644 --- a/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_cdrPot.py +++ b/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_cdrPot.py @@ -1,25 +1,22 @@ # %% # --------------------------------------------------- -# +# # Generate batch input .csv files for SCEPTER run -# -# provide var vectors and assume we want every +# +# provide var vectors and assume we want every # combination of them, or by site -# +# # T Kukla (CarbonPlan, 2024) -# +# # --------------------------------------------------- -import os -import numpy as np -import pandas as pd -import itertools -import batch_helperFxns as bhf +import batch_helperFxns as bhf +import numpy as np # --------------------------------------------------- -# This script is meant to generate application rates for gbas -# and cc that yield the same CDR potential for the different feedstocks. -# I include notes on calculating the CDR potential for each here. +# This script is meant to generate application rates for gbas +# and cc that yield the same CDR potential for the different feedstocks. +# I include notes on calculating the CDR potential for each here. 
# # potential constants relative to cations: # CO2:Ca2+ mass --> 2.196 # 44.009x2 g CO2/mol over 40.078 g Ca/mol @@ -32,15 +29,15 @@ # CO2:MgO mass --> 2.184 # 44.009x2 g CO2/mol over 40.304 g MgO/mol # CO2:Na2O mass --> 1.420 # 44.009x2 g CO2/mol over 61.979 g Na2O/mol # (CO2 mwt is multiplied by 2 to account for 2 moles of Na in the oxide) # CO2:K2O mass --> 0.934 # 44.009x2 g CO2/mol over 94.195 g K2O/mol # (CO2 mwt is multiplied by 2 to account for 2 moles of K in the oxide) -# -# +# +# # cdr potential, calcite: # molar mass of calcite: 100.0869 # CO2 from Ca2+ --> 44.01 # 40.078 * 2.196 / 2 (accounting for half efficiency due to calcite-carbon) # ---- # Total --> 44.01 -# CDR potential --> [[ 0.440 ]] # total g CO2 / g cations over molar mass -# +# CDR potential --> [[ 0.440 ]] # total g CO2 / g cations over molar mass +# # cdr potential, basalt: # molar mass of basalt: 120.496 # CO2 from CaO --> 21.470 # 13.675 g CaO/mol basalt * 1.570 g CO2 / g CaO @@ -48,98 +45,115 @@ # CO2 from Na2O --> 3.560 # 2.507 g Na2O/mol basalt * 1.420 g CO2 / g Na2O # CO2 from K2O --> 0.371 # 0.397 g K2O/mol basalt * 0.934 g CO2 / g K2O # ---- -# Total --> 49.357 -# CDR potential --> [[ 0.410 ]] # total g CO2 / g cations over molar mass -# +# Total --> 49.357 +# CDR potential --> [[ 0.410 ]] # total g CO2 / g cations over molar mass +# # ******************************************************* -# Notes on the basalt cation oxide molar masses (from -# comments in cflx_proc.py) : -# - # Note: gbas molar mass depends on whether Dmod_bas_cmp is defined in the makefile! - # if the makefile has `CPFLAGS += -Dmod_basalt_cmp`, then scepter.f90 - # uses basalt_defines.h by default (see scepter.f90 lines ~81, 82). Otherwise, - # another basalt composition (hard-coded in scepter.f90) is used. - # - # This flag was set in my makefile, so I use the molar mass we compute from - # basalt_defines.h. - # - # --- molar mass is calculated following line ~358 in scepter.f90: - # (note the terms divided by 2 account for the fact that the cation oxide - # has 2 moles of the cation, not one) - # mwtgbas = (fr_si_gbas*mwtamsi + fr_al_gbas/2*mwtal2o3 + fr_na_gbas/2*mwtna2o - # + fr_k_gbas/2*mwtk2o + fr_ca_gbas*mwtcao + fr_mg_gbas*mwtmgo - # + fr_fe2_gbas*mwtfe2o) - # where mwt* is molar mass of species, and fr_*_gbas is the fraction from basalt_defines.h - # - # si: 1.0 * 60.085 = 60.085 - # al: 0.4683117231/2 * 101.962 = 23.875 - # na: 0.08088553318/2 * 61.979 = 2.5066 - # k: 0.008431137573/2 * 94.195 = 0.3971 - # ca: 0.2438545566 * 56.079 = 13.675 - # mg: 0.2721686927 * 40.304 = 10.969 - # fe2:0.1251095225 * 71.846 = 8.9886 - # - # SUM --------------------------> = 120.496 +# Notes on the basalt cation oxide molar masses (from +# comments in cflx_proc.py) : +# +# Note: gbas molar mass depends on whether Dmod_bas_cmp is defined in the makefile! +# if the makefile has `CPFLAGS += -Dmod_basalt_cmp`, then scepter.f90 +# uses basalt_defines.h by default (see scepter.f90 lines ~81, 82). Otherwise, +# another basalt composition (hard-coded in scepter.f90) is used. +# +# This flag was set in my makefile, so I use the molar mass we compute from +# basalt_defines.h. 
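Editor's note: a quick numeric check of the two CDR potentials quoted above. The MgO term for basalt falls outside this hunk; it follows from the 10.969 g MgO per mol basalt and the 2.184 g CO2 per g MgO quoted in these comments, so treat that value as inferred:

# calcite: CO2 from Ca2+ at half efficiency, divided by the calcite molar mass
co2_from_ca = 40.078 * 2.196 / 2            # ~44.01 g CO2 per mol calcite
cdr_pot_cc = co2_from_ca / 100.0869         # ~0.440
# basalt: summed CO2 from CaO, MgO, Na2O, K2O, divided by the basalt molar mass
co2_total_gbas = 21.470 + 23.956 + 3.560 + 0.371   # ~49.357 g CO2 per mol basalt
cdr_pot_gbas = co2_total_gbas / 120.496            # ~0.410
print(round(cdr_pot_cc, 3), round(cdr_pot_gbas, 3))   # 0.44 0.41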
+# +# --- molar mass is calculated following line ~358 in scepter.f90: +# (note the terms divided by 2 account for the fact that the cation oxide +# has 2 moles of the cation, not one) +# mwtgbas = (fr_si_gbas*mwtamsi + fr_al_gbas/2*mwtal2o3 + fr_na_gbas/2*mwtna2o +# + fr_k_gbas/2*mwtk2o + fr_ca_gbas*mwtcao + fr_mg_gbas*mwtmgo +# + fr_fe2_gbas*mwtfe2o) +# where mwt* is molar mass of species, and fr_*_gbas is the fraction from basalt_defines.h +# +# si: 1.0 * 60.085 = 60.085 +# al: 0.4683117231/2 * 101.962 = 23.875 +# na: 0.08088553318/2 * 61.979 = 2.5066 +# k: 0.008431137573/2 * 94.195 = 0.3971 +# ca: 0.2438545566 * 56.079 = 13.675 +# mg: 0.2721686927 * 40.304 = 10.969 +# fe2:0.1251095225 * 71.846 = 8.9886 +# +# SUM --------------------------> = 120.496 -# %% +# %% # --- SET FEEDSTOCK POTENTIALS -fs_pots = { # [kg CO2 / kg rock] - "gbas": 0.41, - "cc": 0.44, +fs_pots = { # [kg CO2 / kg rock] + "gbas": 0.41, + "cc": 0.44, } -# %% +# %% # --- USER INPUTS # [1] vars to update, constant for all runs -fertLevel = "low" # name for how much fertilizer is applied -dustsp = "cc" # name for dust species to apply (must be from accepted list) +fertLevel = "low" # name for how much fertilizer is applied +dustsp = "cc" # name for dust species to apply (must be from accepted list) pref = f"{fertLevel}Fert_{dustsp}_cdrpot" -clim_tag = None # [string] year-span (e.g., 1950-2020) for clim input if climate files are used - # (if clim files are not used, set to None) +clim_tag = None # [string] year-span (e.g., 1950-2020) for clim input if climate files are used +# (if clim files are not used, set to None) # save vars file_prefix = f"meanAnn_{dustsp}_shortRun_cdrpot_{fertLevel}Fert_gs+apprate" # prefix of output run names fn = file_prefix + "_v0.csv" savepath_batch = "/home/tykukla/aglime-swap-cdr/scepter/batch-inputs" -multi_run_split = False # whether to split the csv into multiple files -max_iters_per_set = 20 # [int] the number of runs per csv (only used if multi_run_split is True) +multi_run_split = False # whether to split the csv into multiple files +max_iters_per_set = ( + 20 # [int] the number of runs per csv (only used if multi_run_split is True) +) const_dict = { "duration": 15, # duration of run (starts from earliest year) "dustsp": dustsp, "dustsp_2nd": "amnt", - "dustrate_2nd": 6.0, # 30.0, + "dustrate_2nd": 6.0, # 30.0, "add_secondary": False, "imix": 1, "singlerun_seasonality": False, "include_psd_full": False, "include_psd_bulk": False, - "climatedir": "NA" + "climatedir": "NA", } -# %% +# %% # [2] vars to vary by site -sites = ['site_311a', 'site_311b'] -by_site = { # values must have same order as 'sites' var +sites = ["site_311a", "site_311b"] +by_site = { # values must have same order as 'sites' var "cec": [21.10329, 6.96125], "spinrun": ["site_311a_pr9_spintuneup4", "site_311b_pr9_spintuneup4"], - "climatefiles": ["site_311a", "site_311b"] # these serve as the site name when there is no cliamte file to use + "climatefiles": [ + "site_311a", + "site_311b", + ], # these serve as the site name when there is no cliamte file to use } -# %% +# %% # [3] vars to vary within site (we'll get every combination of these two) dustrate_ton_ha_yr_CO2pot = [0.1, 0.3, 0.5, 1, 2.5, 4, 5.5, 7, 11, 17, 22, 30, 50] -dustrate_ton_ha_yr = [np.round((1 / fs_pots[dustsp]) * x, 4) for x in dustrate_ton_ha_yr_CO2pot] +dustrate_ton_ha_yr = [ + np.round((1 / fs_pots[dustsp]) * x, 4) for x in dustrate_ton_ha_yr_CO2pot +] all_combinations = { - "dustrate": [x * 100 for x in dustrate_ton_ha_yr], # [ton ha-1 yr-1 * 100 
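Editor's note: the oxide-by-oxide sum in the comments above can be reproduced directly; a short check of the quoted basalt molar mass (cation-oxide fractions from basalt_defines.h, oxide molar masses in g/mol):

fr = {"amsi": 1.0, "al2o3": 0.4683117231, "na2o": 0.08088553318,
      "k2o": 0.008431137573, "cao": 0.2438545566, "mgo": 0.2721686927,
      "fe2o": 0.1251095225}
mwt = {"amsi": 60.085, "al2o3": 101.962, "na2o": 61.979, "k2o": 94.195,
       "cao": 56.079, "mgo": 40.304, "fe2o": 71.846}
# oxides carrying two moles of cation per formula unit are divided by 2
mwtgbas = (fr["amsi"] * mwt["amsi"] + fr["al2o3"] / 2 * mwt["al2o3"]
           + fr["na2o"] / 2 * mwt["na2o"] + fr["k2o"] / 2 * mwt["k2o"]
           + fr["cao"] * mwt["cao"] + fr["mgo"] * mwt["mgo"]
           + fr["fe2o"] * mwt["fe2o"])
print(round(mwtgbas, 2))   # ~120.5, consistent with the 120.496 used in these scripts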
= g m-2] - "dustrad": [1, 10, 30, 50, 75, 100, 125, 150, 200] # [diameter, microns] i think this gets applied to gbas and amnt equally (though amnt is fast-reacting so maybe not a big deal? ) + "dustrate": [x * 100 for x in dustrate_ton_ha_yr], # [ton ha-1 yr-1 * 100 = g m-2] + "dustrad": [ + 1, + 10, + 30, + 50, + 75, + 100, + 125, + 150, + 200, + ], # [diameter, microns] i think this gets applied to gbas and amnt equally (though amnt is fast-reacting so maybe not a big deal? ) } -# %% +# %% # --- BUILD DATAFRAME AND SAVE df = bhf.build_df(pref, const_dict, sites, by_site, all_combinations, add_ctrl=True) df -# %% -# save +# %% +# save bhf.save_df(df, savepath_batch, fn, multi_run_split, max_iters_per_set) # %% diff --git a/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_multiyear.py b/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_multiyear.py index 1fc991e..4121190 100644 --- a/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_multiyear.py +++ b/scepter/setup/batch-setup/make_batch_input_grainsize+apprate_multiyear.py @@ -1,43 +1,41 @@ # %% # --------------------------------------------------- -# +# # Generate batch input .csv files for SCEPTER run # using the rock_buff_dust_ts-multiyear.py script -# -# provide var vectors and assume we want every +# +# provide var vectors and assume we want every # combination of them, or by site -# +# # T Kukla (CarbonPlan, 2024) -# +# # --------------------------------------------------- -import os -import numpy as np -import pandas as pd -import itertools import batch_helperFxns as bhf -# %% +# %% # --- USER INPUTS # [1] vars to update, constant for all runs -fertLevel = "hi" # name for how much fertilizer is applied -dustsp = "cc" # name for dust species to apply (must be from accepted list) +fertLevel = "hi" # name for how much fertilizer is applied +dustsp = "cc" # name for dust species to apply (must be from accepted list) extra_tag = "multiyear" # generally "multiyear" or "" (latter if not multiyear) pref = f"{fertLevel}Fert_{dustsp}_{extra_tag}" -clim_tag = None # [string] year-span (e.g., 1950-2020) for clim input if climate files are used - # (if clim files are not used, set to None) +clim_tag = None # [string] year-span (e.g., 1950-2020) for clim input if climate files are used +# (if clim files are not used, set to None) # save vars file_prefix = f"meanAnn_{dustsp}_shortRun_{extra_tag}_{fertLevel}Fert_gs+apprate" # prefix of output run names fn = file_prefix + "_v0.csv" savepath_batch = "/home/tykukla/aglime-swap-cdr/scepter/batch-inputs" -multi_run_split = False # whether to split the csv into multiple files -max_iters_per_set = 20 # [int] the number of runs per csv (only used if multi_run_split is True) +multi_run_split = False # whether to split the csv into multiple files +max_iters_per_set = ( + 20 # [int] the number of runs per csv (only used if multi_run_split is True) +) const_dict = { "duration": 15, # duration of run (starts from earliest year) # -- dust timeseries inputs "dust_ts_dir": "/home/tykukla/aglime-swap-cdr/scepter/dust-inputs", "dust_ts_fn": f"{dustsp}_15yr_1app_no2nd_001.csv", - # -- + # -- "dustsp": dustsp, "dustsp_2nd": "amnt", "dustrate_2nd": 30.0, @@ -46,35 +44,47 @@ "singlerun_seasonality": False, "include_psd_full": False, "include_psd_bulk": False, - "climatedir": "NA" + "climatedir": "NA", } -# %% +# %% # [2] vars to vary by site -sites = ['site_311a'] # ['site_311a', 'site_311b'] -by_site = { # values must have same order as 'sites' var +sites = ["site_311a"] # 
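Editor's note: the dustrate_ton_ha_yr comprehension above back-calculates a rock application rate from a target CO2 potential using fs_pots; a worked sketch with one hypothetical target:

import numpy as np

fs_pots = {"gbas": 0.41, "cc": 0.44}        # [t CO2 per t rock], as defined above
target_co2_ton_ha_yr = 2.5                  # hypothetical CDR-potential target
rock_cc = np.round(target_co2_ton_ha_yr / fs_pots["cc"], 4)      # 5.6818 t calcite/ha/yr
rock_gbas = np.round(target_co2_ton_ha_yr / fs_pots["gbas"], 4)  # 6.0976 t basalt/ha/yr
print(rock_cc, rock_gbas)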
['site_311a', 'site_311b'] +by_site = { # values must have same order as 'sites' var "cec": [21.10329, 6.96125], "spinrun": ["site_311a_pr9_spintuneup4", "site_311b_pr9_spintuneup4"], - "climatefiles": ["site_311a", "site_311b"] # these serve as the site name when there is no cliamte file to use + "climatefiles": [ + "site_311a", + "site_311b", + ], # these serve as the site name when there is no cliamte file to use } -# %% +# %% # [3] vars to vary within site (we'll get every combination of these two) dustrate_ton_ha_yr = [0.3, 0.6, 1, 2, 5, 7, 10, 15, 25, 35, 45, 60, 100] all_combinations = { - "dustrate": [x * 100 for x in dustrate_ton_ha_yr], # [ton ha-1 yr-1 * 100 = g m-2] - "dustrad": [1, 10, 30, 50, 75, 100, 125, 150, 200] # [diameter, microns] i think this gets applied to gbas and amnt equally (though amnt is fast-reacting so maybe not a big deal? ) + "dustrate": [x * 100 for x in dustrate_ton_ha_yr], # [ton ha-1 yr-1 * 100 = g m-2] + "dustrad": [ + 1, + 10, + 30, + 50, + 75, + 100, + 125, + 150, + 200, + ], # [diameter, microns] i think this gets applied to gbas and amnt equally (though amnt is fast-reacting so maybe not a big deal? ) } -# %% +# %% # --- BUILD DATAFRAME AND SAVE df = bhf.build_df(pref, const_dict, sites, by_site, all_combinations, add_ctrl=True) df - # %% -# save +# save bhf.save_df(df, savepath_batch, fn, multi_run_split, max_iters_per_set) # %% diff --git a/scepter/setup/batch-setup/make_batch_input_meanAnnCao_fixedRate_series.ipynb b/scepter/setup/batch-setup/make_batch_input_meanAnnCao_fixedRate_series.ipynb index aa30dab..71e55ee 100644 --- a/scepter/setup/batch-setup/make_batch_input_meanAnnCao_fixedRate_series.ipynb +++ b/scepter/setup/batch-setup/make_batch_input_meanAnnCao_fixedRate_series.ipynb @@ -485,7 +485,7 @@ " for i, df_chunk in enumerate(outdfs):\n", " tot_dfs = len(outdfs)\n", " savefn_nosuff = savefn.rstrip(\".csv\")\n", - " savefn_out = f\"{savefn_nosuff}_set{i+1}of{tot_dfs}.csv\"\n", + " savefn_out = f\"{savefn_nosuff}_set{i + 1}of{tot_dfs}.csv\"\n", " print(os.path.join(savepath_batch, savefn_out))\n", " df_chunk.to_csv(os.path.join(savepath_batch, savefn_out), index=False)\n", " # also save total\n", diff --git a/scepter/setup/batch-setup/make_batch_input_meanAnnbasalt_fixedRate_series.ipynb b/scepter/setup/batch-setup/make_batch_input_meanAnnbasalt_fixedRate_series.ipynb index 4d73e13..80b6031 100644 --- a/scepter/setup/batch-setup/make_batch_input_meanAnnbasalt_fixedRate_series.ipynb +++ b/scepter/setup/batch-setup/make_batch_input_meanAnnbasalt_fixedRate_series.ipynb @@ -485,7 +485,7 @@ " for i, df_chunk in enumerate(outdfs):\n", " tot_dfs = len(outdfs)\n", " savefn_nosuff = savefn.rstrip(\".csv\")\n", - " savefn_out = f\"{savefn_nosuff}_set{i+1}of{tot_dfs}.csv\"\n", + " savefn_out = f\"{savefn_nosuff}_set{i + 1}of{tot_dfs}.csv\"\n", " print(os.path.join(savepath_batch, savefn_out))\n", " df_chunk.to_csv(os.path.join(savepath_batch, savefn_out), index=False)\n", " # also save total\n", diff --git a/scepter/setup/batch-setup/make_batch_input_meanAnndolomite_fixedRate_series.ipynb b/scepter/setup/batch-setup/make_batch_input_meanAnndolomite_fixedRate_series.ipynb index 6e863db..557a3d9 100644 --- a/scepter/setup/batch-setup/make_batch_input_meanAnndolomite_fixedRate_series.ipynb +++ b/scepter/setup/batch-setup/make_batch_input_meanAnndolomite_fixedRate_series.ipynb @@ -485,7 +485,7 @@ " for i, df_chunk in enumerate(outdfs):\n", " tot_dfs = len(outdfs)\n", " savefn_nosuff = savefn.rstrip(\".csv\")\n", - " savefn_out = 
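Editor's note on the savefn_nosuff line in these notebooks: str.rstrip(".csv") strips any trailing '.', 'c', 's', or 'v' characters rather than the literal suffix (for example "basalt_docs.csv" becomes "basalt_do"). A suffix-safe sketch (Python 3.9+), with a hypothetical filename:

savefn = "meanAnn_cc_shortRun_v0.csv"      # hypothetical batch filename
savefn_nosuff = savefn.removesuffix(".csv")
tot_dfs = 3
for i in range(tot_dfs):
    print(f"{savefn_nosuff}_set{i + 1}of{tot_dfs}.csv")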
f\"{savefn_nosuff}_set{i+1}of{tot_dfs}.csv\"\n", + " savefn_out = f\"{savefn_nosuff}_set{i + 1}of{tot_dfs}.csv\"\n", " print(os.path.join(savepath_batch, savefn_out))\n", " df_chunk.to_csv(os.path.join(savepath_batch, savefn_out), index=False)\n", " # also save total\n", diff --git a/scepter/setup/batch-setup/make_batch_input_meanAnnliming_fixedRate_series.ipynb b/scepter/setup/batch-setup/make_batch_input_meanAnnliming_fixedRate_series.ipynb index a5a9363..951d803 100644 --- a/scepter/setup/batch-setup/make_batch_input_meanAnnliming_fixedRate_series.ipynb +++ b/scepter/setup/batch-setup/make_batch_input_meanAnnliming_fixedRate_series.ipynb @@ -485,7 +485,7 @@ " for i, df_chunk in enumerate(outdfs):\n", " tot_dfs = len(outdfs)\n", " savefn_nosuff = savefn.rstrip(\".csv\")\n", - " savefn_out = f\"{savefn_nosuff}_set{i+1}of{tot_dfs}.csv\"\n", + " savefn_out = f\"{savefn_nosuff}_set{i + 1}of{tot_dfs}.csv\"\n", " print(os.path.join(savepath_batch, savefn_out))\n", " df_chunk.to_csv(os.path.join(savepath_batch, savefn_out), index=False)\n", " # also save total\n", diff --git a/scepter/setup/build_composite_multiyear.py b/scepter/setup/build_composite_multiyear.py index 9f5da90..53a925c 100644 --- a/scepter/setup/build_composite_multiyear.py +++ b/scepter/setup/build_composite_multiyear.py @@ -66,8 +66,8 @@ def build_composite(basename, outdir): # ... first get all the outdir paths # (these will be passed to the function in the main script!) # outdir = "/home/tykukla/SCEPTER/scepter_output" - # -- we're having an issue where a basename like - # mydir_10 returns "mydir_10_startyear*" but it also + # -- we're having an issue where a basename like + # mydir_10 returns "mydir_10_startyear*" but it also # returns "mydir_100_startyear*". So we add an under- # score to prevent this but only for finding alldirs runname_base_underscore = basename + "_" @@ -204,16 +204,16 @@ def build_composite(basename, outdir): # check if file exists savedst_tmp = os.path.join(dst_main_flx, fn) filecheck = os.path.isfile(savedst_tmp) - if filecheck: # then read in the existing df and append the new one + if filecheck: # then read in the existing df and append the new one # read the existing file into a dataframe existing_df = preprocess_txt(savedst_tmp) # join the existing and source dfs together by column # (join='outer' means a column that only exists in df2 will be kept, with nans in df1) # and axis=0 means it's concatenated row-wise, not column-wise) - new_df = pd.concat([existing_df, dfsrc], axis=0, join='outer') + new_df = pd.concat([existing_df, dfsrc], axis=0, join="outer") new_df.to_csv( # default is mode='w' which will overwrite the existing file (that's fine because we've merged it with the new data) savedst_tmp, index=None, sep="\t" - ) + ) else: # if the file doesn't exist, save and include the header dfsrc.to_csv( savedst_tmp, index=None, sep="\t", mode="a" @@ -222,6 +222,7 @@ def build_composite(basename, outdir): # return the new output dirs return dst_main_field, dst_main_lab + # # save result or append if one exists # if filecheck: # dfsrc.to_csv(dst, header=None, index=None, sep='\t', mode='a') diff --git a/scepter/setup/cflx_proc.py b/scepter/setup/cflx_proc.py index 94bdf67..fc691d6 100644 --- a/scepter/setup/cflx_proc.py +++ b/scepter/setup/cflx_proc.py @@ -1,81 +1,78 @@ # ------------------------------------------------ -# +# # Functions to calculate / synthesize CDR-relevant -# fluxes for single SCEPTER simulations. 
-# -# In order to make the functions usable for a wide -# range of SCEPTER run files, fluxes are NOT -# compared to a control scenario. Only absolute +# fluxes for single SCEPTER simulations. +# +# In order to make the functions usable for a wide +# range of SCEPTER run files, fluxes are NOT +# compared to a control scenario. Only absolute # fluxes for the single run are considered. -# +# # These functions are called within the SCEPTER -# run .py scripts, creating a `postproc_*` dir +# run .py scripts, creating a `postproc_*` dir # in the run output directory. -# +# # ------------------------------------------------ # %% -import math import os import re -import glob -import fnmatch -# from typing import Union, Literal -from scipy.integrate import cumulative_trapezoid import numpy as np import pandas as pd import xarray as xr -import pickle -# %% +# from typing import Union, Literal +from scipy.integrate import cumulative_trapezoid + +# %% # --- Dict for molar masses of solid species (from Kanzaki et al., 2022; table 1) molar_mass_dict = { - 'amsi': 60.085, - 'qtz': 60.085, - 'gb': 78.004, - 'gt': 88.854, - 'hm': 159.692, - 'gps': 172.168, - 'arg': 100.089, - 'cc': 100.089, - 'dlm': 184.403, - 'ab': 262.225, - 'kfs': 278.33, - 'an': 278.311, - 'fo': 140.694, - 'fa': 203.778, - 'en': 100.389, - 'fer': 131.931, - 'dp': 216.553, - 'hb': 248.09, - 'tm': 812.374, - 'antp': 780.976, - 'mscv': 398.311, - 'plgp': 417.262, - 'ct': 277.113, - 'ka': 258.162, - 'anl': 220.155, - 'nph': 142.055, - 'nabd': 367.609, - 'kbd': 372.978, - 'cabd': 366.625, - 'mgbd': 363.996, - 'ill': 383.90, - 'g1': 30, - 'g2': 30, - 'g3': 30, - 'amnt': 80.043, # Kanzaki et al., 2023, table 3 + "amsi": 60.085, + "qtz": 60.085, + "gb": 78.004, + "gt": 88.854, + "hm": 159.692, + "gps": 172.168, + "arg": 100.089, + "cc": 100.089, + "dlm": 184.403, + "ab": 262.225, + "kfs": 278.33, + "an": 278.311, + "fo": 140.694, + "fa": 203.778, + "en": 100.389, + "fer": 131.931, + "dp": 216.553, + "hb": 248.09, + "tm": 812.374, + "antp": 780.976, + "mscv": 398.311, + "plgp": 417.262, + "ct": 277.113, + "ka": 258.162, + "anl": 220.155, + "nph": 142.055, + "nabd": 367.609, + "kbd": 372.978, + "cabd": 366.625, + "mgbd": 363.996, + "ill": 383.90, + "g1": 30, + "g2": 30, + "g3": 30, + "amnt": 80.043, # Kanzaki et al., 2023, table 3 # --- see treatment of gbas below (not in table 1 of Kanzaki et al., 2022) - 'gbas': 120.496 - # Note: gbas molar mass depends on whether Dmod_bas_cmp is defined in the makefile! + "gbas": 120.496, + # Note: gbas molar mass depends on whether Dmod_bas_cmp is defined in the makefile! # if the makefile has `CPFLAGS += -Dmod_basalt_cmp`, then scepter.f90 - # uses basalt_defines.h by default (see scepter.f90 lines ~81, 82). Otherwise, - # another basalt composition (hard-coded in scepter.f90) is used. - # - # This flag was set in my makefile, so I use the molar mass we compute from - # basalt_defines.h. - # - # --- molar mass is calculated following line ~358 in scepter.f90: + # uses basalt_defines.h by default (see scepter.f90 lines ~81, 82). Otherwise, + # another basalt composition (hard-coded in scepter.f90) is used. + # + # This flag was set in my makefile, so I use the molar mass we compute from + # basalt_defines.h. 
+ # + # --- molar mass is calculated following line ~358 in scepter.f90: # (note the terms divided by 2 account for the fact that the cation oxide # has 2 moles of the cation, not one) # mwtgbas = (fr_si_gbas*mwtamsi + fr_al_gbas/2*mwtal2o3 + fr_na_gbas/2*mwtna2o @@ -90,7 +87,7 @@ # ca: 0.2438545566 * 56.079 = 13.675 # mg: 0.2721686927 * 40.304 = 10.969 # fe2:0.1251095225 * 71.846 = 8.9886 - # + # # SUM --------------------------> = 120.496 } @@ -102,7 +99,7 @@ def preprocess_txt( runname: str, fn: str, run_subdir: str = "flx", - map_numeric: bool = True + map_numeric: bool = True, ) -> pd.DataFrame: """ Convert SCEPTER output .txt files to pandas DataFrame for further @@ -118,9 +115,9 @@ def preprocess_txt( name of the file in the flux directory to input run_subdir : str name of the subdirectory in the run output directory to find the file (either "prof" or "flux") - default is "flx". + default is "flx". map_numeric : bool - if True, all columns are mapped to numeric values. This should only be false for dust.txt which has + if True, all columns are mapped to numeric values. This should only be false for dust.txt which has columns for the dust species Returns @@ -128,7 +125,7 @@ def preprocess_txt( pd.DataFrame same format as the SCEPTER .txt file """ - + data = [] # Initialize a list to store the processed data # Initialize a flag to determine if we are reading the header @@ -138,16 +135,16 @@ def preprocess_txt( file_path = os.path.join(outdir, runname, run_subdir, fn) # Read the file line by line and process the data - with open(file_path, 'r') as file: + with open(file_path, "r") as file: for line in file: line = line.strip() # Remove leading/trailing whitespace if is_header: # Split the first line into column names - column_names = re.split(r'\s+', line) + column_names = re.split(r"\s+", line) is_header = False else: # Split the other lines into data values - values = re.split(r'\s+', line) + values = re.split(r"\s+", line) data.append(values) # Create a DataFrame with the processed data and set column names @@ -158,19 +155,20 @@ def preprocess_txt( # return return df + def get_data( outdir: str, runname: str, var_fn: str, cdvar: str, run_subdir: str = "flx", - get_int: bool = True + get_int: bool = True, ) -> pd.DataFrame: """ Get the SCEPTER data from defined text files. both timeseries and integrated flux files (e.g., - and int_-) are brought in. Both pandas dataframes are returned. - + Parameters ---------- @@ -196,17 +194,18 @@ def get_data( integrated timeseries of the carbon flux metric """ # get the txt file as a pandas dataframe - fn = f"{var_fn}-{cdvar}.txt" # flux timeseries - df = preprocess_txt(outdir, runname, fn, run_subdir = run_subdir) + fn = f"{var_fn}-{cdvar}.txt" # flux timeseries + df = preprocess_txt(outdir, runname, fn, run_subdir=run_subdir) if get_int: - fn_int = f"int_{fn}" # integrated flux timeseries - dfint = preprocess_txt(outdir, runname, fn_int, run_subdir = run_subdir) + fn_int = f"int_{fn}" # integrated flux timeseries + dfint = preprocess_txt(outdir, runname, fn_int, run_subdir=run_subdir) # return result if get_int: return df, dfint else: return df, _ + def get_data_prof( outdir: str, runname: str, @@ -214,8 +213,8 @@ def get_data_prof( time_index: int, run_subdir: str = "prof", ) -> pd.DataFrame: - ''' - Return data from a SCEPTER/scepter_output/prof/*.txt file + """ + Return data from a SCEPTER/scepter_output/prof/*.txt file in pandas dataframe format. 
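Editor's note: the whitespace-delimited layout that preprocess_txt parses line by line can also be read in one call with pandas; a sketch on an in-memory stand-in for a flx file:

import io
import pandas as pd

txt = "time dif adv tflx\n0.0 0.1 0.2 0.3\n1.0 0.2 0.4 0.1\n"   # stand-in for a SCEPTER flx .txt
df = pd.read_csv(io.StringIO(txt), sep=r"\s+")
print(df.dtypes)   # columns come back numeric, matching the map(pd.to_numeric) step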
Parameters @@ -226,23 +225,23 @@ def get_data_prof( name of the SCEPTER run (equivalent to the directory within outdir). Generally _field. var_prefix : str prefix of the file to read in the 'prof' subdirectory. Includes all characters up to `*-xxx.txt` - where xxx is a 3 digit numeric from 001 to 020. - time_index : int + where xxx is a 3 digit numeric from 001 to 020. + time_index : int [1:20] single integer between 1 and 20 denoting a timestep that was output as a profile. There is one profile file per timestep. run_subdir : str name of the subdirectory that holds the profile files ("prof" by default) - - Returns + + Returns ------- pd.DataFrame profile values defined over depth and time. - ''' + """ # generate file path - fn = f"{var_prefix}-{time_index:03d}.txt" # create filename - infile = os.path.join(outdir, runname, run_subdir, fn) # paste filename to path + fn = f"{var_prefix}-{time_index:03d}.txt" # create filename + infile = os.path.join(outdir, runname, run_subdir, fn) # paste filename to path # read in data - returnme = preprocess_txt(outdir, runname, fn, run_subdir = run_subdir) + returnme = preprocess_txt(outdir, runname, fn, run_subdir=run_subdir) # rename z to depth returnme = returnme.rename(columns={"z": "depth"}) @@ -261,29 +260,29 @@ def co2_flx( organic_sp_list: list = ["g1", "g2", "g3"], inorganic_sp_list: list = ["arg", "cc", "dlm"], convert_units: bool = True, - co2_g_mol: float=44.01, # molar_mass_dict not used bc the input file should be in mol co2 / m2 / yr + co2_g_mol: float = 44.01, # molar_mass_dict not used bc the input file should be in mol co2 / m2 / yr ) -> pd.DataFrame: """ - Get the CO2 diffusive and advective flux over time. Uses the *flx_gas* - files and pco2 variable by default. files are in mol/m2/yr and output + Get the CO2 diffusive and advective flux over time. Uses the *flx_gas* + files and pco2 variable by default. files are in mol/m2/yr and output is in g/m2/yr if convert_units=False; ton/ha/yr if convert_units=True. - - You can use *flx_co2sp* files and DIC variable as well but it does - not include aqueous complexation between CO2 species and some cations, - so there can be some difference from the flx_gas results. - + + You can use *flx_co2sp* files and DIC variable as well but it does + not include aqueous complexation between CO2 species and some cations, + so there can be some difference from the flx_gas results. + int_* fluxes are multiplied by time so the output is the time-integral. ************************************************************** --- derivation notes --- the flux balance can be written as: adv = -dif - [sources] - tflx - where positive values indicate net advection out. - + where positive values indicate net advection out. + if our only sources are respiration and cc dissolution, we get: adv = -dif - resp - cc - tflx so the advection flux without the inorganic contribution would simply be - adv_noinorg = -dif - resp - tflx + adv_noinorg = -dif - resp - tflx [or] adv_noinorg = adv + cc ************************************************************** @@ -298,14 +297,14 @@ def co2_flx( cdvar : str name of the variable used for CDR metric. 
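Editor's note: a small numeric check of the flux-balance rearrangement in the derivation notes above (all terms in mol CO2 m-2 yr-1, values hypothetical; positive adv means net advection out of the column):

dif, resp, cc, tflx = -2.0, -1.5, -0.5, 0.2   # hypothetical fluxes
adv = -dif - resp - cc - tflx                 # 3.8
adv_noinorg = adv + cc                        # 3.3, i.e. -dif - resp - tflx
assert abs(adv_noinorg - (-dif - resp - tflx)) < 1e-12
print(adv, adv_noinorg)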
Should align with var_fn such that format is "[basename]-[cdvar].txt" organic_sp_list : dict - dict where keys are the IDs for all possible organic species (required for - resp calculation) and values are molar masses in g/mol - (required to go from mol/m2/yr to mass/m2/yr) (for keys and values, see Kanzaki + dict where keys are the IDs for all possible organic species (required for + resp calculation) and values are molar masses in g/mol + (required to go from mol/m2/yr to mass/m2/yr) (for keys and values, see Kanzaki et al., 2022; table 1) inorganic_sp_list : dict - dict where keys are the IDs for all carbon-bearing minerals - and values are their molar masses in g/mol (required to go - from mol/m2/yr to mass/m2/yr) (for keys and values, see Kanzaki + dict where keys are the IDs for all carbon-bearing minerals + and values are their molar masses in g/mol (required to go + from mol/m2/yr to mass/m2/yr) (for keys and values, see Kanzaki et al., 2022; table 1) convert_units : bool if True, convert mol/m2/yr to ton/ha/yr, if False convert mol/m2/yr to g/m2/yr @@ -320,12 +319,12 @@ def co2_flx( # ***************************************** # define unit conversion constants # for g/m2/yr to ton/ha/yr - ton_g = 1 / 1e6 # [ton g-1] - m2_ha = 10e3 # [m2 ha-1] - conv_factor = ton_g * m2_ha + ton_g = 1 / 1e6 # [ton g-1] + m2_ha = 10e3 # [m2 ha-1] + conv_factor = ton_g * m2_ha # ***************************************** - # get the txt file as a pandas dataframe + # get the txt file as a pandas dataframe df, dfint = get_data(outdir, runname, var_fn, cdvar) # find how many organic species are present @@ -336,61 +335,87 @@ def co2_flx( inorg_sp_int = list(set(inorganic_sp_list) & set(dfint.columns)) # pull out the relevant components - tdf = df.loc[:, ['time', 'dif', 'tflx', 'adv']].rename(columns={'time': 'time', - 'dif': 'co2flx_dif', 'tflx': 'co2flx_tflx', - 'adv': 'co2flx_adv'}) - tdfint = dfint.loc[:, ['time', 'dif', 'tflx', 'adv']].rename(columns={'time': 'time', - 'dif': 'co2flx_dif', 'tflx': 'co2flx_tflx', - 'adv': 'co2flx_adv'}) - - + tdf = df.loc[:, ["time", "dif", "tflx", "adv"]].rename( + columns={ + "time": "time", + "dif": "co2flx_dif", + "tflx": "co2flx_tflx", + "adv": "co2flx_adv", + } + ) + tdfint = dfint.loc[:, ["time", "dif", "tflx", "adv"]].rename( + columns={ + "time": "time", + "dif": "co2flx_dif", + "tflx": "co2flx_tflx", + "adv": "co2flx_adv", + } + ) + # --- organic and inorganic contributions # compute resp component and add to tdf(int) if len(org_sp) > 0: # if no org species are present, skip for sp in org_sp: tdf[sp] = df[sp] - tdf['co2flx_resp'] = df[org_sp].sum(axis=1) # add inorg fluxes together + tdf["co2flx_resp"] = df[org_sp].sum(axis=1) # add inorg fluxes together if len(org_sp_int) > 0: # if no org species are present, skip for sp in org_sp_int: tdfint[sp] = dfint[sp] - tdfint['co2flx_resp'] = dfint[org_sp_int].sum(axis=1) # add inorg fluxes together - + tdfint["co2flx_resp"] = dfint[org_sp_int].sum( + axis=1 + ) # add inorg fluxes together + # compute inorg component and add to tdf(int) if len(inorg_sp) > 0: # if no org species are present, skip for sp in inorg_sp: tdf[sp] = df[sp] - tdf['co2flx_inorg'] = df[inorg_sp].sum(axis=1) # add inorg fluxes together - tdf['co2flx_adv_noinorg'] = tdf['co2flx_adv'] + tdf['co2flx_inorg'] + tdf["co2flx_inorg"] = df[inorg_sp].sum(axis=1) # add inorg fluxes together + tdf["co2flx_adv_noinorg"] = tdf["co2flx_adv"] + tdf["co2flx_inorg"] if len(inorg_sp_int) > 0: # if no org species are present, skip for sp in inorg_sp_int: 
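Editor's note: the conversion constants defined above chain together as mol m-2 yr-1 -> g m-2 yr-1 -> ton ha-1 yr-1; a worked example with a hypothetical flux:

co2_g_mol = 44.01          # g CO2 per mol
ton_g = 1 / 1e6            # ton per g
m2_ha = 10e3               # m2 per ha (= 1e4)
flux_mol_m2_yr = 2.0       # hypothetical advective CO2 flux
flux_ton_ha_yr = flux_mol_m2_yr * co2_g_mol * ton_g * m2_ha
print(round(flux_ton_ha_yr, 4))   # 0.8802 ton CO2 ha-1 yr-1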
tdfint[sp] = dfint[sp] - tdfint['co2flx_inorg'] = dfint[inorg_sp_int].sum(axis=1) # add inorg fluxes together - tdfint['co2flx_adv_noinorg'] = tdfint['co2flx_adv'] + tdfint['co2flx_inorg'] - # --- convert units + tdfint["co2flx_inorg"] = dfint[inorg_sp_int].sum( + axis=1 + ) # add inorg fluxes together + tdfint["co2flx_adv_noinorg"] = tdfint["co2flx_adv"] + tdfint["co2flx_inorg"] + # --- convert units if convert_units: # convert mol/m2/yr to ton/ha/yr - tdfint = tdfint.apply(lambda x: x * co2_g_mol * conv_factor if x.name not in ['time', 'units'] else x) - tdf = tdf.apply(lambda x: x * co2_g_mol * conv_factor if x.name not in ['time', 'units'] else x) + tdfint = tdfint.apply( + lambda x: x * co2_g_mol * conv_factor + if x.name not in ["time", "units"] + else x + ) + tdf = tdf.apply( + lambda x: x * co2_g_mol * conv_factor + if x.name not in ["time", "units"] + else x + ) # add units columns - tdfint['units'] = 'ton ha-1' - tdf['units'] = 'ton ha-1 yr-1' - else: # convert mol/m2/yr to g/m2/yr - tdfint = tdfint.apply(lambda x: x * co2_g_mol if x.name not in ['time', 'units'] else x) - tdf = tdf.apply(lambda x: x * co2_g_mol if x.name not in ['time', 'units'] else x) + tdfint["units"] = "ton ha-1" + tdf["units"] = "ton ha-1 yr-1" + else: # convert mol/m2/yr to g/m2/yr + tdfint = tdfint.apply( + lambda x: x * co2_g_mol if x.name not in ["time", "units"] else x + ) + tdf = tdf.apply( + lambda x: x * co2_g_mol if x.name not in ["time", "units"] else x + ) # add units columns - tdfint['units'] = 'g m-2' - tdf['units'] = 'g m-2 yr-1' - + tdfint["units"] = "g m-2" + tdf["units"] = "g m-2 yr-1" + # multiply tdfint columns by time (required to output time-integrated fluxes) - tdfint = tdfint.apply(lambda x: x * tdfint['time'] if x.name not in ['time', 'units'] else x) + tdfint = tdfint.apply( + lambda x: x * tdfint["time"] if x.name not in ["time", "units"] else x + ) - # --- tidy up output - tdfint['flx_type'] = "int_flx" - tdf['flx_type'] = "flx" - tdfint['runname'] = tdf['runname'] = runname - tdfint['var'] = tdf['var'] = cdvar - - # combine + tdfint["flx_type"] = "int_flx" + tdf["flx_type"] = "flx" + tdfint["runname"] = tdf["runname"] = runname + tdfint["var"] = tdf["var"] = cdvar + + # combine outdf = pd.concat([tdf, tdfint], axis=0, ignore_index=True) return outdf @@ -401,13 +426,13 @@ def sld_flx( feedstock: str, var_fn: str = "flx_sld", dust_from_file: bool = True, - molar_mass_dict: dict = molar_mass_dict + molar_mass_dict: dict = molar_mass_dict, ) -> pd.DataFrame: """ Get the feedstock fluxes. Uses the *flx_sld* files for the feedstock. Only - time-integrated fluxes are returned. integrated dust application can be - computed from the dust.txt file if "dust_from_file"=True (this is - required for re-application runs which are composites of multiple + time-integrated fluxes are returned. integrated dust application can be + computed from the dust.txt file if "dust_from_file"=True (this is + required for re-application runs which are composites of multiple individual runs) @@ -422,13 +447,13 @@ def sld_flx( var_fn : str base name of the SCEPTER flx file. Format is "[basename]-[cdvar].txt" dust_from_file : bool - if True, then compute integrated dust fluxes from the dust.txt file. Should be - set to True for re-application runs since the int_sld* files are patched + if True, then compute integrated dust fluxes from the dust.txt file. 
Should be + set to True for re-application runs since the int_sld* files are patched together from multiple 1-year runs, so they don't reflect the true time integral (just the timeSTEP integral). This ensures that the integral is correct molar_mass_dict : dict dictionary where keys are species IDs and values are molar masses in [g mol-1] - taken from Kanzaki et al., 2022 GMD; Table 1. + taken from Kanzaki et al., 2022 GMD; Table 1. Returns ------- @@ -438,107 +463,131 @@ def sld_flx( # ***************************************** # define unit conversion constants # for g/m2/yr to ton/ha/yr - ton_g = 1 / 1e6 # [ton g-1] - m2_ha = 10e3 # [m2 ha-1] - conv_factor = ton_g * m2_ha + ton_g = 1 / 1e6 # [ton g-1] + m2_ha = 10e3 # [m2 ha-1] + conv_factor = ton_g * m2_ha # ***************************************** - # get the txt file as a pandas dataframe + # get the txt file as a pandas dataframe df, dfint = get_data(outdir, runname, var_fn, cdvar=feedstock) # multiply tdfint columns by time (required to output time-integrated fluxes) - dfint = dfint.apply(lambda x: x * dfint['time'] if x.name != 'time' else x) - + dfint = dfint.apply(lambda x: x * dfint["time"] if x.name != "time" else x) + # add dust if needed - # [note] no unit conversion from mol/m2/yr to g/m2/yr is needed if - # dust_from_file because that is calculated based on application in g/m2/yr - if dust_from_file: - dfdust0 = preprocess_txt(outdir, runname, fn='dust.txt', - run_subdir = "flx", map_numeric = False) + # [note] no unit conversion from mol/m2/yr to g/m2/yr is needed if + # dust_from_file because that is calculated based on application in g/m2/yr + if dust_from_file: + dfdust0 = preprocess_txt( + outdir, runname, fn="dust.txt", run_subdir="flx", map_numeric=False + ) # map numeric columns to numerics - dfnum = dfdust0.drop(columns=['dustsp1', 'dustsp2']).map(pd.to_numeric) + dfnum = dfdust0.drop(columns=["dustsp1", "dustsp2"]).map(pd.to_numeric) # add other columns back dfdust = dfnum.copy() - dfdust['dustsp1'] = dfdust0['dustsp1'].copy() - dfdust['dustsp2'] = dfdust0['dustsp2'].copy() - + dfdust["dustsp1"] = dfdust0["dustsp1"].copy() + dfdust["dustsp2"] = dfdust0["dustsp2"].copy() + # --- identify the feedstock index - columns_with_fs = [col for col in dfdust.columns if (dfdust[col] == feedstock).any()] + columns_with_fs = [ + col for col in dfdust.columns if (dfdust[col] == feedstock).any() + ] if len(columns_with_fs) > 0: - fscol = columns_with_fs[0] # should only return one column, so we take the 0 index + fscol = columns_with_fs[ + 0 + ] # should only return one column, so we take the 0 index else: fscol = "not found" if fscol == "dustsp1": - dust_dx = '1' + dust_dx = "1" elif fscol == "dustsp2": - dust_dx = '2' - else: # assume it's the first index if it doesn't exist - dust_dx = '1' + dust_dx = "2" + else: # assume it's the first index if it doesn't exist + dust_dx = "1" # re-calculate integral because right now it's integrated by timestep, not # by the entire run itself - dfdust['int_dust_g_m2_yr'] = cumulative_trapezoid(dfdust[f'dust{dust_dx}_g_m2_yr'], dfdust['time'], initial=0) - # add dust data - if len(dfdust) != len(df): # if mis-matched, then match them + dfdust["int_dust_g_m2_yr"] = cumulative_trapezoid( + dfdust[f"dust{dust_dx}_g_m2_yr"], dfdust["time"], initial=0 + ) + # add dust data + if len(dfdust) != len(df): # if mis-matched, then match them # drop duplicates in the 'time' column, keeping first occurrence - dfdust_nodup = dfdust.drop_duplicates(subset='time', keep='first') - # then interpolate the data 
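Editor's note: the re-integration above uses scipy's cumulative_trapezoid to turn the instantaneous dust rate into a running time integral over the whole run; a self-contained sketch with a constant, hypothetical application rate:

import numpy as np
from scipy.integrate import cumulative_trapezoid

time = np.array([0.0, 0.5, 1.0, 1.5, 2.0])     # [yr]
dust_g_m2_yr = np.full_like(time, 100.0)       # constant 100 g m-2 yr-1
int_dust = cumulative_trapezoid(dust_g_m2_yr, time, initial=0)
print(int_dust)   # [  0.  50. 100. 150. 200.] g m-2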
+ dfdust_nodup = dfdust.drop_duplicates(subset="time", keep="first") + # then interpolate the data # merge the DataFrames on 'time' using outer join to keep all time points - df_merged = pd.merge(df, dfdust_nodup, on='time', how='outer', suffixes=('', '_orig')) - # interpolate the columns of interest - df_merged['int_dust_g_m2_yr'] = df_merged['int_dust_g_m2_yr'].interpolate() + df_merged = pd.merge( + df, dfdust_nodup, on="time", how="outer", suffixes=("", "_orig") + ) + # interpolate the columns of interest + df_merged["int_dust_g_m2_yr"] = df_merged["int_dust_g_m2_yr"].interpolate() # keep only the points in the df time steps - intdust = df_merged[df_merged['time'].isin(df['time'])]['int_dust_g_m2_yr'].values / 100 # divide by 100 to convert g/m2/yr to ton/ha/yr + intdust = ( + df_merged[df_merged["time"].isin(df["time"])]["int_dust_g_m2_yr"].values + / 100 + ) # divide by 100 to convert g/m2/yr to ton/ha/yr else: - intdust = dfdust['int_dust_g_m2_yr'].values / 100 # divide by 100 to convert g/m2/yr to ton/ha/yr + intdust = ( + dfdust["int_dust_g_m2_yr"].values / 100 + ) # divide by 100 to convert g/m2/yr to ton/ha/yr # add integrated dust - dfint['int_dust_ton_ha_yr'] = intdust + dfint["int_dust_ton_ha_yr"] = intdust else: # use rain dust (already multiplied by time, so it's time-integrated) # multiply by -1 to get positive values into the soil column # multiply by molar_mass_dict[feedstock] to get mol/m2/yr in g/m2/yr # multiply by conv_factor go get g/m2/yr to ton/ha/yr - dfint['int_dust_ton_ha_yr'] = -1 * dfint['rain'] * molar_mass_dict[feedstock] * conv_factor # (note, we multiplied rain by time earlier so we don't have to do it here) + dfint["int_dust_ton_ha_yr"] = ( + -1 * dfint["rain"] * molar_mass_dict[feedstock] * conv_factor + ) # (note, we multiplied rain by time earlier so we don't have to do it here) - # convert other variables to ton/ha/yr - dfint['adv'] = dfint['adv'] * molar_mass_dict[feedstock] * conv_factor + # convert other variables to ton/ha/yr + dfint["adv"] = dfint["adv"] * molar_mass_dict[feedstock] * conv_factor dfint[feedstock] = dfint[feedstock] * molar_mass_dict[feedstock] * conv_factor # pull out just the columns we want - tdfint = dfint.loc[:, ['time', 'int_dust_ton_ha_yr', 'adv', feedstock]] + tdfint = dfint.loc[:, ["time", "int_dust_ton_ha_yr", "adv", feedstock]] # compute dissolution - tdfint['dust_minus_adv'] = tdfint['int_dust_ton_ha_yr'] - tdfint['adv'] # dust that's left after solid advection - tdfint['total_dissolution'] = tdfint[feedstock] # net dissolution - tdfint['fraction_sld_advected'] = tdfint['adv'] / tdfint['int_dust_ton_ha_yr'] - tdfint['fraction_sld_remaining'] = (tdfint['int_dust_ton_ha_yr'] - tdfint['adv'] - tdfint[feedstock]) / tdfint['int_dust_ton_ha_yr'] + tdfint["dust_minus_adv"] = ( + tdfint["int_dust_ton_ha_yr"] - tdfint["adv"] + ) # dust that's left after solid advection + tdfint["total_dissolution"] = tdfint[feedstock] # net dissolution + tdfint["fraction_sld_advected"] = tdfint["adv"] / tdfint["int_dust_ton_ha_yr"] + tdfint["fraction_sld_remaining"] = ( + tdfint["int_dust_ton_ha_yr"] - tdfint["adv"] - tdfint[feedstock] + ) / tdfint["int_dust_ton_ha_yr"] # fraction of non-advected rock that gets dissolved - tdfint['fraction_remaining_dissolved'] = tdfint['total_dissolution'] / tdfint['dust_minus_adv'] + tdfint["fraction_remaining_dissolved"] = ( + tdfint["total_dissolution"] / tdfint["dust_minus_adv"] + ) # fraction of total applied rock that gets dissolved - tdfint['fraction_total_dissolved'] = tdfint['total_dissolution'] / 
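Editor's note: when the dust and flux time grids differ, the code above aligns them with an outer merge plus interpolation; a minimal sketch with hypothetical time steps:

import pandas as pd

df = pd.DataFrame({"time": [0.0, 1.0, 2.0]})                                  # flux time steps
dfdust_nodup = pd.DataFrame({"time": [0.0, 2.0], "int_dust_g_m2_yr": [0.0, 200.0]})
df_merged = pd.merge(df, dfdust_nodup, on="time", how="outer").sort_values("time")
# note: the default interpolation is by row position, which matches here because
# the merged times are evenly spaced
df_merged["int_dust_g_m2_yr"] = df_merged["int_dust_g_m2_yr"].interpolate()
aligned = df_merged[df_merged["time"].isin(df["time"])]
print(aligned)   # the t=1.0 row is filled in as 100.0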
tdfint['int_dust_ton_ha_yr'] + tdfint["fraction_total_dissolved"] = ( + tdfint["total_dissolution"] / tdfint["int_dust_ton_ha_yr"] + ) # --- return result return tdfint - - + + def carbAlk_adv( outdir: str, runname: str, var_fn: str = "flx_co2sp", cdvar: str = "ALK", convert_units: bool = True, - co2potential_g_mol_sil: float=88.02, # potential grams of CO2 per mole of alkalinity assuming 2:1 DIC_CO2:ALK - co2potential_g_mol_cc: float=44.01, # potential grams of CO2 per mole of alkalinity assuming 1:1 DIC_CO2:ALK + co2potential_g_mol_sil: float = 88.02, # potential grams of CO2 per mole of alkalinity assuming 2:1 DIC_CO2:ALK + co2potential_g_mol_cc: float = 44.01, # potential grams of CO2 per mole of alkalinity assuming 1:1 DIC_CO2:ALK ) -> pd.DataFrame: """ - Get the advective and storage fluxes of carbonate alkalinity over time. - Uses the *flx_co2sp* files and ALK variable by default. files are in + Get the advective and storage fluxes of carbonate alkalinity over time. + Uses the *flx_co2sp* files and ALK variable by default. files are in mol/m2/yr and output is same or in [mass]/m2/yr for co2potential. - - carbonate alkalinity is defined as: ALK = [HCO3]- + 2[CO3]--. - + + carbonate alkalinity is defined as: ALK = [HCO3]- + 2[CO3]--. + int_* fluxes are multiplied by time so the output is the time-integral. - Note, diffusive flux is generally negligibly small and source / sink - contributions are not computed in the input files so they're ignored here. + Note, diffusive flux is generally negligibly small and source / sink + contributions are not computed in the input files so they're ignored here. Parameters ---------- @@ -553,12 +602,12 @@ def carbAlk_adv( convert_units : bool if True, convert mol/m2/yr to ton/ha/yr, if False convert mol/m2/yr to g/m2/yr co2potential_g_mol_sil : float - molar mass of CO2 [g mol-1] used to convert mol/m2/yr to g/m2/yr. This refers to the potential - CO2 sequestration by a silicate feedstock where you could get two moles of DIC from CO2 per mole + molar mass of CO2 [g mol-1] used to convert mol/m2/yr to g/m2/yr. This refers to the potential + CO2 sequestration by a silicate feedstock where you could get two moles of DIC from CO2 per mole alkalinity co2potential_g_mol_cc : float - molar mass of CO2 [g mol-1] used to convert mol/m2/yr to g/m2/yr. This refers to the potential - CO2 sequestration by a carbonate feedstock where you could get one mole of DIC from CO2 per mole + molar mass of CO2 [g mol-1] used to convert mol/m2/yr to g/m2/yr. 
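Editor's note: a worked example of the alkalinity-to-CO2-potential defaults defined above: each mole of carbonate alkalinity maps to 2 mol CO2 (88.02 g) for a silicate feedstock and 1 mol CO2 (44.01 g) for a carbonate feedstock (flux value hypothetical):

calkflx_adv = 1.5                           # [mol ALK m-2 yr-1], hypothetical
co2pot_sil = calkflx_adv * 88.02            # ~132.0 g CO2 m-2 yr-1
co2pot_cc = calkflx_adv * 44.01             # ~66.0 g CO2 m-2 yr-1
print(round(co2pot_sil, 2), round(co2pot_cc, 2))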
This refers to the potential + CO2 sequestration by a carbonate feedstock where you could get one mole of DIC from CO2 per mole alkalinity Returns @@ -569,63 +618,78 @@ def carbAlk_adv( # ***************************************** # define unit conversion constants # for g/m2/yr to ton/ha/yr - ton_g = 1 / 1e6 # [ton g-1] - m2_ha = 10e3 # [m2 ha-1] - conv_factor = ton_g * m2_ha + ton_g = 1 / 1e6 # [ton g-1] + m2_ha = 10e3 # [m2 ha-1] + conv_factor = ton_g * m2_ha # ***************************************** - # get the txt file as a pandas dataframe + # get the txt file as a pandas dataframe df, dfint = get_data(outdir, runname, var_fn, cdvar) # pull out the relevant components - tdf = df.loc[:, ['time', 'tflx', 'adv']].rename(columns={'time': 'time', - 'tflx': 'calkflx_tflx', - 'adv': 'calkflx_adv'}) - tdfint = dfint.loc[:, ['time', 'tflx', 'adv']].rename(columns={'time': 'time', - 'tflx': 'calkflx_tflx', - 'adv': 'calkflx_adv'}) + tdf = df.loc[:, ["time", "tflx", "adv"]].rename( + columns={"time": "time", "tflx": "calkflx_tflx", "adv": "calkflx_adv"} + ) + tdfint = dfint.loc[:, ["time", "tflx", "adv"]].rename( + columns={"time": "time", "tflx": "calkflx_tflx", "adv": "calkflx_adv"} + ) # get total carbonate alkalinity flux (add the storage back in) - tdf['calkflx_tot'] = tdf['calkflx_adv'] + tdf['calkflx_tflx'] - tdfint['calkflx_tot'] = tdfint['calkflx_adv'] + tdfint['calkflx_tflx'] + tdf["calkflx_tot"] = tdf["calkflx_adv"] + tdf["calkflx_tflx"] + tdfint["calkflx_tot"] = tdfint["calkflx_adv"] + tdfint["calkflx_tflx"] # compute potential flux if convert_units: # get co2 potentials in ton/ha/yr # advective flx - tdf['co2pot_adv_tonHaYr_sil'] = tdf['calkflx_adv'] * co2potential_g_mol_sil * conv_factor - tdf['co2pot_adv_tonHaYr_cc'] = tdf['calkflx_adv'] * co2potential_g_mol_cc * conv_factor - tdfint['co2pot_adv_tonHa_sil'] = tdfint['calkflx_adv'] * co2potential_g_mol_sil * conv_factor - tdfint['co2pot_adv_tonHa_cc'] = tdfint['calkflx_adv'] * co2potential_g_mol_cc * conv_factor + tdf["co2pot_adv_tonHaYr_sil"] = ( + tdf["calkflx_adv"] * co2potential_g_mol_sil * conv_factor + ) + tdf["co2pot_adv_tonHaYr_cc"] = ( + tdf["calkflx_adv"] * co2potential_g_mol_cc * conv_factor + ) + tdfint["co2pot_adv_tonHa_sil"] = ( + tdfint["calkflx_adv"] * co2potential_g_mol_sil * conv_factor + ) + tdfint["co2pot_adv_tonHa_cc"] = ( + tdfint["calkflx_adv"] * co2potential_g_mol_cc * conv_factor + ) # advective + storage flx - tdf['co2pot_tot_tonHaYr_sil'] = tdf['calkflx_tot'] * co2potential_g_mol_sil * conv_factor - tdf['co2pot_tot_tonHaYr_cc'] = tdf['calkflx_tot'] * co2potential_g_mol_cc * conv_factor - tdfint['co2pot_tot_tonHa_sil'] = tdfint['calkflx_tot'] * co2potential_g_mol_sil * conv_factor - tdfint['co2pot_tot_tonHa_cc'] = tdfint['calkflx_tot'] * co2potential_g_mol_cc * conv_factor + tdf["co2pot_tot_tonHaYr_sil"] = ( + tdf["calkflx_tot"] * co2potential_g_mol_sil * conv_factor + ) + tdf["co2pot_tot_tonHaYr_cc"] = ( + tdf["calkflx_tot"] * co2potential_g_mol_cc * conv_factor + ) + tdfint["co2pot_tot_tonHa_sil"] = ( + tdfint["calkflx_tot"] * co2potential_g_mol_sil * conv_factor + ) + tdfint["co2pot_tot_tonHa_cc"] = ( + tdfint["calkflx_tot"] * co2potential_g_mol_cc * conv_factor + ) - else: # get co2 potentials in g/m2/yr + else: # get co2 potentials in g/m2/yr # advective flx - tdf['co2pot_adv_gm2Yr_sil'] = tdf['calkflx_adv'] * co2potential_g_mol_sil - tdf['co2pot_adv_gm2Yr_cc'] = tdf['calkflx_adv'] * co2potential_g_mol_cc - tdfint['co2pot_adv_gm2_sil'] = tdfint['calkflx_adv'] * co2potential_g_mol_sil - 
tdfint['co2pot_adv_gm2_cc'] = tdfint['calkflx_adv'] * co2potential_g_mol_cc + tdf["co2pot_adv_gm2Yr_sil"] = tdf["calkflx_adv"] * co2potential_g_mol_sil + tdf["co2pot_adv_gm2Yr_cc"] = tdf["calkflx_adv"] * co2potential_g_mol_cc + tdfint["co2pot_adv_gm2_sil"] = tdfint["calkflx_adv"] * co2potential_g_mol_sil + tdfint["co2pot_adv_gm2_cc"] = tdfint["calkflx_adv"] * co2potential_g_mol_cc # advective + storage flx - tdf['co2pot_tot_gm2Yr_sil'] = tdf['calkflx_tot'] * co2potential_g_mol_sil - tdf['co2pot_tot_gm2Yr_cc'] = tdf['calkflx_tot'] * co2potential_g_mol_cc - tdfint['co2pot_tot_gm2_sil'] = tdfint['calkflx_tot'] * co2potential_g_mol_sil - tdfint['co2pot_tot_gm2_cc'] = tdfint['calkflx_tot'] * co2potential_g_mol_cc + tdf["co2pot_tot_gm2Yr_sil"] = tdf["calkflx_tot"] * co2potential_g_mol_sil + tdf["co2pot_tot_gm2Yr_cc"] = tdf["calkflx_tot"] * co2potential_g_mol_cc + tdfint["co2pot_tot_gm2_sil"] = tdfint["calkflx_tot"] * co2potential_g_mol_sil + tdfint["co2pot_tot_gm2_cc"] = tdfint["calkflx_tot"] * co2potential_g_mol_cc # multiply tdfint columns by time (required to output time-integrated fluxes) - tdfint = tdfint.apply(lambda x: x * tdfint['time'] if x.name not in ['time'] else x) + tdfint = tdfint.apply(lambda x: x * tdfint["time"] if x.name not in ["time"] else x) - # --- tidy up output - tdf['units'] = 'mol m-2 yr' - tdfint['units'] = 'mol m-2' - tdfint['flx_type'] = "int_flx" - tdf['flx_type'] = "flx" - tdfint['runname'] = tdf['runname'] = runname - tdfint['var'] = tdf['var'] = cdvar - - # combine + tdf["units"] = "mol m-2 yr" + tdfint["units"] = "mol m-2" + tdfint["flx_type"] = "int_flx" + tdf["flx_type"] = "flx" + tdfint["runname"] = tdf["runname"] = runname + tdfint["var"] = tdf["var"] = cdvar + + # combine outdf = pd.concat([tdf, tdfint], axis=0, ignore_index=True) return outdf @@ -635,17 +699,17 @@ def sumCat_adv( runname: str, var_fn: str = "flx_aq", catvars_charge: dict = {"ca": 2, "mg": 2, "k": 1, "na": 1}, - convert_units: bool = True + convert_units: bool = True, ) -> pd.DataFrame: """ - Get the advective and storage fluxes of the sum of cations over time. - Uses the *flx_aq* files and cation variable by default. files are in - mol/m2/yr and output is same or in [mass]/m2/yr for co2potential. - + Get the advective and storage fluxes of the sum of cations over time. + Uses the *flx_aq* files and cation variable by default. files are in + mol/m2/yr and output is same or in [mass]/m2/yr for co2potential. + int_* fluxes are multiplied by time so the output is the time-integral. - Note, diffusive flux is generally negligibly small and source / sink - contributions are not computed in the input files so they're ignored here. + Note, diffusive flux is generally negligibly small and source / sink + contributions are not computed in the input files so they're ignored here. Parameters ---------- @@ -656,7 +720,7 @@ def sumCat_adv( var_fn : str base name of the SCEPTER flx file. Format is "[basename]-[cdvar].txt" catvars_charge : dict - dict of variable used for cation metric. key is the variable, value is the charge. + dict of variable used for cation metric. key is the variable, value is the charge. 
Should align with var_fn such that format is "[basename]-[cdvar].txt" convert_units : bool if True, convert mol/m2/yr to ton/ha/yr, if False convert mol/m2/yr to g/m2/yr @@ -671,59 +735,76 @@ def sumCat_adv( # ***************************************** # define unit conversion constants # for g/m2/yr to ton/ha/yr - ton_g = 1 / 1e6 # [ton g-1] - m2_ha = 10e3 # [m2 ha-1] - conv_factor = ton_g * m2_ha + ton_g = 1 / 1e6 # [ton g-1] + m2_ha = 10e3 # [m2 ha-1] + conv_factor = ton_g * m2_ha # ***************************************** # find which cations are tracked - catvars_exist = find_cations(outdir, runname, var_fn=var_fn, catvars_charge=catvars_charge) - + catvars_exist = find_cations( + outdir, runname, var_fn=var_fn, catvars_charge=catvars_charge + ) + # loop through cation files and compile out_dict = {} for cat in catvars_exist: # get cation charge ccharge = catvars_charge[cat] - # get the txt file as a pandas dataframe + # get the txt file as a pandas dataframe tmpdf, tmpdfint = get_data(outdir, runname, var_fn, cat) - # get the summary file for this cation that we'll save - outdf_cat, nonintdf, intdf = build_cation_df(runname, cat, ccharge, tmpdf, tmpdfint) + # get the summary file for this cation that we'll save + outdf_cat, nonintdf, intdf = build_cation_df( + runname, cat, ccharge, tmpdf, tmpdfint + ) # add to the output dictionary out_dict[cat] = outdf_cat - # get the columns we want and append with charge because we'll multiply them by their charge shortly - tdf = nonintdf.loc[:, ['time', 'tflx', 'adv', 'carbsld_source', 'noncarbsld_source']].rename( - columns={'time': 'time', 'tflx': 'tflx_charge', 'adv': 'adv_charge', - 'carbsld_source': 'carbsld_source_charge', 'noncarbsld_source': 'noncarbsld_source_charge'} + tdf = nonintdf.loc[ + :, ["time", "tflx", "adv", "carbsld_source", "noncarbsld_source"] + ].rename( + columns={ + "time": "time", + "tflx": "tflx_charge", + "adv": "adv_charge", + "carbsld_source": "carbsld_source_charge", + "noncarbsld_source": "noncarbsld_source_charge", + } ) - tdfint = intdf.loc[:, ['time', 'tflx', 'adv', 'carbsld_source', 'noncarbsld_source']].rename( - columns={'time': 'time', 'tflx': 'tflx_charge', 'adv': 'adv_charge', - 'carbsld_source': 'carbsld_source_charge', 'noncarbsld_source': 'noncarbsld_source_charge'} + tdfint = intdf.loc[ + :, ["time", "tflx", "adv", "carbsld_source", "noncarbsld_source"] + ].rename( + columns={ + "time": "time", + "tflx": "tflx_charge", + "adv": "adv_charge", + "carbsld_source": "carbsld_source_charge", + "noncarbsld_source": "noncarbsld_source_charge", + } ) # get their charge flux - tdf = tdf.apply(lambda x: x * ccharge if x.name not in ['time'] else x) - tdfint = tdfint.apply(lambda x: x * ccharge if x.name not in ['time'] else x) + tdf = tdf.apply(lambda x: x * ccharge if x.name not in ["time"] else x) + tdfint = tdfint.apply(lambda x: x * ccharge if x.name not in ["time"] else x) # --- compute charge adjusted cation flux # advective flx (sum of -tflx -noncarbsld_source -carbsld_source/2) # compute potential flux if convert_units: # get co2 potentials in ton/ha/yr # advective flx (sum of -tflx -noncarbsld_source -carbsld_source/2) - tdf['co2pot_adv_tonHaYr'] = nonintdf['co2pot_adv_tonHaYr'] - tdfint['co2pot_adv_tonHa'] = intdf['co2pot_adv_tonHa'] + tdf["co2pot_adv_tonHaYr"] = nonintdf["co2pot_adv_tonHaYr"] + tdfint["co2pot_adv_tonHa"] = intdf["co2pot_adv_tonHa"] # advective plus storage flx (sum of -noncarbsld_source -carbsld_source/2) - tdf['co2pot_tot_tonHaYr'] = nonintdf['co2pot_tot_tonHaYr'] - 
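Editor's note: the charge multiplication above converts each cation flux to charge equivalents before the cations are summed; a minimal sketch of the end result with hypothetical advective fluxes:

catvars_charge = {"ca": 2, "mg": 2, "k": 1, "na": 1}       # as defined above
adv = {"ca": 0.25, "mg": 0.125, "k": 0.5, "na": 0.25}      # [mol m-2 yr-1], hypothetical
adv_charge = sum(adv[cat] * q for cat, q in catvars_charge.items())
print(adv_charge)   # 1.5 mol charge (eq) m-2 yr-1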
tdfint['co2pot_tot_tonHa'] = intdf['co2pot_tot_tonHa'] + tdf["co2pot_tot_tonHaYr"] = nonintdf["co2pot_tot_tonHaYr"] + tdfint["co2pot_tot_tonHa"] = intdf["co2pot_tot_tonHa"] - else: # get co2 potentials in g/m2/yr + else: # get co2 potentials in g/m2/yr # advective flx (sum of -tflx -noncarbsld_source -carbsld_source/2) - tdf['co2pot_adv_gm2Yr'] = nonintdf['co2pot_adv_gm2Yr'] - tdfint['co2pot_adv_gm2'] = intdf['co2pot_adv_gm2'] + tdf["co2pot_adv_gm2Yr"] = nonintdf["co2pot_adv_gm2Yr"] + tdfint["co2pot_adv_gm2"] = intdf["co2pot_adv_gm2"] # advective plus storage flx (sum of -noncarbsld_source -carbsld_source/2) - tdf['co2pot_tot_gm2Yr'] = nonintdf['co2pot_tot_gm2Yr'] - tdfint['co2pot_tot_gm2'] = intdf['co2pot_tot_gm2'] + tdf["co2pot_tot_gm2Yr"] = nonintdf["co2pot_tot_gm2Yr"] + tdfint["co2pot_tot_gm2"] = intdf["co2pot_tot_gm2"] # if it's the first loop, create the output dataframe if cat == catvars_exist[0]: @@ -731,33 +812,38 @@ def sumCat_adv( outdfint = tdfint.copy() else: # add all columns except time (effectively adding the new cation into the mix) - outdf = outdf.drop(columns='time').copy() + tdf.drop(columns='time').copy() - outdfint = outdfint.drop(columns='time').copy() + tdfint.drop(columns='time').copy() + outdf = outdf.drop(columns="time").copy() + tdf.drop(columns="time").copy() + outdfint = ( + outdfint.drop(columns="time").copy() + + tdfint.drop(columns="time").copy() + ) # add time back in - outdf.insert(0, 'time', tdf['time']) - outdfint.insert(0, 'time', tdfint['time']) - + outdf.insert(0, "time", tdf["time"]) + outdfint.insert(0, "time", tdfint["time"]) + # multiply tdfint columns by time (required to output time-integrated fluxes) - outdfint = outdfint.apply(lambda x: x * outdfint['time'] if x.name not in ['time'] else x) + outdfint = outdfint.apply( + lambda x: x * outdfint["time"] if x.name not in ["time"] else x + ) # --- tidy up output - outdf['units'] = 'mol m-2 yr x charge' - outdfint['units'] = 'mol m-2 x charge' - outdfint['flx_type'] = "int_flx" - outdf['flx_type'] = "flx" - outdfint['runname'] = outdf['runname'] = runname - outdfint['var'] = outdf['var'] = '+'.join(catvars_exist) - - # combine + outdf["units"] = "mol m-2 yr x charge" + outdfint["units"] = "mol m-2 x charge" + outdfint["flx_type"] = "int_flx" + outdf["flx_type"] = "flx" + outdfint["runname"] = outdf["runname"] = runname + outdfint["var"] = outdf["var"] = "+".join(catvars_exist) + + # combine outdfx = pd.concat([outdf, outdfint], axis=0, ignore_index=True) return outdfx, out_dict - + def find_cations( outdir: str, runname: str, var_fn: str = "flx_aq", - catvars_charge: dict = {"ca": 2, "mg": 2, "k": 1, "na": 1} + catvars_charge: dict = {"ca": 2, "mg": 2, "k": 1, "na": 1}, ) -> list: """ Get a list of cation files present in the flux output. @@ -771,19 +857,19 @@ def find_cations( var_fn : str base name of the SCEPTER flx file. Format is "[basename]-[cdvar].txt" catvars_charge : dict - dict of variable used for cation metric. key is the variable, value is the charge. + dict of variable used for cation metric. key is the variable, value is the charge. Should align with var_fn such that format is "[basename]-[cdvar].txt" - + Returns ------- - list - list of catvars that are presenet in the output. + list + list of catvars that are presenet in the output. 
""" # create empty list to hold existing catvars outlist = [] for key, val in catvars_charge.items(): # build the path / fn - fn = f"{var_fn}-{key}.txt" # flux timeseries + fn = f"{var_fn}-{key}.txt" # flux timeseries file_path = os.path.join(outdir, runname, "flx", fn) if os.path.exists(file_path): outlist.append(key) @@ -792,16 +878,16 @@ def find_cations( def build_cation_df( - runname: str, + runname: str, cation: str, ccharge: float, - tmpdf: pd.DataFrame, + tmpdf: pd.DataFrame, tmpdfint: pd.DataFrame, - inorganic_sp_list: list = ['cc', 'dlm', 'arg'], + inorganic_sp_list: list = ["cc", "dlm", "arg"], negligible_val: float = 1e-7, - co2potential_g_mol: float=44.01, - convert_units: bool=True -)-> pd.DataFrame: + co2potential_g_mol: float = 44.01, + convert_units: bool = True, +) -> pd.DataFrame: """ read in the two cation dataframes, clean them up, return them @@ -834,88 +920,105 @@ def build_cation_df( # ***************************************** # define unit conversion constants # for g/m2/yr to ton/ha/yr - ton_g = 1 / 1e6 # [ton g-1] - m2_ha = 10e3 # [m2 ha-1] - conv_factor = ton_g * m2_ha + ton_g = 1 / 1e6 # [ton g-1] + m2_ha = 10e3 # [m2 ha-1] + conv_factor = ton_g * m2_ha # ***************************************** # list of columns we cannot trim columns_to_keep = ["time", "adv", "tflx", "res"] # remove empty or negligible columns (to simplify) (but keep "time" and "res" no matter what) - tmpdf = tmpdf.loc[:, (np.abs(tmpdf) > 1e-7).any(axis=0) | tmpdf.columns.isin(columns_to_keep)].copy() - tmpdfint = tmpdfint.loc[:, (np.abs(tmpdfint) > 1e-7).any(axis=0) | tmpdfint.columns.isin(columns_to_keep)].copy() - + tmpdf = tmpdf.loc[ + :, (np.abs(tmpdf) > 1e-7).any(axis=0) | tmpdf.columns.isin(columns_to_keep) + ].copy() + tmpdfint = tmpdfint.loc[ + :, + (np.abs(tmpdfint) > 1e-7).any(axis=0) | tmpdfint.columns.isin(columns_to_keep), + ].copy() + # find out how many carbon-bearing minerals are present inorg_sp = list(set(inorganic_sp_list) & set(tmpdf.columns)) inorg_sp_int = list(set(inorganic_sp_list) & set(tmpdfint.columns)) # get other sources - exclude_cols = inorg_sp + ['time', 'tflx', 'adv', 'res'] - exclude_cols_int = inorg_sp_int + ['time', 'tflx', 'adv', 'res'] + exclude_cols = inorg_sp + ["time", "tflx", "adv", "res"] + exclude_cols_int = inorg_sp_int + ["time", "tflx", "adv", "res"] noninorg_sources = tmpdf.columns.difference(exclude_cols, sort=False) noninorg_sources_int = tmpdfint.columns.difference(exclude_cols, sort=False) # add columns for individual sources times the charge # -- noninorganic sources if len(noninorg_sources) > 0: - tmpdf.loc[:, 'noncarbsld_source'] = tmpdf[noninorg_sources].sum(axis=1) + tmpdf.loc[:, "noncarbsld_source"] = tmpdf[noninorg_sources].sum(axis=1) else: - tmpdf['noncarbsld_source'] = 0. + tmpdf["noncarbsld_source"] = 0.0 if len(noninorg_sources_int) > 0: - tmpdfint.loc[:, 'noncarbsld_source'] = tmpdfint[noninorg_sources_int].sum(axis=1) + tmpdfint.loc[:, "noncarbsld_source"] = tmpdfint[noninorg_sources_int].sum( + axis=1 + ) else: - tmpdfint['noncarbsld_source'] = 0. - + tmpdfint["noncarbsld_source"] = 0.0 + # -- inorganic sources if len(inorg_sp) > 0: - tmpdf.loc[:, 'carbsld_source'] = tmpdf[inorg_sp].sum(axis=1) + tmpdf.loc[:, "carbsld_source"] = tmpdf[inorg_sp].sum(axis=1) else: - tmpdf['carbsld_source'] = 0. 
+ tmpdf["carbsld_source"] = 0.0 if len(inorg_sp_int) > 0: - tmpdfint.loc[:, 'carbsld_source'] = tmpdfint[inorg_sp_int].sum(axis=1) + tmpdfint.loc[:, "carbsld_source"] = tmpdfint[inorg_sp_int].sum(axis=1) else: - tmpdfint['carbsld_source'] = 0. + tmpdfint["carbsld_source"] = 0.0 # advective flx (sum of -tflx -noncarbsld_source -carbsld_source/2) - adv_cat = -tmpdf['tflx'] - tmpdf['noncarbsld_source'] - tmpdf['carbsld_source']/2 - adv_cat_int = -tmpdfint['tflx'] - tmpdfint['noncarbsld_source'] - tmpdfint['carbsld_source']/2 + adv_cat = -tmpdf["tflx"] - tmpdf["noncarbsld_source"] - tmpdf["carbsld_source"] / 2 + adv_cat_int = ( + -tmpdfint["tflx"] + - tmpdfint["noncarbsld_source"] + - tmpdfint["carbsld_source"] / 2 + ) # advective plus storage flx (sum of -noncarbsld_source -carbsld_source/2) - tot_cat = -tmpdf['noncarbsld_source'] - tmpdf['carbsld_source']/2 - tot_cat_int = -tmpdfint['noncarbsld_source'] - tmpdfint['carbsld_source']/2 + tot_cat = -tmpdf["noncarbsld_source"] - tmpdf["carbsld_source"] / 2 + tot_cat_int = -tmpdfint["noncarbsld_source"] - tmpdfint["carbsld_source"] / 2 # compute potential flux if convert_units: # get co2 potentials in ton/ha/yr # advective flx (sum of -tflx -noncarbsld_source -carbsld_source/2) - tmpdf['co2pot_adv_tonHaYr'] = adv_cat * ccharge * co2potential_g_mol * conv_factor - tmpdfint['co2pot_adv_tonHa'] = adv_cat_int * ccharge * co2potential_g_mol * conv_factor + tmpdf["co2pot_adv_tonHaYr"] = ( + adv_cat * ccharge * co2potential_g_mol * conv_factor + ) + tmpdfint["co2pot_adv_tonHa"] = ( + adv_cat_int * ccharge * co2potential_g_mol * conv_factor + ) # advective plus storage flx (sum of -noncarbsld_source -carbsld_source/2) - tmpdf['co2pot_tot_tonHaYr'] = tot_cat * ccharge * co2potential_g_mol * conv_factor - tmpdfint['co2pot_tot_tonHa'] = tot_cat_int * ccharge * co2potential_g_mol * conv_factor + tmpdf["co2pot_tot_tonHaYr"] = ( + tot_cat * ccharge * co2potential_g_mol * conv_factor + ) + tmpdfint["co2pot_tot_tonHa"] = ( + tot_cat_int * ccharge * co2potential_g_mol * conv_factor + ) - else: # get co2 potentials in g/m2/yr + else: # get co2 potentials in g/m2/yr # advective flx (sum of -tflx -noncarbsld_source -carbsld_source/2) - tmpdf['co2pot_adv_gm2Yr'] = adv_cat * ccharge * co2potential_g_mol - tmpdfint['co2pot_adv_gm2'] = adv_cat_int * ccharge * co2potential_g_mol + tmpdf["co2pot_adv_gm2Yr"] = adv_cat * ccharge * co2potential_g_mol + tmpdfint["co2pot_adv_gm2"] = adv_cat_int * ccharge * co2potential_g_mol # advective plus storage flx (sum of -noncarbsld_source -carbsld_source/2) - tmpdf['co2pot_tot_gm2Yr'] = tot_cat * ccharge * co2potential_g_mol - tmpdfint['co2pot_tot_gm2'] = tot_cat_int * ccharge * co2potential_g_mol + tmpdf["co2pot_tot_gm2Yr"] = tot_cat * ccharge * co2potential_g_mol + tmpdfint["co2pot_tot_gm2"] = tot_cat_int * ccharge * co2potential_g_mol # add qualitative columns - tmpdf['units'] = 'mol m-2 yr' - tmpdfint['units'] = 'mol m-2' - tmpdfint['flx_type'] = "int_flx" - tmpdf['flx_type'] = "flx" - tmpdfint['runname'] = tmpdf['runname'] = runname - tmpdfint['cation'] = tmpdf['cation'] = cation - tmpdfint['charge'] = tmpdf['charge'] = ccharge + tmpdf["units"] = "mol m-2 yr" + tmpdfint["units"] = "mol m-2" + tmpdfint["flx_type"] = "int_flx" + tmpdf["flx_type"] = "flx" + tmpdfint["runname"] = tmpdf["runname"] = runname + tmpdfint["cation"] = tmpdf["cation"] = cation + tmpdfint["charge"] = tmpdf["charge"] = ccharge # --- return - # combine + # combine outdf = pd.concat([tmpdf, tmpdfint], axis=0, ignore_index=True) return outdf, tmpdf, tmpdfint - - 
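To make the conversion in build_cation_df concrete: the advective cation flux is assembled as -tflx - noncarbsld_source - carbsld_source/2, weighted by the ion charge, and scaled by 44.01 g CO2 per mol of charge (times 1e-6 ton/g and 1e4 m2/ha when converting to ton/ha/yr). Below is a minimal, self-contained sketch of that arithmetic under assumed values; the flux numbers and the Ca2+ choice are invented for illustration, and only the formula and constants follow the code in the patch.

# Illustrative sketch of the charge-weighted CO2-potential conversion (assumed values).
import pandas as pd

co2potential_g_mol = 44.01              # g CO2 per mol of charge equivalent
conv_factor = (1 / 1e6) * 1e4           # g/m2/yr -> ton/ha/yr

# hypothetical Ca fluxes in mol/m2/yr standing in for SCEPTER's flx_aq columns
df = pd.DataFrame(
    {
        "time": [1.0, 2.0],
        "tflx": [-0.05, -0.04],
        "noncarbsld_source": [-0.02, -0.02],
        "carbsld_source": [-0.01, -0.01],
    }
)
ccharge = 2  # Ca2+

# advective flux: -tflx - noncarbsld_source - carbsld_source/2
adv_cat = -df["tflx"] - df["noncarbsld_source"] - df["carbsld_source"] / 2
df["co2pot_adv_tonHaYr"] = adv_cat * ccharge * co2potential_g_mol * conv_factor
print(df)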
-# %% +# %% # ****************************************************************************** # -------------------------- CDR METRIC MAIN ----------------------------------- def cflx_calc( @@ -924,8 +1027,8 @@ def cflx_calc( feedstock: str, dust_from_file: bool = True, convert_units: bool = True, - save_dir: str = 'postproc_flxs', - calc_list: list = ['co2_flx', 'carbAlk_adv', 'sumCat_adv', 'sld_flx'] + save_dir: str = "postproc_flxs", + calc_list: list = ["co2_flx", "carbAlk_adv", "sumCat_adv", "sld_flx"], ): """ compute various cdr-relevant c flux metrics in the postprocessing @@ -940,8 +1043,8 @@ def cflx_calc( feedstock : str ID of the feedstock used (for rock dissolution calculation) dust_from_file : bool - if True, then compute integrated dust fluxes from the dust.txt file. Should be - set to True for re-application runs since the int_sld* files are patched + if True, then compute integrated dust fluxes from the dust.txt file. Should be + set to True for re-application runs since the int_sld* files are patched together from multiple 1-year runs, so they don't reflect the true time integral (just the timeSTEP integral). This ensures that the integral is correct convert_units : bool @@ -961,177 +1064,170 @@ def cflx_calc( if not os.path.exists(savehere): os.makedirs(savehere) - # [1] --- CO2 FLUXES ------------------------------------------------ if "co2_flx" in calc_list: - co2df = co2_flx(outdir, runname, var_fn = "flx_gas", cdvar= "pco2", - organic_sp_list = ["g1", "g2", "g3"], - inorganic_sp_list = ["arg", "cc", "dlm"], - convert_units = convert_units, - co2_g_mol = 44.01 - ) + co2df = co2_flx( + outdir, + runname, + var_fn="flx_gas", + cdvar="pco2", + organic_sp_list=["g1", "g2", "g3"], + inorganic_sp_list=["arg", "cc", "dlm"], + convert_units=convert_units, + co2_g_mol=44.01, + ) # save savename = "co2_flxs.pkl" co2df.to_pickle(os.path.join(savehere, savename)) - # [2] --- CARBONATE ALKALINITY FLUXES --------------------------------- if "carbAlk_adv" in calc_list: - cAlkdf = carbAlk_adv(outdir, runname, var_fn = "flx_co2sp", cdvar = "ALK", - convert_units = convert_units, - co2potential_g_mol_sil = 88.02, - co2potential_g_mol_cc = 44.01 - ) + cAlkdf = carbAlk_adv( + outdir, + runname, + var_fn="flx_co2sp", + cdvar="ALK", + convert_units=convert_units, + co2potential_g_mol_sil=88.02, + co2potential_g_mol_cc=44.01, + ) # save savename = "carbAlk_flxs.pkl" cAlkdf.to_pickle(os.path.join(savehere, savename)) - # [3] --- SUM OF CATION FLUXES ----------------------------------------- - if "sumCat_adv" in calc_list: - sumCatdf, sumCatdict = sumCat_adv(outdir, runname, var_fn = "flx_aq", - catvars_charge = {"ca": 2, "mg": 2, "k": 1, "na": 1}, - convert_units = True) + if "sumCat_adv" in calc_list: + sumCatdf, sumCatdict = sumCat_adv( + outdir, + runname, + var_fn="flx_aq", + catvars_charge={"ca": 2, "mg": 2, "k": 1, "na": 1}, + convert_units=True, + ) # save the main file savename_main = "cationflx_sum.pkl" sumCatdf.to_pickle(os.path.join(savehere, savename_main)) - + # save the output dictionary for name, df in sumCatdict.items(): # create file name using the dictionary key - savename = f'cationflx_{name}.pkl' + savename = f"cationflx_{name}.pkl" # save df.to_pickle(os.path.join(savehere, savename)) - # [4] --- ROCK DISSOLUTION ---------------------------------------------- - if "sld_flx" in calc_list: + if "sld_flx" in calc_list: # feedstock must be in list to loop through if not isinstance(feedstock, list): feedstock = [feedstock] for fs in feedstock: - rockdf = sld_flx(outdir, 
runname, fs, var_fn = "flx_sld", - dust_from_file = True, - molar_mass_dict = molar_mass_dict) + rockdf = sld_flx( + outdir, + runname, + fs, + var_fn="flx_sld", + dust_from_file=True, + molar_mass_dict=molar_mass_dict, + ) # save savename = f"rockflx_{fs}.pkl" rockdf.to_pickle(os.path.join(savehere, savename)) - - - -# %% +# %% # ****************************************************************************** # ----------------------- PROFILE METRIC FUNCTIONS --------------------------------- # dictionary of postprocess function inputs postproc_prof_dict = { - "adsorbed": { "var_prefix": "prof_aq(ads)", "var_units": "mol/L", }, - # --- requires special function (due to base saturation calc) "adsorbed_percCEC": { "var_prefix": "prof_aq(ads%cec)", "var_units": "%cec", }, # ---------------------------------------------------------- - "adsorbed_ppm": { "var_prefix": "prof_aq(adsppm)", "var_units": "ppm", }, - "aqueous": { "var_prefix": "prof_aq", "var_units": "mol/L", }, - "aqueous_total": { "var_prefix": "prof_aq(tot)", "var_units": "mol/L", }, - # --- requires special fxn (due to variable units) - "bulksoil": { - "var_prefix": "bsd" - }, + "bulksoil": {"var_prefix": "bsd"}, # ------------------------------------------------- - "exchange_total": { "var_prefix": "prof_ex(tot)", "var_units": "mol/L", }, - "gas": { "var_prefix": "prof_gas", "var_units": "atm", "calculate_mean": False, }, - "rate": { "var_prefix": "rate", "var_units": "mol/m2/yr", }, - # --- requires special treatment because it's from the lab run "soil_ph": { "var_prefix": "prof_aq", "var_units": "mol/L", }, # ------------------------------------------------------------- - "solid": { "var_prefix": "prof_sld", "var_units": "mol/m3", }, - "solid_sp_saturation": { "var_prefix": "sat_sld", "var_units": "X", - }, - + }, "solid_volumePercent": { "var_prefix": "prof_sld(v%)", "var_units": "%vol", }, - "solid_weightPercent": { "var_prefix": "prof_sld(wt%)", "var_units": "wt%", }, - "specific_surface_area": { "var_prefix": "ssa", "var_units": "m2/g(?)", }, - "surface_area": { "var_prefix": "sa", "var_units": "X", }, } + def profile_to_ds_optMean( outdir: str, runname: str, var_prefix: str, var_units: str, - time_indices: np.array=np.arange(1,21,1), - calculate_mean: bool=True, - depth_mean_suffix: str="coredep", - run_subdir: str="prof", + time_indices: np.array = np.arange(1, 21, 1), + calculate_mean: bool = True, + depth_mean_suffix: str = "coredep", + run_subdir: str = "prof", ) -> xr.Dataset: - ''' + """ Collects the profile data from a given var_prefix for the defined - time_indices. Converts from pandas dataframes into xr datasets - defined over depth and time. If calculate_mean is true, then - the mean of each variable is calculated from depth 0 to depth i. + time_indices. Converts from pandas dataframes into xr datasets + defined over depth and time. If calculate_mean is true, then + the mean of each variable is calculated from depth 0 to depth i. - Note, the averaging calculation only works for equally spaced depth + Note, the averaging calculation only works for equally spaced depth grids (which SCEPTER is by default) Parameters @@ -1142,21 +1238,21 @@ def profile_to_ds_optMean( name of the SCEPTER run (equivalent to the directory within outdir). Generally _field. var_prefix : str prefix of the file to read in the 'prof' subdirectory. Includes all characters up to `*-xxx.txt` - where xxx is a 3 digit numeric from 001 to 020. + where xxx is a 3 digit numeric from 001 to 020. 
var_units : str units for the variables you're reading in (solid profiles, for example, are often mol/m3) time_indices : np.array - SCEPTER outputs 20 time indices as profiles. This array selects which to include in the + SCEPTER outputs 20 time indices as profiles. This array selects which to include in the output dataset. Default is all 20. calculate_mean : bool - [True | False] true means the mean of all variables is calculated at each depth (as + [True | False] true means the mean of all variables is calculated at each depth (as though someone took a sample to that depth and calculated the mean) depth_mean_suffix : str - Only used if calculate_mean == True. This is the variable name suffix to append if + Only used if calculate_mean == True. This is the variable name suffix to append if we calculate the depth mean run_subdir : str name of the subdirectory that holds the profile files ("prof" by default) - ''' + """ # loop through time indices to extract data for ts in time_indices: if ts == time_indices[0]: @@ -1166,17 +1262,23 @@ def profile_to_ds_optMean( outdf = pd.concat([outdf, tmpdf]) # convert to an xarray dataset - ds = outdf.set_index(['depth', 'time']).to_xarray() - + ds = outdf.set_index(["depth", "time"]).to_xarray() + if calculate_mean: # get the mean wt% from depth 0 to depth i for every depth # calculate the cumulative sum and count over the 'depth' dimension - cumsum_ds = ds.cumsum(dim='depth') - cumcount_ds = xr.ones_like(ds).cumsum(dim='depth') # get the cumulative count (cumsum / cumcount = mean; since depth is equally spaced) + cumsum_ds = ds.cumsum(dim="depth") + cumcount_ds = xr.ones_like( + ds + ).cumsum( + dim="depth" + ) # get the cumulative count (cumsum / cumcount = mean; since depth is equally spaced) # calculate the average by dividing the cumulative sum by the cumulative count average_ds = cumsum_ds / cumcount_ds - average_ds_renamed = average_ds.rename({var: f"{var}_{depth_mean_suffix}" for var in average_ds.data_vars}) + average_ds_renamed = average_ds.rename( + {var: f"{var}_{depth_mean_suffix}" for var in average_ds.data_vars} + ) # add new variables to the original dataset ds = xr.merge([ds, average_ds_renamed]) @@ -1184,7 +1286,7 @@ def profile_to_ds_optMean( # add units for var in ds.data_vars: ds[var].attrs["units"] = var_units # set the same units for all variables - + # add output file type ds.attrs["outfile"] = var_prefix @@ -1192,11 +1294,9 @@ def profile_to_ds_optMean( return ds -def get_bsd_prof_units( - var_name -)-> str: - ''' - Read in a variable name and extract the units from it. +def get_bsd_prof_units(var_name) -> str: + """ + Read in a variable name and extract the units from it. Return just the units as a string. Units are denoted by [brackets]. 
If no brackets, return 'NA' @@ -1204,30 +1304,30 @@ def get_bsd_prof_units( ---------- var_name : str name of the data_var - + Returns ------- str the value within the brackets of var_name (or "NA" if no brackets) - ''' + """ # check if the variable name contains units in brackets (e.g., var1[m/yr]) - match = re.search(r'\[(.*?)\]', var_name) + match = re.search(r"\[(.*?)\]", var_name) if match: # If units are found, return them return match.group(1) else: # If no units are found, return 'NA' - return 'NA' + return "NA" def bsd_profile_to_ds( outdir: str, runname: str, var_prefix: str, - time_indices: np.array=np.arange(1,21,1), - run_subdir: str="prof", + time_indices: np.array = np.arange(1, 21, 1), + run_subdir: str = "prof", ) -> xr.Dataset: - ''' + """ Collects the bsd profile data and turns it into an xarray defined over time and depth. Units that are in the brackets of column names are also defined at each attribute using the get_bsd_prof_units function @@ -1240,18 +1340,18 @@ def bsd_profile_to_ds( name of the SCEPTER run (equivalent to the directory within outdir). Generally _field. var_prefix : str prefix of the file to read in the 'prof' subdirectory. Includes all characters up to `*-xxx.txt` - where xxx is a 3 digit numeric from 001 to 020. + where xxx is a 3 digit numeric from 001 to 020. time_indices : np.array - SCEPTER outputs 20 time indices as profiles. This array selects which to include in the + SCEPTER outputs 20 time indices as profiles. This array selects which to include in the output dataset. Default is all 20. run_subdir : str name of the subdirectory that holds the profile files ("prof" by default) - + Returns ------- xr.Dataset - all bsd variables defined over time and depth. - ''' + all bsd variables defined over time and depth. + """ # loop through time indices to extract data for ts in time_indices: @@ -1262,15 +1362,15 @@ def bsd_profile_to_ds( outdf = pd.concat([outdf, tmpdf]) # convert to an xarray dataset - ds = outdf.set_index(['depth', 'time']).to_xarray() + ds = outdf.set_index(["depth", "time"]).to_xarray() # create dictionary with units for each variable units_dict = {var: get_bsd_prof_units(var) for var in ds.data_vars} # add the units as an attribute to each variable in the dataset for var in ds.data_vars: - ds[var].attrs['units'] = units_dict[var] - + ds[var].attrs["units"] = units_dict[var] + # return result return ds @@ -1280,16 +1380,16 @@ def ads_percCec_prof_baseSat( runname: str, var_prefix: str, var_units: str, - time_indices: np.array=np.arange(1,21,1), - calculate_mean: bool=True, - depth_mean_suffix: str="depmean", - run_subdir: str="prof", + time_indices: np.array = np.arange(1, 21, 1), + calculate_mean: bool = True, + depth_mean_suffix: str = "depmean", + run_subdir: str = "prof", ) -> xr.Dataset: - ''' - Get adsorbed species as a percent of CEC in an xarray dataset. This is just a wrapper - around the profile_to_ds_optMean function which computes base saturation after getting + """ + Get adsorbed species as a percent of CEC in an xarray dataset. This is just a wrapper + around the profile_to_ds_optMean function which computes base saturation after getting the ads%cec profile as a dataset. - + Parameters ---------- outdir : str @@ -1298,55 +1398,64 @@ def ads_percCec_prof_baseSat( name of the SCEPTER run (equivalent to the directory within outdir). Generally _field. var_prefix : str prefix of the file to read in the 'prof' subdirectory. Includes all characters up to `*-xxx.txt` - where xxx is a 3 digit numeric from 001 to 020. 
+ where xxx is a 3 digit numeric from 001 to 020. var_units : str name of the units for the variables to add as an attribute to the dataset time_indices : np.array - SCEPTER outputs 20 time indices as profiles. This array selects which to include in the + SCEPTER outputs 20 time indices as profiles. This array selects which to include in the output dataset. Default is all 20. calculate_mean : bool - [True | False] true means the mean of all variables is calculated at each depth (as + [True | False] true means the mean of all variables is calculated at each depth (as though someone took a sample to that depth and calculated the mean) depth_mean_suffix : str - Only used if calculate_mean == True. This is the variable name suffix to append if + Only used if calculate_mean == True. This is the variable name suffix to append if we calculate the depth mean run_subdir : str name of the subdirectory that holds the profile files ("prof" by default) - + Returns ------- xr.Dataset - all bsd variables defined over time and depth. - ''' + all bsd variables defined over time and depth. + """ # get the profile as a dataset - ds = profile_to_ds_optMean(outdir, runname, var_prefix, var_units, time_indices, calculate_mean, depth_mean_suffix, run_subdir) + ds = profile_to_ds_optMean( + outdir, + runname, + var_prefix, + var_units, + time_indices, + calculate_mean, + depth_mean_suffix, + run_subdir, + ) # compute base saturation - bs_sp_list = ['ca','mg','k','na'] # bases that we want to sum + bs_sp_list = ["ca", "mg", "k", "na"] # bases that we want to sum valid_vars = [var for var in bs_sp_list if var in ds] if valid_vars: ds["base_saturation"] = ds[valid_vars].to_array(dim="vars").sum(dim="vars") ds["base_saturation"].attrs["units"] = var_units # add units else: - ds['base_saturation'] = np.nan + ds["base_saturation"] = np.nan ds["base_saturation"].attrs["units"] = var_units # add units - + # return result return ds def prof_postproc_save( - outdir: str, - runname_field: str, + outdir: str, + runname_field: str, runname_lab: str, - postproc_prof_list: list=["all"], - save_dir: str="postproc_profs", + postproc_prof_list: list = ["all"], + save_dir: str = "postproc_profs", ): - ''' + """ Convert SCEPTER/scepter_output/myrun/prof/* files to .nc files. Relies on other profile - postproc embedded functions. Only the profile files listed in postproc_prof_list will - be processed. + postproc embedded functions. Only the profile files listed in postproc_prof_list will + be processed. Parameters ---------- @@ -1356,7 +1465,7 @@ def prof_postproc_save( name of the SCEPTER run (equivalent to the directory within outdir). Generally _field. runname_lab : str name of the SCEPTER run (equivalent to the directory within outdir). Generally _lab. 
- postproc_prof_list : list + postproc_prof_list : list list of postprocess files that are also keywords in postproc_prof_dict (see `cflx_proc.py`) save_dir : str name of the subdirectory where the .nc files are saved @@ -1364,7 +1473,7 @@ def prof_postproc_save( Returns ------- - ''' + """ # where to save the results savehere = os.path.join(outdir, runname_field, save_dir) # make dir if it doesn't exist @@ -1378,34 +1487,37 @@ def prof_postproc_save( # check that all listed prof names are in the dictionary missing_keys = [key for key in postproc_prof_list if key not in postproc_prof_dict] if missing_keys: - print(f"Warning: The following postprocess profile names are not compatible: {missing_keys}; check for typos!") + print( + f"Warning: The following postprocess profile names are not compatible: {missing_keys}; check for typos!" + ) # remove the missing ones pplist_new = [key for key in postproc_prof_list if key in postproc_prof_dict] postproc_prof_list = pplist_new # --- loop through postproc list, save result for pp in postproc_prof_list: - # --- check for special cases first if pp == "bulksoil": ds = bsd_profile_to_ds(outdir, runname_field, **postproc_prof_dict[pp]) elif pp == "adsorbed_percCEC": - ds = ads_percCec_prof_baseSat(outdir, runname_field, **postproc_prof_dict[pp]) + ds = ads_percCec_prof_baseSat( + outdir, runname_field, **postproc_prof_dict[pp] + ) elif pp == "soil_ph": ds = profile_to_ds_optMean(outdir, runname_field, **postproc_prof_dict[pp]) - ds = ds.sel(time = np.max(ds.time.values)) # keep only the max time (lab run time is not aligned with field!) + ds = ds.sel( + time=np.max(ds.time.values) + ) # keep only the max time (lab run time is not aligned with field!) # --------------------------------- else: ds = profile_to_ds_optMean(outdir, runname_field, **postproc_prof_dict[pp]) - - # --- save + + # --- save savename = f"{pp}.nc" ds.to_netcdf(os.path.join(savehere, savename)) - - -# ------- SCRATCH ----------------------------------------------------- # +# ------- SCRATCH ----------------------------------------------------- # # %% # outdir = "/home/tykukla/SCEPTER/scepter_output" # # runname = "noFert_cc_multiyear_site_311b_app_10000p0_psize_75_composite_field" @@ -1415,12 +1527,7 @@ def prof_postproc_save( # cflx_calc(outdir, runname, feedstock) - - - - - -# %% +# %% # outdir = "/home/tykukla/SCEPTER/scepter_output" # runname = "noFert_cc_multiyear_site_311b_app_10000p0_psize_75_composite_field" @@ -1432,19 +1539,19 @@ def prof_postproc_save( # df = co2_flx(outdir, runname) # df -# %% +# %% # import matplotlib.pyplot as plt # plt.plot(dfint['time'], dfint['adv']*dfint['time'], label='adv') # plt.plot(dfint['time'], dfint['tflx']*dfint['time'], label='tflx') # plt.legend() -# %% +# %% # plt.plot(df['time'], df['adv'], label='adv') # plt.plot(df['time'], df['tflx'], label='tflx') # plt.plot(df['time'], df['tflx'] + df['adv'], label='tflx+adv') # plt.legend() -# %% +# %% # outdir = "/home/tykukla/SCEPTER/scepter_output" # runname = "noFert_cc_multiyear_site_311b_app_10000p0_psize_75_composite_field" @@ -1459,32 +1566,32 @@ def prof_postproc_save( # df = preprocess_txt(outdir, runname, fn, run_subdir = "flx") # dfint = preprocess_txt(outdir, runname, fn_int, run_subdir = "flx") -# # %% +# # %% # plt.plot(dfint['time'], dfint['tflx']*dfint['time']) # plt.plot(dfint['time'], dfint['rain']*dfint['time']) -# # %% +# # %% # plt.plot(dfint['time'], dfint['time']*(-1*(dfint['rain']) - dfint['tflx'])) # plt.plot(dfint['time'], dfint['cc']*dfint['time']) -# # %% +# # %% # 
plt.plot(dfint['time'], dfint['time']*(-1*(dfint['rain']) - dfint['cc'])) # plt.plot(dfint['time'], (dfint['tflx']+dfint['adv'])*dfint['time']) -# # %% +# # %% # plt.plot(dfint['time'], dfint['cc']*dfint['time'], label='diss') # plt.plot(dfint['time'], -1*dfint['rain']*dfint['time'], label='rain') # plt.plot(dfint['time'], dfint['tflx']*dfint['time'], label='tflx') # plt.plot(dfint['time'], dfint['adv']*dfint['time'], label='adv') # plt.legend() -# # %% +# # %% # plt.plot(dfint['time'], dfint['time']*(-1*dfint['rain']-dfint['adv']), label='rain-adv') # plt.plot(dfint['time'], dfint['cc']*dfint['time'], label='diss') # plt.plot(dfint['time'], dfint['tflx']*dfint['time'], label='tflx') # plt.legend() -# # %% +# # %% # plt.plot(df['time'], df['cc'], label='diss') # plt.plot(df['time'], -1*df['rain'], label='rain') # plt.plot(df['time'], df['tflx'], label='tflx') @@ -1493,7 +1600,6 @@ def prof_postproc_save( # plt.legend() - # # %% # plt.plot(dfint['time'], dfint['cc']*dfint['time']) # plt.plot(dfint['time'], -1*dfint['rain']*dfint['time']) @@ -1512,8 +1618,7 @@ def prof_postproc_save( # # %% - -# %% +# %% # # .. inputs minus outputs # inputs_x = (dfint['cc'] + dfint['g2'])*dfint['time'] @@ -1521,7 +1626,7 @@ def prof_postproc_save( ## -- CONFIRMED -## +## ## cc + g2 - (dif + adv) = tflx ## -- @@ -1549,7 +1654,7 @@ def prof_postproc_save( # plt.legend() -# # %% +# # %% # plt.plot(dfint['time'], (dfint['cc']*dfint['time'] / ((dfint['g2'] + dfint['cc'])*dfint['time']))) # # %% @@ -1593,7 +1698,7 @@ def prof_postproc_save( # plt.ylim(-4,4) # plt.legend() -# # %% +# # %% # plt.plot(dfint['time'], -1*dfint['dif']*dfint['time'] - dfint['g2']*dfint['time'], label='-dif-resp') # plt.plot(dfint['time'], dfint['adv']*dfint['time'], label='adv') # plt.plot(dfint['time'], -1*dfint['cc']*dfint['time'], label='cc') @@ -1603,7 +1708,7 @@ def prof_postproc_save( # #plt.plot(dfint['time'], dfint['g2']*dfint['time'], label='resp') # plt.legend() -# # --- CONFIRMED -- +# # --- CONFIRMED -- # # adv = (-dif-resp) - cc - tflx # # where: # # -dif-resp = respired carbon that's stored (it's positive when resp > diffusion, indicating respired carbon staying in the system) diff --git a/scepter/setup/move_s3_to_scepter.sh b/scepter/setup/move_s3_to_scepter.sh index d9cd870..27fe037 100755 --- a/scepter/setup/move_s3_to_scepter.sh +++ b/scepter/setup/move_s3_to_scepter.sh @@ -27,7 +27,7 @@ s5cmd ls "$SOURCE_BUCKET" | grep "spintuneup" | while read -r line; do # Extract the directory path from the listing DIR_PATH=$(echo "$line" | awk '{print $NF}') - + # Move each matching directory to the destination echo "Syncing ${SOURCE_BUCKET}${DIR_PATH}* to ${DESTINATION_PATH}${DIR_PATH}" s5cmd sync "${SOURCE_BUCKET}${DIR_PATH}*" "${DESTINATION_PATH}${DIR_PATH}" @@ -56,5 +56,3 @@ done # --- delete dirs that are now empty # find $src -mindepth 1 -type d -empty -not -path "$MAIN_DIR" -delete - - diff --git a/scepter/setup/scepter_helperFxns.py b/scepter/setup/scepter_helperFxns.py index d08d915..772fa91 100644 --- a/scepter/setup/scepter_helperFxns.py +++ b/scepter/setup/scepter_helperFxns.py @@ -11,9 +11,9 @@ import re import shutil import subprocess -from scipy.integrate import cumulative_trapezoid import pandas as pd +from scipy.integrate import cumulative_trapezoid # %% # -------------------------------------------------------------------------- @@ -710,31 +710,32 @@ def run_complete_check( pass # pass does nothing, creating an empty file -# modify the flx/dust.txt file to get total dust flux over time +# modify the flx/dust.txt 
file to get total dust flux over time def dustflx_calc( outdir: str, - runname_field: str, + runname_field: str, fdust1: float, fdust2: float, dustsp: str, - dustsp_2nd: str=None): + dustsp_2nd: str = None, +): """ SCEPTER's default dust file (*/flx/dust.txt) shows time and the relative amount of dust that gets applied. We use the dust flux to compute the timeseries of dust application and the integrated dust application. Nothing gets returned, - but the updated dust.txt file gets saved. + but the updated dust.txt file gets saved. Parameters ---------- outdir : str output directory for SCEPTER results (ex: "home/name/SCEPTER/scepter_output") runname_field : str - name of the directory for the SCEPTER field run (for now, we're not applying + name of the directory for the SCEPTER field run (for now, we're not applying any changes to the lab dust fluxes) fdust1 : float - [g m2 yr] amount of primary dust applied each year + [g m2 yr] amount of primary dust applied each year fdust2 : float - [g m2 yr] amount of secondary dust applied each year + [g m2 yr] amount of secondary dust applied each year dustsp : str name of the primary dust species (e.g., "cc" or "gbas") dustsp_2nd : str @@ -744,7 +745,7 @@ def dustflx_calc( ------- """ # read in the dust flux dataframe - file_path = os.path.join(outdir, runname_field, 'flx', 'dust.txt') + file_path = os.path.join(outdir, runname_field, "flx", "dust.txt") df = preprocess_txt(file_path) # make sure dust fluxes are numbers @@ -758,27 +759,31 @@ def dustflx_calc( fdust2 = float(fdust2) except: fdust2 = 0 - + # add a column for the dust flux - df['dust1_STATIC'] = fdust1 - df['dust2_STATIC'] = fdust2 + df["dust1_STATIC"] = fdust1 + df["dust2_STATIC"] = fdust2 - # compute dust timeseries - df['dust1_g_m2_yr'] = df['dust(relative_to_average)'] * df['dust1_STATIC'] - df['dust2_g_m2_yr'] = df['dust(relative_to_average)'] * df['dust2_STATIC'] + # compute dust timeseries + df["dust1_g_m2_yr"] = df["dust(relative_to_average)"] * df["dust1_STATIC"] + df["dust2_g_m2_yr"] = df["dust(relative_to_average)"] * df["dust2_STATIC"] # compute integrated dust timeseries - df['int_dust1_g_m2_yr'] = cumulative_trapezoid(df['dust1_g_m2_yr'], df['time'], initial=0) - df['int_dust2_g_m2_yr'] = cumulative_trapezoid(df['dust2_g_m2_yr'], df['time'], initial=0) + df["int_dust1_g_m2_yr"] = cumulative_trapezoid( + df["dust1_g_m2_yr"], df["time"], initial=0 + ) + df["int_dust2_g_m2_yr"] = cumulative_trapezoid( + df["dust2_g_m2_yr"], df["time"], initial=0 + ) # add dust species info - df['dustsp1'] = dustsp - df['dustsp2'] = dustsp_2nd - + df["dustsp1"] = dustsp + df["dustsp2"] = dustsp_2nd + # save the result df.to_csv( # default is mode='w' which will overwrite the existing file (that's fine because we've merged it with the new data) file_path, index=None, sep="\t" - ) + ) # -------------------------------------------------------------------------- @@ -857,7 +862,7 @@ def to_aws( outdir_final = dst_aws else: outdir_final = outdir - + # return the new directory for postproc purposes return outdir_final
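For context on the integration step in dustflx_calc: scipy's cumulative_trapezoid returns the running trapezoidal integral of the per-timestep dust flux against time, so the final value is the total dust applied up to that point. The sketch below shows only that step under assumed inputs; the fdust1 amount and the relative-dust series are invented, and the column names simply mirror the dust.txt convention.

# Minimal sketch of the cumulative dust-flux integration (illustrative values only).
import pandas as pd
from scipy.integrate import cumulative_trapezoid

fdust1 = 1000.0  # g/m2/yr applied on average (assumed for this example)
df = pd.DataFrame(
    {
        "time": [0.0, 0.5, 1.0, 1.5, 2.0],                        # years
        "dust(relative_to_average)": [0.0, 2.0, 0.0, 2.0, 0.0],   # seasonal pulses
    }
)
df["dust1_g_m2_yr"] = df["dust(relative_to_average)"] * fdust1
# running integral in g/m2; initial=0 keeps the output the same length as time
df["int_dust1_g_m2"] = cumulative_trapezoid(df["dust1_g_m2_yr"], df["time"], initial=0)
print(df)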