From 923c5e18fb10cd70ae0dc559117a8676075ef977 Mon Sep 17 00:00:00 2001
From: "tomas.sherwen@gmail.com" <tomas.sherwen@gmail.com>
Date: Tue, 29 Jun 2021 14:12:05 +0100
Subject: [PATCH 1/2] inc. latest scripts/geos-chem-schedule

---
 scripts/geos-chem-schedule | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/geos-chem-schedule b/scripts/geos-chem-schedule
index 589c7b6..0415b55 160000
--- a/scripts/geos-chem-schedule
+++ b/scripts/geos-chem-schedule
@@ -1 +1 @@
-Subproject commit 589c7b6ae04adbfe873941e86955f749f970fed0
+Subproject commit 0415b55b7365795b6ef292c4b256ebb0350695b6

From f42836c6800b9518509af904a61a0152877fcff4 Mon Sep 17 00:00:00 2001
From: "tomas.sherwen@gmail.com" <tomas.sherwen@gmail.com>
Date: Tue, 29 Jun 2021 14:34:54 +0100
Subject: [PATCH 2/2] enforce PEP8 throughout

Signed-off-by: tomas.sherwen@gmail.com <tomas.sherwen@gmail.com>
---
 AC_tools/GEOS.py                              | 74 +++++++--------
 AC_tools/GEOSChem_bpch.py                     | 62 +++++++------
 AC_tools/GEOSChem_nc.py                       | 40 ++++----
 AC_tools/HEMCO.py                             | 11 ++-
 AC_tools/KPP.py                               | 92 ++++++++++---------
 AC_tools/core.py                              | 20 ++--
 AC_tools/generic.py                           | 27 +++---
 AC_tools/mask.py                              | 12 +--
 AC_tools/observations.py                      | 10 +-
 AC_tools/obsolete/plotting_REDUNDANT.py       |  5 +-
 AC_tools/obsolete/variables_REDUNDANT.py      |  8 +-
 AC_tools/planeflight.py                       | 17 ++--
 AC_tools/plotting.py                          | 28 +++---
 AC_tools/variables.py                         | 32 +++----
 ...nalyse_vertical_Ox_loss_by_route_NetCDF.py | 12 +--
 scripts/KPP_mk_tagged_GC_Mechanism.py         | 14 +--
 scripts/SMVGEAR_process_prod_loss_tags.py     |  4 +-
 setup.py                                      | 24 ++--
 18 files changed, 243 insertions(+), 249 deletions(-)

diff --git a/AC_tools/GEOS.py b/AC_tools/GEOS.py
index 8377e34..e8d04fa 100644
--- a/AC_tools/GEOS.py
+++ b/AC_tools/GEOS.py
@@ -200,19 +200,19 @@ def get_GEOS5_online_diagnostic_plots(dt=None, ptype='wxmaps',
     # if no day given, use previous day
     if isinstance(dt, type(None)):
         # Just use yesterday for Now
-        TNow = time2datetime( [gmtime()] )[0]
+        TNow = time2datetime([gmtime()])[0]
         dt = datetime.datetime(TNow.year, TNow.month, TNow.day-1)
     # Which forecast to use
     if isinstance(fcst, type(None)):
         fcst_str = '{}{:0>2}{:0>2}T{:0>2}0000'
-        fcst = fcst_str.format( dt.year, dt.month, dt.day, dt.hour )
+        fcst = fcst_str.format(dt.year, dt.month, dt.day, dt.hour)
     # What is the website location for the data?
     site = 'https://fluid.nccs.nasa.gov/'
     if ptype == 'wxmaps':
-        type_root = '{}/{}'.format( site, ptype)
+        type_root = '{}/{}'.format(site, ptype)
     else:
-        type_root = '{}/wxmaps/{}'.format( site, ptype)
+        type_root = '{}/wxmaps/{}'.format(site, ptype)
     # What is the outline file structure?
     urlstr = '{}/?one_click=1&tau={:0>3}&stream={}&level={}&region={}&fcst={}&field={}'
@@ -239,13 +239,13 @@
                 # Download using Python wget
                 f = '{}_{}_{}_fcast_{}_{}_{}_{:0>2}_{:0>3}.png'
                 name = f.format(prefix, ptype, stream, fcst, field, region,
-                                level, tau )
+                                level, tau)
                 wget.download(image_URL, folder+name)
 
 
-def get_GEOS5_datagram_plots( dt=None, stream='G5FPFC', folder=None,
-                              prefix='ARNA', plts2get=['du_16.7_-23.0'],
-                              verbose=False, debug=False ):
+def get_GEOS5_datagram_plots(dt=None, stream='G5FPFC', folder=None,
+                             prefix='ARNA', plts2get=['du_16.7_-23.0'],
+                             verbose=False, debug=False):
     """
     Get GEOS5 datagram plots and save these locally
@@ -269,7 +269,7 @@ def get_GEOS5_datagram_plots( dt=None, stream='G5FPFC', folder=None,
     # if no day given, use previous day
     if isinstance(dt, type(None)):
         # Just use yesterday for Now
-        TNow = time2datetime( [gmtime()] )[0]
+        TNow = time2datetime([gmtime()])[0]
         dt = datetime.datetime(TNow.year, TNow.month, TNow.day-1)
     date_str = '{}_{:0>2}_{:0>2}'.format(dt.year, dt.month, dt.day)
     # get plots in list
@@ -277,10 +277,10 @@ def get_GEOS5_datagram_plots( dt=None, stream='G5FPFC', folder=None,
     gram_root = site+'/gram/static/plots/'
     # loop and retrieve the files
    for plt2get in plts2get:
-        url = '{}{}.png'.format( gram_root, plt2get )
+        url = '{}{}.png'.format(gram_root, plt2get)
         # Download using wget through python
         fstr = '{}_{}_{}_datagram_{}.png'
-        fstr = fstr.format( prefix, stream, date_str, plt2get )
+        fstr = fstr.format(prefix, stream, date_str, plt2get)
         filename = fstr.format(date_str, stream, )
         if debug:
             pstr = 'Getting {} and saving here: {}'
@@ -294,8 +294,8 @@ def get_GEOSCF_vertical_levels(print_equivalents=False, native_levels=False):
     """
     # Get a list of the pressure levels in GEOS-CF
     if native_levels:
-        HPa_l = [
-        0.01, 0.02, 0.0327, 0.0476, 0.066, 0.0893, 0.1197, 0.1595, 0.2113, 0.2785, 0.365, 0.4758, 0.6168, 0.7951, 1.0194, 1.3005, 1.6508, 2.085, 2.6202, 3.2764, 4.0766, 5.0468, 6.2168, 7.6198, 9.2929, 11.2769, 13.6434, 16.4571, 19.7916, 23.7304, 28.3678, 33.81, 40.1754, 47.6439, 56.3879, 66.6034, 78.5123, 92.3657, 108.663, 127.837, 150.393, 176.93, 208.152, 244.875, 288.083, 337.5, 375.0, 412.5, 450.0, 487.5, 525.0, 562.5, 600.0, 637.5, 675.0, 700.0, 725.0, 750.0, 775.0, 800.0, 820.0, 835.0, 850.0, 865.0, 880.0, 895.0, 910.0, 925.0, 940.0, 955.0, 970.0, 985.0
+        HPa_l = [
+            0.01, 0.02, 0.0327, 0.0476, 0.066, 0.0893, 0.1197, 0.1595, 0.2113, 0.2785, 0.365, 0.4758, 0.6168, 0.7951, 1.0194, 1.3005, 1.6508, 2.085, 2.6202, 3.2764, 4.0766, 5.0468, 6.2168, 7.6198, 9.2929, 11.2769, 13.6434, 16.4571, 19.7916, 23.7304, 28.3678, 33.81, 40.1754, 47.6439, 56.3879, 66.6034, 78.5123, 92.3657, 108.663, 127.837, 150.393, 176.93, 208.152, 244.875, 288.083, 337.5, 375.0, 412.5, 450.0, 487.5, 525.0, 562.5, 600.0, 637.5, 675.0, 700.0, 725.0, 750.0, 775.0, 800.0, 820.0, 835.0, 850.0, 865.0, 880.0, 895.0, 910.0, 925.0, 940.0, 955.0, 970.0, 985.0
         ]
     else:
         HPa_l = [
@@ -340,11 +340,11 @@ def extract_GEOSCF4FAAM_flight(folder=None, flight_ID='C216', folder4csv=None,
     filename = 'core_faam_*_{}_1hz.nc'.format(flight_ID.lower())
     file2use = glob.glob(folder+filename)
     if len(file2use) > 1:
-        print( 'WARNING: more that one file found! (so using latest file)' )
+        print('WARNING: more than one file found! (so using latest file)')
         print(file2use)
-    ds = xr.open_dataset( file2use[0] )
+    ds = xr.open_dataset(file2use[0])
     # Only select the variable of intereest and drop where these are NaNs
-    df = ds[ [PressVar, LatVar, LonVar, TimeVar] ].to_dataframe()
+    df = ds[[PressVar, LatVar, LonVar, TimeVar]].to_dataframe()
     df = df.dropna()
     # If doing a test, then just extract the first 150 points of flight track
     if testing_mode:
@@ -365,10 +365,10 @@ def extract_GEOSCF4FAAM_flight(folder=None, flight_ID='C216', folder4csv=None,
     if inc_ds_vars_in_csv:
         FAAM_df = ds.to_dataframe()
         df = pd.concat([df, FAAM_df], axis=1)
-        duplicates = [i for i in FAAM_df.columns if i in df.columns ]
-        if len(duplicates)>1:
+        duplicates = [i for i in FAAM_df.columns if i in df.columns]
+        if len(duplicates) > 1:
             print('WARNING: duplicates in FAAM and GEOS-CF dataframe headers!')
-            df = df[ list(set(df.columns)) ]
+            df = df[list(set(df.columns))]
     # Save dateframe to a csv file
     if isinstance(folder4csv, type(None)):
         folder4csv = './'
@@ -421,34 +421,34 @@ def extract_GEOSCF_assim4df(df=None, ds=None,
     # Restrict the dataset to the day(s) of the flight
     dates = dt64_2_dt(ds.time.values)
     # time_bool = [((i>=sdate) & (i<=edate)) for i in ds.time.values]
-    time_bool = [((i>=sdate) & (i<=edate)) for i in dates]
+    time_bool = [((i >= sdate) & (i <= edate)) for i in dates]
     ds = ds.isel(time=time_bool)
     # Reduce the dataset size to the spatial locations of the flight (+ buffer)
     if isinstance(spatial_buffer, type(None)):
-        spatial_buffer = 2 # degrees lat / lon
+        spatial_buffer = 2  # degrees lat / lon
     lat_min = df[LatVar].values.min() - spatial_buffer
     lat_max = df[LatVar].values.max() + spatial_buffer
-    lat_bool = [((i>=lat_min) & (i<=lat_max)) for i in ds[dsLatVar].values]
+    lat_bool = [((i >= lat_min) & (i <= lat_max)) for i in ds[dsLatVar].values]
     ds = ds.isel(lat=lat_bool)
     lon_min = df[LonVar].values.min() - spatial_buffer
     lon_max = df[LonVar].values.max() + spatial_buffer
-    lon_bool = [((i>=lon_min) & (i<=lon_max)) for i in ds[dsLonVar].values]
+    lon_bool = [((i >= lon_min) & (i <= lon_max)) for i in ds[dsLonVar].values]
     ds = ds.isel(lon=lon_bool)
     # Get a list of the levels that the data is present for
     # NOTE: GEOS-CF has 3 options (p23, interpolated; x1/v1, surface)
     if single_horizontal_level:
         HPa_l = get_GEOSCF_vertical_levels(native_levels=True)
-        HPa_l = [ HPa_l[-1] ]
+        HPa_l = [HPa_l[-1]]
     else:
         HPa_l = get_GEOSCF_vertical_levels(native_levels=False)
     # Save subset of dataset locally and then reload
     if isinstance(TEMP_nc_name, type(None)):
-        TEMP_nc_name = 'TEMP_NetCDF_{}.nc'.format(collection)
+        TEMP_nc_name = 'TEMP_NetCDF_{}.nc'.format(collection)
     ds = save_ds2disk_then_reload(ds, savename=TEMP_nc_name, debug=debug)
     # Resample the values to extract
     if resample_df2ds_freq:
         grads_step = ds.time.attrs['grads_step']
-        grads_step = grads_step.replace('mn', 'T' )
+        grads_step = grads_step.replace('mn', 'T')
         df = df.resample(grads_step).mean()
     # Get nearest indexes in 4D data from locations in dataframe
     idx_dict = calc_4D_idx_in_ds(ds_hPa=HPa_l, ds=ds, df=df,
@@ -460,13 +460,13 @@ def extract_GEOSCF_assim4df(df=None, ds=None,
                                  )
     # Make a dictionaries to convert between ds and df variable names
     df2ds_dict = {
-        LonVar:dsLonVar, LatVar:dsLatVar, TimeVar:dsTimeVar, PressVar:dsAltVar,
+        LonVar: dsLonVar, LatVar: dsLatVar, TimeVar: dsTimeVar, PressVar: dsAltVar,
     }
     df2ds_dict_r = {v: k for k, v in list(df2ds_dict.items())}
     # Create a data frame for values
     dfN = pd.DataFrame()
     # Extraction of data points in a bulk manner
-    for nval, var in enumerate( vars2extract ):
+    for nval, var in enumerate(vars2extract):
         # Now extract values
         dims2use = list(ds[var].coords)
         idx_list = [idx_dict[df2ds_dict_r[i]] for i in dims2use]
@@ -503,10 +503,10 @@ def extract_GEOSCF_assim4df(df=None, ds=None,
 
 
 def regrid_restart_file4flexgrid(dsA, OutFile=None, lons=None,
-                                lats=None, res='1x1', folder='./',
-                                vars2regrid=None, rm_regridder=True,
-                                save2netcdf=True, method='bilinear',
-                                debug=False):
+                                 lats=None, res='1x1', folder='./',
+                                 vars2regrid=None, rm_regridder=True,
+                                 save2netcdf=True, method='bilinear',
+                                 debug=False):
     """
     Regrid a GEOS-Chem restart file to a given resolution using xEMSF
@@ -535,9 +535,9 @@ def regrid_restart_file4flexgrid(dsA, OutFile=None, lons=None,
     ds_out = xr.Dataset(dsA.coords)
     # Replace the lat and lon coords
     del ds_out['lat']
-    ds_out = ds_out.assign_coords({'lat':lats})
+    ds_out = ds_out.assign_coords({'lat': lats})
     del ds_out['lon']
-    ds_out = ds_out.assign_coords({'lon':lons})
+    ds_out = ds_out.assign_coords({'lon': lons})
     # Create a regidder (to be reused )
     regridder = xe.Regridder(dsA, ds_out, method, reuse_weights=True)
     # Loop and regrid variables
@@ -546,7 +546,7 @@ def regrid_restart_file4flexgrid(dsA, OutFile=None, lons=None,
         vars2regrid = dsA.data_vars
     # Only regrid variables wit lon and lat in them
     vars2leave = [i for i in vars2regrid if 'lat' not in dsA[i].coords.keys()]
-    vars2regrid = [i for i in vars2regrid if 'lat' in dsA[i].coords.keys() ]
+    vars2regrid = [i for i in vars2regrid if 'lat' in dsA[i].coords.keys()]
     for var2use in vars2regrid:
         if debug:
             print(var2use)
@@ -554,9 +554,9 @@ def regrid_restart_file4flexgrid(dsA, OutFile=None, lons=None,
         ds_out = xr.Dataset(dsA.coords)
         # Replace the lat and lon coords with the ones to regrid to
         del ds_out['lat']
-        ds_out = ds_out.assign_coords({'lat':lats})
+        ds_out = ds_out.assign_coords({'lat': lats})
         del ds_out['lon']
-        ds_out = ds_out.assign_coords({'lon':lons})
+        ds_out = ds_out.assign_coords({'lon': lons})
         # Get a DataArray
         dr = dsA[var2use]
         # Build regridder
diff --git a/AC_tools/GEOSChem_bpch.py b/AC_tools/GEOSChem_bpch.py
index d880065..eebbb6d 100755
--- a/AC_tools/GEOSChem_bpch.py
+++ b/AC_tools/GEOSChem_bpch.py
@@ -762,7 +762,7 @@ def get_GC_output(wd, vars=None, species=None, category=None, r_cubes=False,
 
 #    if not r_cubes:
 
-    # Extract data
+    #    Extract data
 #    try:
 #        arr = [ cubes[i].data for i in range( len(vars) ) ]
 #    except:
@@ -770,7 +770,7 @@ def get_GC_output(wd, vars=None, species=None, category=None, r_cubes=False,
 #        print 'WARNING: length of extracted data array < vars'
 #        print 'vars: >{}<'.format( ','.join(vars) )
 #        sys.exit( 0 )
-    # restore to zero scaling (v/v instead of pptv etc ) - only for GC tracers
+    #    restore to zero scaling (v/v instead of pptv etc ) - only for GC tracers
 #    if restore_zero_scaling:
 #        if (category == 'IJ_AVG_S') or ('IJ_AVG_S' in vars[0] ):
 #            print [ cubes[i].attributes['ctm_units'] for i in range( len(vars) ) ]
@@ -1944,7 +1944,7 @@ def spec_dep(wd=None, spec='O3', s_area=None, months=None,
     logging.debug(DebugStr.format(len(arr),
                                   *[str(ii) for ii in
                                     [(i.shape, i.sum(), i.mean())
-                                    for i in [arr]]])
+                                     for i in [arr]]])
                   )
     # Convert to Gg "Ox" (Gg X /s)
     arr = molec_cm2_s_2_Gg_Ox_np(arr, spec, s_area=s_area, Iodine=Iodine,
@@ -2229,12 +2229,12 @@ def get_wet_dep(months=None, years=None, vol=None,
 
 
 def molec_weighted_avg_BPCH(arr, wd=None, vol=None, t_p=None,
-                           trop_limit=True, multiply_method=False, rm_strat=True,
-                           molecs=None,
-                           weight_lon=False, weight_lat=False, LON_axis=0,
-                           LAT_axis=1,
-                           n_air=None,
-                           annual_mean=True, res='4x5', debug=False):
+                            trop_limit=True, multiply_method=False, rm_strat=True,
+                            molecs=None,
+                            weight_lon=False, weight_lat=False, LON_axis=0,
+                            LAT_axis=1,
+                            n_air=None,
+                            annual_mean=True, res='4x5', debug=False):
     """
     Takes an array and retuns the average (molecular weighted) value
@@ -3614,14 +3614,14 @@ def convert_molec_cm3_s2_molec_per_yr(ars=None, vol=None):
 
 
 def convert_molec_cm3_s_2_g_X_s_BPCH(ars=None, specs=None, ref_spec=None,
-                                    months=None, years=None, vol=None, t_ps=None,
-                                    trop_limit=True,
-                                    s_area=None, rm_strat=True, wd=None,
-                                    res='4x5',
-                                    multiply_method=True, use_time_in_trop=True,
-                                    conbine_ars=True,
-                                    month_eq=False, limit_PL_dim2=38,
-                                    verbose=False, debug=False):
+                                     months=None, years=None, vol=None, t_ps=None,
+                                     trop_limit=True,
+                                     s_area=None, rm_strat=True, wd=None,
+                                     res='4x5',
+                                     multiply_method=True, use_time_in_trop=True,
+                                     conbine_ars=True,
+                                     month_eq=False, limit_PL_dim2=38,
+                                     verbose=False, debug=False):
     """
     Convert molec/cm3/s to g/grid box. This is used for converting
     prod/loss output units
@@ -3851,8 +3851,8 @@ def get_avg_trop_conc_of_X(spec='O3', wd=None, s_area=None, res='4x5',
         arr = arr.mean(axis=-1)
     # Area weight and return
     val = molec_weighted_avg_BPCH(arr, res=res, trop_limit=trop_limit, \
-        # vol=vol, t_p=t_p, n_air=n_air, molecs=molecs,
-        rm_strat=rm_strat, wd=wd, annual_mean=annual_mean)
+                                  # vol=vol, t_p=t_p, n_air=n_air, molecs=molecs,
+                                  rm_strat=rm_strat, wd=wd, annual_mean=annual_mean)
     if rtn_units:
         return val, units
     else:
@@ -3898,14 +3898,16 @@ def get_default_variable_dict(wd=None,
     # Consider just troposphere or consider full atmosphere?
     if full_vert_grid:
         Var_rc['trop_limit'] = False  # limit arrays to 38 levels...
-        Var_rc['rm_strat'] = False # This is for convert_molec_cm3_s_2_g_X_s_BPCH
+        # This is for convert_molec_cm3_s_2_g_X_s_BPCH
+        Var_rc['rm_strat'] = False
         Var_rc['full_vert_grid'] = full_vert_grid  # for get_dims4res
         Var_rc['limit_PL_dim2'] = 59  # limit of levels for chemistry?
         # apply dim. limiting?
         Var_rc['limit_vertical_dim'] = limit_vertical_dim
     else:
         Var_rc['trop_limit'] = True  # limit arrays to 38 levels...
-        Var_rc['rm_strat'] = True # This is for convert_molec_cm3_s_2_g_X_s_BPCH
+        # This is for convert_molec_cm3_s_2_g_X_s_BPCH
+        Var_rc['rm_strat'] = True
         Var_rc['full_vert_grid'] = full_vert_grid  # for get_dims4res
         Var_rc['limit_PL_dim2'] = 38  # limit of levels for chemistry
     # only consider boxes that are 100 % tropospheric
@@ -4117,15 +4119,15 @@ def process_to_X_per_s(spec=None, ars=None, tags=None, ref_spec=None,
         month_eq = False
     # Convert to g X/s
     ars = convert_molec_cm3_s_2_g_X_s_BPCH(ars=ars, ref_spec=ref_spec, \
-        # shared settings...
-        months=Data_rc['months'], years=Data_rc['years'],
-        vol=Data_rc['vol'], t_ps=Data_rc['t_ps'], \
-        trop_limit=Var_rc['trop_limit'],
-        rm_strat=Var_rc['rm_strat'],
-        # ... and function specific settings...
-        month_eq=month_eq,
-        # month_eq=False,
-        conbine_ars=False)
+                                           # shared settings...
+                                           months=Data_rc['months'], years=Data_rc['years'],
+                                           vol=Data_rc['vol'], t_ps=Data_rc['t_ps'], \
+                                           trop_limit=Var_rc['trop_limit'],
+                                           rm_strat=Var_rc['rm_strat'],
+                                           # ... and function specific settings...
+                                           month_eq=month_eq,
+                                           # month_eq=False,
+                                           conbine_ars=False)
     # Adjust for # of X in tag
     # is the broadcasting right here? should the array just be overwritten?
     if adjust_by_stiochiometry_of_tag:
diff --git a/AC_tools/GEOSChem_nc.py b/AC_tools/GEOSChem_nc.py
index 78b1695..6a5ee64 100644
--- a/AC_tools/GEOSChem_nc.py
+++ b/AC_tools/GEOSChem_nc.py
@@ -81,13 +81,14 @@ def get_GEOSChem_files_as_ds(file_str='GEOSChem.SpeciesConc.*.nc4', wd=None,
     if not isinstance(dates2use, type(None)):
         FileRootsVar = 'FileRoots'
         df = pd.DataFrame(files)
-        df = pd.DataFrame({FileRootsVar:files})
+        df = pd.DataFrame({FileRootsVar: files})
         # Which date format to look for in filenames?
         if is_HEMCO_collection:
-            format='%Y%m%d%H%M'
+            format = '%Y%m%d%H%M'
         else:
-            format='%Y%m%d_%H%Mz'
+            format = '%Y%m%d_%H%Mz'
         # Setup a helper function to extract dates from file strings
+
         def get_date_from_filename(x, format=format):
             """
             Extract Dates from filenames
@@ -103,7 +104,7 @@ def get_date_from_filename(x, format=format):
         dtVar = 'datetime'
         df[dtVar] = df[FileRootsVar].map(get_date_from_filename)
         bool = df[dtVar].isin(dates2use)
-        files = list(df.loc[bool,FileRootsVar].values)
+        files = list(df.loc[bool, FileRootsVar].values)
     # Open all of these files as single Dataset
     if open_with_coords_dropped:
         def drop_all_coords(ds):
@@ -114,7 +115,7 @@ def drop_all_coords(ds):
     # NOTE: Updated to use faster opening settings for files sharing the same coords
     # https://github.com/pydata/xarray/issues/1823
     ds = xr.open_mfdataset(files,
-                           # concat_dim='time',
+                           #    concat_dim='time',
                            combine=combine, data_vars=data_vars, coords=coords,
                            compat=compat, parallel=parallel)
@@ -188,7 +189,7 @@ def get_Gg_trop_burden(ds=None, spec=None, spec_var=None, StateMet=None,
             spec = spec_var.replace(spec_conc_prefix, '')
         if debug:
             PStr = 'Attempting in ds conversion of {} ({})'
-            print( PStr.format(spec, spec_var) )
+            print(PStr.format(spec, spec_var))
         # Check units
         SpecUnits = dsL[spec_var].units
         MixingRatioUnits = MXUnits == SpecUnits
@@ -401,7 +402,6 @@ def GetSpeciesConcDataset(file_str='GEOSChem.SpeciesConc.*.nc4', wd=None,
                                      dates2use=dates2use)
 
 
-
 def get_Inst1hr_ds(file_str='GEOSChem.inst1hr.*', wd=None,
                    dates2use=None):
     """
@@ -457,7 +457,7 @@ def get_DryDep_ds(file_str='GEOSChem.DryDep.*', wd=None,
 
 
 def get_WetLossConv_ds(file_str='GEOSChem.WetLossConv.*', wd=None,
-                        dates2use=None):
+                       dates2use=None):
     """
     Wrapper to get NetCDF Wet Loss via Convection output as a dataset
@@ -475,7 +475,7 @@ def get_WetLossLS_ds(file_str='GEOSChem.WetLossLS.*', wd=None,
-                      dates2use=None):
+                     dates2use=None):
     """
     Wrapper to get Wet Loss via large-scale Convection NetCDF output as a ds
@@ -817,7 +817,7 @@ def get_general_stats4run_dict_as_df(run_dict=None, extra_str='', REF1=None,
     for key in run_dict.keys():
         dsD[key] = GetSpeciesConcDataset(wd=run_dict[key], dates2use=dates2use)
     # - Get burdens for core species
-    avg_over_time = True # Note: burdens area averaged overtime
+    avg_over_time = True  # Note: burdens area averaged overtime
     core_burden_specs = ['O3', 'CO', 'NO', 'NO2']
     specs2use = list(set(core_burden_specs+extra_burden_specs))
     prefix = 'SpeciesConc_'
@@ -827,12 +827,12 @@ def get_general_stats4run_dict_as_df(run_dict=None, extra_str='', REF1=None,
         if use_REF_wd4Met:
             # Set working directory for shared variables
             if isinstance(REF_wd, type(None)):
-                REF_wd = run_dict[ list(run_dict.keys())[0] ]
+                REF_wd = run_dict[list(run_dict.keys())[0]]
             StateMet = get_StateMet_ds(wd=REF_wd, dates2use=dates2use)
         else:
             StateMet = get_StateMet_ds(wd=run_dict[key], dates2use=dates2use)
         # Average burden over time
-        ds = dsD[key]#.mean(dim='time', keep_attrs=True)
+        ds = dsD[key]  # .mean(dim='time', keep_attrs=True)
         S = get_Gg_trop_burden(ds, vars2use=vars2use, StateMet=StateMet,
                                use_time_in_trop=use_time_in_trop,
                                avg_over_time=avg_over_time,
@@ -854,7 +854,7 @@ def get_general_stats4run_dict_as_df(run_dict=None, extra_str='', REF1=None,
     # Get NOx burden
     BurdenStr = '{} burden ({})'
     NO2_varname = BurdenStr.format('NO2', mass_unit)
-    NO_varname = BurdenStr.format('NO', mass_unit)
+    NO_varname = BurdenStr.format('NO', mass_unit)
     NOx_varname = BurdenStr.format('NOx', mass_unit)
     PtrStr = 'NOx family not added for trop. df columns: {}'
     try:
@@ -965,11 +965,11 @@ def add_molec_den2ds(ds, MolecVar='Met_MOLCES', AirDenVar='Met_AIRDEN'):
     try:
         ds[MolecVar]
     except KeyError:
-        RMM_air = constants('RMM_air') / 1E3 # g/mol => kg/mol
-        ds[MolecVar] = ds[AirDenVar].copy() / 1E6 # kg/m3 => kg/cm3
+        RMM_air = constants('RMM_air') / 1E3  # g/mol => kg/mol
+        ds[MolecVar] = ds[AirDenVar].copy() / 1E6  # kg/m3 => kg/cm3
         values = ds[MolecVar].values
         values = values / RMM_air  # kg/cm3 / kg/mol
-        values = values * constants('AVG') # mol/cm3 => molecules/cm3
+        values = values * constants('AVG')  # mol/cm3 => molecules/cm3
         ds[MolecVar].values = values
     return ds
@@ -1006,7 +1006,7 @@ def add_Xy_2ds(ds=None, var2add='Cly', prefix='SpeciesConc_',
         pass
     if verbose:
         Pstr = "Using species for '{}' family (ref_spec: {}): {}"
-        print( Pstr.format(var2add, ref_spec, specs2use) )
+        print(Pstr.format(var2add, ref_spec, specs2use))
     # Setup Xy variable as template of 1st Xy species
     spec2use = specs2use[0]
     var2use = '{}{}'.format(prefix, spec2use)
@@ -1014,7 +1014,7 @@ def add_Xy_2ds(ds=None, var2add='Cly', prefix='SpeciesConc_',
     ds[var2add] = ds[var2use].copy() * stioch
     # Also save the individual species in reference species terms?
     if add_ind_specs2ds:
-        Var2Save = '{}-in-{}-units'.format(spec2use,ref_spec)
+        Var2Save = '{}-in-{}-units'.format(spec2use, ref_spec)
         ds[Var2Save] = ds[var2use].copy() * stioch
     # Add rest of Xy species and scale to stoichiometry
     for spec2use in specs2use[1:]:
@@ -1023,7 +1023,7 @@ def add_Xy_2ds(ds=None, var2add='Cly', prefix='SpeciesConc_',
         ds[var2add].values = ds[var2add].values + values
         # Also save the individual species in reference species terms?
         if add_ind_specs2ds:
-            Var2Save = '{}-in-{}-units'.format(spec2use,ref_spec)
+            Var2Save = '{}-in-{}-units'.format(spec2use, ref_spec)
             ds[Var2Save] = ds[var2use].copy() * stioch
     return ds
@@ -1045,5 +1045,5 @@ def get_specieslist_from_input_geos(folder=None, filename='input.geos'):
         if line2start_read in line:
             save_line = True
-    species = [i.split(':')[-1].strip() for i in species_lines ]
+    species = [i.split(':')[-1].strip() for i in species_lines]
     return species
diff --git a/AC_tools/HEMCO.py b/AC_tools/HEMCO.py
index 3e7420d..7b7d0e3 100644
--- a/AC_tools/HEMCO.py
+++ b/AC_tools/HEMCO.py
@@ -37,6 +37,7 @@ def rm_eruptive_volcancos_from_files(dates=None, sdate=None, edate=None,
     if isinstance(output_folder, type(None)):
         output_folder = folder
     # Helper functions for processing volcano files
+
     def get_volc_subfolder4dt(folder=None, dt=None):
         """ Get the volcano folder for a specific datetime """
         year = dt.year
@@ -56,7 +57,7 @@ def get_volc_file_lines4date(dt, folder=None):
         folder2use = get_volc_subfolder4dt(dt=dt, folder=folder)
         # Extract lines
         lines = read_lines_from_txt_file(filename=filename,
-                                        folder=folder2use)
+                                         folder=folder2use)
         return lines
 
     def rm_eruption_from_volc_file_lines(lines, skiplines=4, dt_str='',
@@ -83,14 +84,14 @@ def rm_eruption_from_volc_file_lines(lines, skiplines=4, dt_str='',
                 tmp_line = line.strip().split()
                 LAT = tmp_line[0]
                 LON = tmp_line[1]
-                S = tmp_line[2] # SULFUR
-                ELEV = tmp_line[3] # ELEVATION
-                CLOUD = tmp_line[4] # CLOUD_COLUMN_HEIGHT
+                S = tmp_line[2]  # SULFUR
+                ELEV = tmp_line[3]  # ELEVATION
+                CLOUD = tmp_line[4]  # CLOUD_COLUMN_HEIGHT
                 # If not equal, then EXCLUDE the line
                 if ELEV != CLOUD:
                     if verbose:
                         print(pstr1.format(S, dt_str, LAT, LON, ELEV,
-                                          CLOUD))
+                                           CLOUD))
                     include_line = False
             except IndexError:
                 if debug:
diff --git a/AC_tools/KPP.py b/AC_tools/KPP.py
index f1ecb3f..49d34dc 100644
--- a/AC_tools/KPP.py
+++ b/AC_tools/KPP.py
@@ -68,7 +68,7 @@ def get_PL_ars4mech_NetCDF(fam='LOx', ref_spec='O3', Ox_fam_dict=None,
     dsPL = get_ProdLoss_ds(wd=wd, dates2use=dates2use)
     prefix = 'Prod_'
     diag_tag_prefix = '{}{}'.format(prefix, 'T')
-    vars2use = [i.replace('PT', diag_tag_prefix) for i in Ox_fam_dict['tags'] ]
+    vars2use = [i.replace('PT', diag_tag_prefix) for i in Ox_fam_dict['tags']]
     dsPL = dsPL[vars2use]
     # Rename back into old format for now - Update this?
     rename_dict = dict(zip(vars2use, Ox_fam_dict['tags']))
@@ -92,7 +92,7 @@ def get_PL_ars4mech_NetCDF(fam='LOx', ref_spec='O3', Ox_fam_dict=None,
         # Convert to g mass
         values = (values * species_mass('O3'))
         # Account for stiochiometrey of reaction
-        values = values * RR_dict_fam_stioch[ tags2_rxn_num[var2use] ]
+        values = values * RR_dict_fam_stioch[tags2_rxn_num[var2use]]
         # Convert to /month from /s
         values = values * month2sec[:, None, None, None]
         dsPL[var2use].values = values
@@ -117,7 +117,7 @@ def get_PL_ars4mech_NetCDF(fam='LOx', ref_spec='O3', Ox_fam_dict=None,
         arr = np.ma.array(ars).sum(axis=0)
         var2use = Ox_fam_dict['tags'][0]
         ds_TEMP = dsPL[[var2use]].copy()
-        print(arr.shape, dsPL[var2use].values.shape )
+        print(arr.shape, dsPL[var2use].values.shape)
         print(arr.sum())
         ds_TEMP[var2use].values = arr
         LOx_trop = rm_fractional_troposphere(ds_TEMP, vars2use=[var2use],
@@ -132,7 +132,7 @@ def get_PL_ars4mech_NetCDF(fam='LOx', ref_spec='O3', Ox_fam_dict=None,
         RMM_air = constants('RMM_air')
         StateMet[MolecVar] = StateMet['Met_AIRDEN'].copy()
         # kg/m3 => molecs/cm3
-        StateMet[MolecVar].values = StateMet[MolecVar].values / RMM_air /1E6
+        StateMet[MolecVar].values = StateMet[MolecVar].values / RMM_air / 1E6
         # Multiply values through by # molecules
         dsPL = dsPL * StateMet[MolecVar]
         # sum over lat and lon
@@ -195,18 +195,18 @@ def get_PL_ars4mech_BPCH(fam='LOx', ref_spec='O3', Ox_fam_dict=None,
         month_eq = False
     # Now convert the units (to G/s)
     ars = convert_molec_cm3_s_2_g_X_s_BPCH(ars=ars, ref_spec=ref_spec,
-                                          # Shared settings...
-                                          months=Data_rc['months'],
-                                          years=Data_rc['years'],
-                                          vol=Data_rc['vol'], t_ps=Data_rc['t_ps'],
-                                          trop_limit=Var_rc['trop_limit'],
-                                          rm_strat=Var_rc['rm_strat'],
-                                          # There are 59 levels of computation for P/l in
-                                          # v11-1+ (so limit to 59)
-                                          limit_PL_dim2=limit_PL_dim2,
-                                          # ... and function specific settings...
-                                          month_eq=month_eq,
-                                          conbine_ars=False)
+                                           # Shared settings...
+                                           months=Data_rc['months'],
+                                           years=Data_rc['years'],
+                                           vol=Data_rc['vol'], t_ps=Data_rc['t_ps'],
+                                           trop_limit=Var_rc['trop_limit'],
+                                           rm_strat=Var_rc['rm_strat'],
+                                           # There are 59 levels of computation for P/l in
+                                           # v11-1+ (so limit to 59)
+                                           limit_PL_dim2=limit_PL_dim2,
+                                           # ... and function specific settings...
+                                           month_eq=month_eq,
+                                           conbine_ars=False)
     # Add stoichiometric scaling (# of Ox losses per tagged rxn. )
     ars = [i*RR_dict_fam_stioch[tags2_rxn_num[tags[n]]]
            for n, i in enumerate(ars)]
@@ -247,9 +247,10 @@ def get_PL_ars4mech_BPCH(fam='LOx', ref_spec='O3', Ox_fam_dict=None,
                                  weight_lat=True, wd=Var_rc['wd'],
                                  trop_limit=Var_rc['trop_limit'],
                                  rm_strat=Var_rc['rm_strat'], \
-                                 # provide shared data arrays averaged over time...
-                                 molecs=Data_rc['molecs'].mean(axis=-1),
-                                 t_p=t_ps) \
+                                 # provide shared data arrays averaged over time...
+                                 molecs=Data_rc['molecs'].mean(
+                                     axis=-1),
+                                 t_p=t_ps) \
            for i in ars]
     else:
         pass
@@ -377,6 +378,7 @@ def get_reactants_and_products4tagged_fam(fam='LOx', KPP_output_mech=None,
     df['rxn str'] = s
     # df.index = df.index - 1 # Adjust Fortran numbering in output to Pythonic #
     # Split apart reaction str to give products and reactants
+
     def extract_products(input):
         return str(input).split(' --> ')[-1].strip()
@@ -404,6 +406,7 @@ def extract_reactants(input):
         if a__ != b_[n]:
             print(a__, b_[n])
     # Only consider reaction that include family in the products
+
     def fam_in_rxn(input):
         return (fam in input)
     rtn_vars = ['react', 'prod', 'KPP input react']
@@ -476,6 +479,7 @@ def KPP_eqn_file_species(folder=None, filename=None, debug=False):
     if debug:
         print(num2read_line_from)
     # Process extracted lines to dict of names and descriptions...
+
     def strip_line(line_):
         try:
             name, descrip = line_.strip().split('= IGNORE;')
@@ -502,21 +506,21 @@ def get_dicts_of_KPP_eqn_file_reactions(folder=None, filename=None,
     import string
     # Local vars
     KPP_rxn_funcs = (
-        # Main KPP functions
-        'HET', 'PHOTOL', 'GCARR','GCJPLPR',
-#        'GC_',
-        # Specialist KPP functions for mechanism
-        'GC_HO2HO2', 'GC_OHCO',
-        'GC_RO2NO', 'GC_OHHNO3', 'GC_RO2HO2', 'GC_HACOHA',
-        'GC_RO2HO2', 'GC_HACOHB', 'GC_TBRANCH', 'GCJPLEQ',
-        'GC_GLYCOHA', 'GC_GLYCOHB', 'GC_DMSOH', 'GC_GLYXNO3',
-        'GC_HO2NO3',
-        # Include ISOP reaction functions (inc. GC)
-        'GC_ALK', 'GC_NIT', 'GC_PAN', 'GC_EPO', 'GC_ISO1', 'GC_ISO2',
-        # KPP function without GC prefix
-#        'NIT', 'PAN', 'ALK', 'EPO',
-        'ARRPLUS', 'TUNPLUS',
-        '1.33E-13+3.82E-11*exp', # Why is this function not in gckpp.kpp?
+        # Main KPP functions
+        'HET', 'PHOTOL', 'GCARR', 'GCJPLPR',
+        # 'GC_',
+        # Specialist KPP functions for mechanism
+        'GC_HO2HO2', 'GC_OHCO',
+        'GC_RO2NO', 'GC_OHHNO3', 'GC_RO2HO2', 'GC_HACOHA',
+        'GC_RO2HO2', 'GC_HACOHB', 'GC_TBRANCH', 'GCJPLEQ',
+        'GC_GLYCOHA', 'GC_GLYCOHB', 'GC_DMSOH', 'GC_GLYXNO3',
+        'GC_HO2NO3',
+        # Include ISOP reaction functions (inc. GC)
+        'GC_ALK', 'GC_NIT', 'GC_PAN', 'GC_EPO', 'GC_ISO1', 'GC_ISO2',
+        # KPP function without GC prefix
+        # 'NIT', 'PAN', 'ALK', 'EPO',
+        'ARRPLUS', 'TUNPLUS',
+        '1.33E-13+3.82E-11*exp',  # Why is this function not in gckpp.kpp?
     )
     # Loop lines in file
     with open(folder+filename, 'r') as file_:
@@ -564,6 +568,7 @@ def get_dicts_of_KPP_eqn_file_reactions(folder=None, filename=None,
             if debug:
                 print(n_line, line_)
     # Remove spacing errors in KPP eqn entries
+
     def remove_KPP_spacing_errors(input):
         """ Remove differences in spacing in KPP """
         # Numbers
@@ -594,6 +599,7 @@ def split_KPP_rxn_str_into_chunks(rxn, KPP_line_max=43, debug=False):
     """
     print(rxn)
     # Sub-function to cut strings to last " +"
+
     def return_string_in_parts_ending_with_plus(input):
         """ return string upto last ' +' in string """
         # Find the last ' + ' in string
@@ -721,7 +727,7 @@ def process_KPP_rxn_dicts2dfs(rxn_dicts=None, Use_FORTRAN_KPP_numbering=True,
     return rxn_dicts
 
 
-def split_combined_KPP_eqns(list_in, debug=False ):
+def split_combined_KPP_eqns(list_in, debug=False):
     """ Split combined KPP eqn strings """
     # Indices of KPP eqn strings with more than one "="
     inds = []
@@ -1418,10 +1424,10 @@ def get_Ox_fam_dicts(fam='LOx', ref_spec='O3', GC_version='v12.9.1',
     }
     # - Extract data for Ox loss for family from model
     ars = get_PL_ars4mech_NetCDF(Ox_fam_dict=Ox_fam_dict,
-                                rm_strat=rm_strat, wd=wd,
-                                fam=fam, ref_spec=ref_spec,
-                                StateMet=StateMet,
-                                weight_by_molecs=weight_by_molecs)
+                                 rm_strat=rm_strat, wd=wd,
+                                 fam=fam, ref_spec=ref_spec,
+                                 StateMet=StateMet,
+                                 weight_by_molecs=weight_by_molecs)
     # Convert this to a dictionary and return
     # Inc. lists of sorted family names and ars in returned dictionary
@@ -1611,7 +1617,7 @@ def calc_fam_loss_by_route(wd=None, fam='LOx', ref_spec='O3',
         print(dfFam)
     # return dictionaries of LOx by reaction or by family (in Tg O3)
     if rtn_by_rxn:
-#        df.loc[:,TotalFluxVar] = df.loc[:,TotalFluxVar] /1E12
+        # df.loc[:,TotalFluxVar] = df.loc[:,TotalFluxVar] /1E12
         return df
     if rtn_by_fam:
         return dfFam
@@ -1642,7 +1648,7 @@ def add_tags4strs2mech(rxn_dicts, tagged_rxns={},
     """
     if isinstance(search_strs, type(None)):
         search_strs = 'BrSAL', 'CH3Br', 'CH3Cl', 'CH2Cl2', 'CHCl3', '0.150IBr',
-        search_strs += 'HOBr','ClNO2',
+        search_strs += 'HOBr', 'ClNO2',
     # Setup regex to find existing tags in reaction strings
     re1 = '(\\+)'  # Any Single Character 1
     re2 = '(\\s+)'  # White Space 1
@@ -1670,7 +1676,7 @@ def add_tags4strs2mech(rxn_dicts, tagged_rxns={},
             # Update the counter (NOTE: counter starts from 1)
             counter += 1
             if debug:
-                print( counter, current_tag )
+                print(counter, current_tag)
             # Check if rxn already tagged, if so just use that tag.
             m = rg.search(rxn_str)
             if m:
@@ -1756,7 +1762,7 @@ def GC_OHCO(A0, B0, C0, NUMDEN=1E4, TEMP=298.0, PRESS=1000.0):
     KHI1 = 1.1E-12*(300./TEMP)**(-1.3E0)
     XYRAT1 = KLO1 * NUMDEN / KHI1
     BLOG1 = np.log10(XYRAT1)
-    FEXP1 = 1.E+0/(1.E+0 + BLOG1*BLOG1 )
+    FEXP1 = 1.E+0/(1.E+0 + BLOG1*BLOG1)
     KCO1 = KLO1 * NUMDEN * 0.6**FEXP1/(1.e+0+XYRAT1)
     KLO2 = 1.5E-13 * (300/TEMP)**(0.E+0)
     KHI2 = 2.1e+09 * (300/TEMP)**(-6.1E+0)
diff --git a/AC_tools/core.py b/AC_tools/core.py
index 80239d7..4f00170 100755
--- a/AC_tools/core.py
+++ b/AC_tools/core.py
@@ -405,7 +405,7 @@ def get_latlonalt4res(res=None, centre=True, hPa=False, nest=None,
             lon = np.arange(-180, 180, step_size)
         else:
             lat = np.array([-90]+list(np.arange(-89-(step_size/2),
-                                                90+(step_size/2),step_size))+[90]
+                                                90+(step_size/2), step_size))+[90]
                            )
             lon = np.arange(-180-(step_size/2), 180+(step_size/2), step_size)
     # Manually set values for (generic?) 0.5x0.5 grid
@@ -416,7 +416,7 @@ def get_latlonalt4res(res=None, centre=True, hPa=False, nest=None,
             lon = np.arange(-180, 180, step_size)
         else:
             lat = np.array([-90]+list(np.arange(-89.75, 90+(step_size/2),
-                                                step_size))+[90]
+                                                step_size))+[90]
                            )
             lon = np.arange(-180-(step_size/2), 180+(step_size/2), step_size)
     # Manually set values for 0.1x0.1
@@ -561,13 +561,13 @@ def element_in_str(element):
     if also_return_GC_version:
         # list GEOS-Chem versions (written with dashes and underscores)
         versions = [
-        'v11-02', 'v12.0.0',
-        'v11-01', 'v11_01', 'v10-01', 'v10_01', 'v9-02', 'v9_02', 'v9-01-03',
-        'v9_01_03', 'v9-01-02', 'v9_01_02', 'v9-01-01', 'v9_01_01', 'v8-03-02',
-        'v8_03_02', 'v8-03-01', 'v8_03_01', 'v8-02-04', 'v8_02_04', 'v8-02-03',
-        'v8_02_03', 'v8-02-02', 'v8_02_02', 'v8-02-01', 'v8_02_01', 'v8-01-04',
-        'v8_01_04', 'v8-01-03', 'v8_01_03', 'v8-01-02', 'v8_01_02', 'v8-01-01',
-        'v8_01_01', 'v7-04-13', 'v7_04_13', 'v7-04-12', 'v7_04_12'
+            'v11-02', 'v12.0.0',
+            'v11-01', 'v11_01', 'v10-01', 'v10_01', 'v9-02', 'v9_02', 'v9-01-03',
+            'v9_01_03', 'v9-01-02', 'v9_01_02', 'v9-01-01', 'v9_01_01', 'v8-03-02',
+            'v8_03_02', 'v8-03-01', 'v8_03_01', 'v8-02-04', 'v8_02_04', 'v8-02-03',
+            'v8_02_03', 'v8-02-02', 'v8_02_02', 'v8-02-01', 'v8_02_01', 'v8-01-04',
+            'v8_01_04', 'v8-01-03', 'v8_01_03', 'v8-01-02', 'v8_01_02', 'v8-01-01',
+            'v8_01_01', 'v7-04-13', 'v7_04_13', 'v7-04-12', 'v7_04_12'
         ]
         df = pd.DataFrame(versions, columns=['Versions'])
         if debug:
@@ -1386,5 +1386,3 @@ def get_scientific_number(number, precision, string=False):
         return float(out)
     else:
         return out
-
-
diff --git a/AC_tools/generic.py b/AC_tools/generic.py
index 39990f3..af599f1 100755
--- a/AC_tools/generic.py
+++ b/AC_tools/generic.py
@@ -322,6 +322,7 @@ def get_linear_ODR(x=None, y=None, maxit=5000, beta0=(0, 1),
     """
     import scipy.odr
     # Setup linear model to fit
+
     def f(B, x):
         '''Linear function y = m*x + b'''
         # B is a vector of the parameters.
@@ -992,7 +993,7 @@ def rm_file(folder=None, filename=None, verbose=False, debug=False):
     except FileNotFoundError:
         pstr = "WARNING: File was not removed as it doesn't exit: {}"
         if verbose:
-            print( pstr.format(folder+filename) )
+            print(pstr.format(folder+filename))
 
 
 def get_stats_on_files_in_folder_as_dict(folder=None):
@@ -1002,7 +1003,7 @@ def get_stats_on_files_in_folder_as_dict(folder=None):
     # Setup a dictionary to hold info on the files
     d = {}
     # list all files in the folder
-    files = glob.glob( '{}/*'.format(folder) )
+    files = glob.glob('{}/*'.format(folder))
     nfiles = len(files)
     d['#'] = nfiles
     if len(files) == 0:
@@ -1101,7 +1102,7 @@ def calc_4D_idx_in_ds(ds=None, df=None, LonVar='lon', LatVar='lat',
     hPa_idx = [find_nearest(ds_hPa, i) for i in df[AltVar].values]
     time_idx = [find_nearest(ds_time, i) for i in df.index.values]
     # Return a dictionary of the values
-    return {LatVar:lat_idx, LonVar:lon_idx, TimeVar:time_idx, AltVar:hPa_idx}
+    return {LatVar: lat_idx, LonVar: lon_idx, TimeVar: time_idx, AltVar: hPa_idx}
 
 
 def extract_ds4df_locs(ds=None, df=None, LonVar='lon', LatVar='lat',
@@ -1143,8 +1144,8 @@ def extract_ds4df_locs(ds=None, df=None, LonVar='lon', LatVar='lat',
                            dsLonVar=dsLonVar, dsLatVar=dsLatVar,
                            dsTimeVar=dsTimeVar)
     # Loop by timestamp
-    times2use = df.index.values
-    for n, time in enumerate( times2use ):
+    times2use = df.index.values
+    for n, time in enumerate(times2use):
         # get the times for a specific data
         lat_idx = d[LatVar][n]
         lon_idx = d[LonVar][n]
@@ -1153,7 +1154,7 @@ def extract_ds4df_locs(ds=None, df=None, LonVar='lon', LatVar='lat',
         # en masse extract indexes
         ds_tmp = ds.isel(lat=lat_idx, lon=lon_idx, time=time_idx, lev=lev_idx)
-        vals = [ ds_tmp[i].data for i in vars2extract ]
+        vals = [ds_tmp[i].data for i in vars2extract]
         vals = np.array(vals)
         for nval, val in enumerate(vals):
             dfN.loc[vars2extract[nval], time] = vals[nval]
@@ -1179,13 +1180,13 @@ def save_ds2disk_then_reload(ds, savename='TEMP_NetCDF.nc', folder='./',
     """
     if debug:
         PrtStr = 'Saving NetCDF {} @ {} with dims: {}'
-        print( PrtStr.format(savename, datetime.datetime.now(), ds.dims ) )
-        print( '... and variables: {}'.format( list(ds.data_vars) ) )
+        print(PrtStr.format(savename, datetime.datetime.now(), ds.dims))
+        print('... and variables: {}'.format(list(ds.data_vars)))
     # Save the dataset to disk
     ds.to_netcdf(folder+savename)
     if debug:
         PrtStr = 'Saved NetCDF {} @ {} with dims: {}'
-        print( PrtStr.format(savename, datetime.datetime.now(), ds.dims ) )
+        print(PrtStr.format(savename, datetime.datetime.now(), ds.dims))
     # Delete the dataset
     if delete_ds:
         del ds
@@ -1260,7 +1261,7 @@ def get_table_from_copernicus_article(URL=None, TableNum=5,
     tree = ET.parse(urllib.request.urlopen(URL))
     root = tree.getroot()
     # Tables for Copernicus publications are in the Oasis namespace
-    tags = [elem.tag for elem in root.iter() ]
+    tags = [elem.tag for elem in root.iter()]
     tags2use = [i for i in tags if 'oasis' in i.lower()]
     TableTag = '{http://docs.oasis-open.org/ns/oasis-exchange/table}table'
     ColNameVar = 'colname'
@@ -1268,8 +1269,8 @@ def get_table_from_copernicus_article(URL=None, TableNum=5,
     tables = [i for i in root.iter(tag=TableTag)]
     # Select table from list (update article number to python index)
     AssStr = 'WARNING: Table # {} requested, but only {} tables in article.'
-    assert TableNum<=len(tables), AssStr.format(TableNum, len(tables))
-    table = tables[ TableNum-1 ]
+    assert TableNum <= len(tables), AssStr.format(TableNum, len(tables))
+    table = tables[TableNum-1]
     if debug:
         print(table)
     table_dict = {}
@@ -1291,5 +1292,3 @@ def get_table_from_copernicus_article(URL=None, TableNum=5,
             print(key, table_dict[key])
         df[key] = table_dict[key].split()
     return df
-
-
diff --git a/AC_tools/mask.py b/AC_tools/mask.py
index 510e2d1..47ae877 100644
--- a/AC_tools/mask.py
+++ b/AC_tools/mask.py
@@ -743,7 +743,7 @@ def get_analysis_masks(masks='basic', hPa=None, M_all=False, res='4x5',
     if use_multiply_method:
         maskes = [mask_all_but(i, trop_limit=trop_limit, mask3D=True,
                                use_multiply_method=True, res=res)
-                 for i in mtitles]
+                  for i in mtitles]
         # if comparison with saiz-lopez 2014,
         if M_all:
             ind = [n for n, i in enumerate(mtitles) if not ('MBL' in i)]
@@ -753,7 +753,7 @@ def get_analysis_masks(masks='basic', hPa=None, M_all=False, res='4x5',
     else:
         maskes = [mask_all_but(i, trop_limit=trop_limit, mask3D=True,
                                use_multiply_method=False, res=res)
-                 for i in mtitles]
+                  for i in mtitles]
         # If not 'use_multiply_method', then invert hPa masks
         sects3D = [np.logical_not(i) for i in sects3D]
     # Add to mask and mask title lists
@@ -942,11 +942,11 @@ def mask_all_but(region='All', M_all=False, saizlopez=False,
     elif case == 13:  # 'Oceanic Tropics'
         mask = np.ma.mask_or(ocean_unmasked(res=res),
                              tropics_unmasked(res=res,
-                                             saizlopez=saizlopez))
+                                              saizlopez=saizlopez))
     elif case == 14:  # 'Land Tropics'
         mask = np.ma.mask_or(land_unmasked(res=res),
                              tropics_unmasked(res=res,
-                                             saizlopez=saizlopez))
+                                              saizlopez=saizlopez))
     elif case == 15:  # 'All Sur.'
         mask = surface_unmasked(res=res)
     elif case == 16:  # 'Ocean Sur.'
@@ -1012,11 +1012,11 @@ def mask_all_but(region='All', M_all=False, saizlopez=False,
     elif case == 13:
         mask = np.ma.mask_or(ocean_unmasked(res=res),
                              tropics_unmasked(res=res,
-                                             saizlopez=saizlopez))
+                                              saizlopez=saizlopez))
     elif case == 14:
         mask = np.ma.mask_or(land_unmasked(res=res),
                              tropics_unmasked(res=res,
-                                             saizlopez=saizlopez))
+                                              saizlopez=saizlopez))
     elif case == 15:  # 'All Sur.'
         mask = surface_unmasked(res=res)
     elif case == 16:  # 'Ocean Sur.'
diff --git a/AC_tools/observations.py b/AC_tools/observations.py
index 6d91145..0582817 100644
--- a/AC_tools/observations.py
+++ b/AC_tools/observations.py
@@ -13,19 +13,19 @@
 import json
 
-
 def get_FAAM_locations_as_df(flight_ID='C225'):
     """
     Retive the FAAM BAE146 position (current of historic) from the html website
     by flight ID
     """
     # What is the root URL for the data?
-    URL = 'https://www.faam.ac.uk/gluxe/position/query?flight={}'.format(flight_ID)
+    URL = 'https://www.faam.ac.uk/gluxe/position/query?flight={}'.format(
+        flight_ID)
     # Parse the URL via requests
-    f = urllib.request.urlopen( URL )
-    soup = BeautifulSoup( f )
+    f = urllib.request.urlopen(URL)
+    soup = BeautifulSoup(f)
     s = soup.get_text()
     # Parse the data a JSON string
     json_acceptable_string = s.replace("'", "\"")
     d = json.loads(json_acceptable_string)
     # Return as a dataframe
-    return pd.DataFrame( d )
+    return pd.DataFrame(d)
diff --git a/AC_tools/obsolete/plotting_REDUNDANT.py b/AC_tools/obsolete/plotting_REDUNDANT.py
index f518b00..8a928d8 100644
--- a/AC_tools/obsolete/plotting_REDUNDANT.py
+++ b/AC_tools/obsolete/plotting_REDUNDANT.py
@@ -327,7 +327,7 @@ def plot_map(arr, return_m=False, grid=False, centre=False, cmap=None, no_cb=Fal
     if not NEW_VERSION:
 
 ####################################################################################################
-    # Old version is here
+        # Old version is here
 ####################################################################################################
         # -------------- Linear plots -------------------------------
         # standard plot
@@ -2221,7 +2221,8 @@ def plot_specs_zonal_change_annual2pdf(Vars, res='4x5', dpi=160,
 
             arr = Vars[n, ...].mean(axis=0).mean(axis=-1)*scale
             if set_window:
-                arr = arr[..., get_gc_lat(lat_0, res=res): get_gc_lat(lat_1, res=res), :]
+                arr = arr[..., get_gc_lat(lat_0, res=res)
+                          : get_gc_lat(lat_1, res=res), :]
             # plot up spatial surface change
             zonal_plot(arr, fig, ax=ax, title=None, debug=debug,
                        tropics=False,
diff --git a/AC_tools/obsolete/variables_REDUNDANT.py b/AC_tools/obsolete/variables_REDUNDANT.py
index ae02e98..c4d857e 100644
--- a/AC_tools/obsolete/variables_REDUNDANT.py
+++ b/AC_tools/obsolete/variables_REDUNDANT.py
@@ -324,7 +324,7 @@ def num2spec(num=69, rtn_dict=False, invert=False, ver='1.7'):
     # special case for dev version?
     if any([(ver == i) for i in ('1.6', '1.6.2',)]):
         d = GC_var('GCFP_d2TRA_justTRA_1.6')
-    if any([(ver == i) for i in('1.6.3', '1.6.4')]):
+    if any([(ver == i) for i in ('1.6.3', '1.6.4')]):
         d = GC_var('GCFP_d2TRA_justTRA_1.6.3')
     # Then slice
     nums = [int(i[4:]) for i in list(d.keys())]
@@ -401,8 +401,6 @@ def get_ctm_nc_var(variable):
     return d[variable]
 
 
-
-
 # -------------- Non-generic Functions
 #
 # NOTE(s):
@@ -445,7 +443,3 @@ def get_global_GAW_sites(f='gaw_site_list_global.h5'):
     ]
     [vars.pop(vars.index(i)) for i in sites2exclude]
     return vars
-
-
-
-
diff --git a/AC_tools/planeflight.py b/AC_tools/planeflight.py
index 7de2a02..46346e2 100755
--- a/AC_tools/planeflight.py
+++ b/AC_tools/planeflight.py
@@ -420,6 +420,7 @@ def pf_csv2pandas(file=None, vars=None, epoch=False, r_vars=False,
     else:
         return df
 
+
 def get_pf_from_folder(folder='./', dates2use=None, debug=False):
     """
     Get GEOS-Chem planeflight output from folder
@@ -432,10 +433,11 @@ def get_pf_from_folder(folder='./', dates2use=None, debug=False):
     if not isinstance(dates2use, type(None)):
         FileRootsVar = 'FileRoots'
         df = pd.DataFrame(files)
-        df = pd.DataFrame({FileRootsVar:files})
+        df = pd.DataFrame({FileRootsVar: files})
         # Which date format to look for in filenames?
-        format='%Y%m%d%H%M'
+        format = '%Y%m%d%H%M'
         # Setup a helper function to extract dates from file strings
+
        def get_date_from_filename(x, format=format):
             """
             Extract Dates from filenames
@@ -451,7 +453,7 @@ def get_date_from_filename(x, format=format):
         dtVar = 'datetime'
         df[dtVar] = df[FileRootsVar].map(get_date_from_filename)
         bool = df[dtVar].isin(dates2use)
-        files = list(df.loc[bool,FileRootsVar].values)
+        files = list(df.loc[bool, FileRootsVar].values)
     # Get headers
     ALL_vars, sites = get_pf_headers(files[0], debug=debug)
     # Extract dfs
@@ -544,11 +546,11 @@ def mk_planeflight_input4FAAM_flight(folder=None, ds=None,
     filename = 'core_faam_*_{}_1hz.nc'.format(flight_ID.lower())
     file2use = glob.glob(folder+filename)
     if len(file2use) > 1:
-        print('WARNING: more that one file found! (so using latest file)' )
+        print('WARNING: more than one file found! (so using latest file)')
         print(file2use)
-    ds = xr.open_dataset( file2use[0] )
+    ds = xr.open_dataset(file2use[0])
     # Only select the variable of intereest and drop where these are NaNs
-    df = ds[ [PressVar, LatVar, LonVar, TimeVar] ].to_dataframe()
+    df = ds[[PressVar, LatVar, LonVar, TimeVar]].to_dataframe()
     df = df.dropna()
     # Add a location name (as Type)
     df[LocVar] = LocName
@@ -569,6 +571,7 @@ def mk_planeflight_input4FAAM_flight(folder=None, ds=None,
                            num_tracers=num_tracers, rxn_nums=rxn_nums,
                            Username=Username,)
 
+
 def reprocess_split_pf_output_over_2_lines(folder, save_original_file=True):
     """
     Combine planeflight dat file lines where output split over 2 lines
@@ -577,7 +580,7 @@ def reprocess_split_pf_output_over_2_lines(folder, save_original_file=True):
     for file2use in files2use:
         with open(file2use, 'r') as file:
             lines = [i.strip() for i in file]
-            file.close() # Force close
+            file.close()  # Force close
         if save_original_file:
             os.rename(file2use, file2use+'.orig')
         else:
diff --git a/AC_tools/plotting.py b/AC_tools/plotting.py
index aea0bb7..4595cb3 100755
--- a/AC_tools/plotting.py
+++ b/AC_tools/plotting.py
@@ -90,7 +90,7 @@ def quick_map_plot(ds, var2plot=None, extra_str='', projection=ccrs.Robinson,
         Pstr = "In spatial plot of {}, min={} and max={}"
         min_ = float(ds[var2plot].values.min())
         max_ = float(ds[var2plot].values.max())
-        print(Pstr.format(var2plot, min_, max_ ) )
+        print(Pstr.format(var2plot, min_, max_))
     # Call plot via imshow...
     im = ds[var2plot].plot.imshow(x=LonVar, y=LatVar, ax=ax,
                                   transform=ccrs.PlateCarree(),
@@ -154,13 +154,13 @@ def ds2zonal_plot(ds=None, var2plot=None, StateMet=None, AltVar='lev',
     # StateMet['Met_PMID']
     LatLonAlt_dict = gchemgrid(rtn_dict=True)
     alt_array = LatLonAlt_dict['c_km_geos5']
-    ds2plot = ds2plot.assign_coords({'lev':alt_array[:len(ds.lev.values)]})
+    ds2plot = ds2plot.assign_coords({'lev': alt_array[:len(ds.lev.values)]})
     # print out the min and max of plotted values
     if verbose:
         Pstr = "In zonal plot of {}, min={}, max={}"
         min_ = float(ds2plot[var2plot].values.min())
         max_ = float(ds2plot[var2plot].values.max())
-        print(Pstr.format(var2plot, min_, max_ ) )
+        print(Pstr.format(var2plot, min_, max_))
     # Now call plot via xr.dataset
     lat = np.array(ds2plot.lat.values)
     alt = np.array(ds2plot.lev.values)
@@ -176,7 +176,7 @@ def ds2zonal_plot(ds=None, var2plot=None, StateMet=None, AltVar='lev',
     # Update axis labels
     ax.set_xlabel('Latitude ($^{\circ}$N)')
     if debug:
-        print( 'plt_ylabel', plt_ylabel)
+        print('plt_ylabel', plt_ylabel)
     if plt_ylabel:
         ax.set_ylabel('Altitude (km)')
     else:
@@ -327,8 +327,6 @@ def plt_df_X_vs_Y_hexbin(x=None, y=None, c=None, xscale='linear',
         plt.show()
 
 
-
-
 def plot_up_diel_by_season(spec='O3', sub_str='UK+EIRE', fig=None,
                            dfs=None, color_dict={'Obs.': 'k', 'Model': 'r'},
                            stat2plot='50%', title=None,
@@ -374,7 +372,7 @@ def plot_up_diel_by_season(spec='O3', sub_str='UK+EIRE', fig=None,
         'DJF'
     ])
     season2text = {
-        'DJF':'Dec-Jan-Feb', 'MAM': 'Mar-Apr-May', 'JJA': 'Jun-Jul-Aug', 'SON':'Sep-Oct-Nov', None: None,
+        'DJF': 'Dec-Jan-Feb', 'MAM': 'Mar-Apr-May', 'JJA': 'Jun-Jul-Aug', 'SON': 'Sep-Oct-Nov', None: None,
     }
     if use_letters4months:
         pass
@@ -535,7 +533,6 @@ def BASIC_diel_plot(fig=None, ax=None, dates=None, data=None, color='red',
     time_labels = df['data'][stat2plot].index.values
     time_labels = [str(int(i)) for i in time_labels]
 
-
     # make sure the values with leading zeros drop these
     index = [float(i) for i in time_labels]
     if debug:
@@ -914,7 +911,7 @@ def plot_zonal_figure(arr, fixcb=None, cb_sigfig=2, ax=None,
     if any([arr.shape[0] == i for i in (72, 144, 121, 177)]):
         # arr = arr.mean(axis=0)
         arr = molec_weighted_avg_BPCH(arr, weight_lon=True, res=res,
-                                     trop_limit=trop_limit, rm_strat=False, wd=wd)
+                                      trop_limit=trop_limit, rm_strat=False, wd=wd)
 
     # Create figure if not provided
     if isinstance(fig, type(None)):
@@ -945,7 +942,7 @@ def plot_zonal_figure(arr, fixcb=None, cb_sigfig=2, ax=None,
     # If log plot - overwrite lvls
     if log:
-        # Get logarithmically spaced integers
+        #    Get logarithmically spaced integers
         lvls = np.logspace(np.log10(fixcb[0]), np.log10(fixcb[1]),
                            num=nticks)
         # Normalise to Log space
@@ -2207,7 +2204,7 @@ def mk_discrete_cmap(lvls=None, cmap=None, rtn_norm=False,
     # Extract colors linearly from colormap
     cmaplist = cmap(np.linspace(0, 1, nticks))
     # Create the new discrete colormap object
-    cmap_name = '{}_{}'.format( cmap.name, str(nticks) )
+    cmap_name = '{}_{}'.format(cmap.name, str(nticks))
     cmap = cmap.from_list(cmap_name, cmaplist, nticks)
     # Make a normalisation object... - define the bins and normalize
     if rtn_norm:
@@ -2297,6 +2294,7 @@ def get_CB_color_cycle():
     ]
     return CB_color_cycle
 
+
 def plot_vertical_fam_loss_by_route(fam='LOx', ref_spec='O3',
                                     wd=None, Mechanism='Halogens',
                                     rm_strat=False,
@@ -2453,8 +2451,8 @@ def plt_box_area_on_global_map(ds=None, var2use='DXYP__DXYP',
     # Just get an example dataset
     ds = ds[[var2use]]
     # Check input values for lat and lon range to plotting box extent
-    assert y0<y1
-    assert x0<x1
+    assert y0 < y1
+    assert x0 < x1
     bool1 = ((ds.lon >= x0) & (ds.lon <= x1)).values
     bool2 = ((ds.lat >= y0) & (ds.lat <= y1)).values
@@ -2470,11 +2468,11 @@ def plt_box_area_on_global_map(ds=None, var2use='DXYP__DXYP',
     ax = fig.add_subplot(111, projection=projection, aspect=aspect, alpha=alpha)
     ds[var2use].plot.imshow(x=LonVar, y=LatVar, ax=ax, cmap=cmap,
-                           transform=ccrs.PlateCarree())
+                            transform=ccrs.PlateCarree())
     # Beautify the figure/plot
     ax.coastlines()
     # Force global perspective
-    ax.set_global() # this will force a global perspective
+    ax.set_global()  # this will force a global perspective
     # Remove the colour-bar and force a tighter layout around map
     fig.delaxes(fig.axes[-1])
     plt.tight_layout()
diff --git a/AC_tools/variables.py b/AC_tools/variables.py
index 341cda2..3efec07 100755
--- a/AC_tools/variables.py
+++ b/AC_tools/variables.py
@@ -386,18 +386,19 @@ def get_loc(loc=None, rtn_dict=False, debug=False):
         'HFM': (-72.3000030518, 42.9000015259, 340.),
         # - ARNA locations
         'Dakar': (-17.467686, 14.716677, 22),
-        'DSS' : (-17.467686, 14.716677, 22), # Dakar airport code (as above)
+        'DSS': (-17.467686, 14.716677, 22),  # Dakar airport code (as above)
         'Sao Vicente Airport': (-25.0569, 16.8331, 20),
-        'VXE' : (-25.0569, 16.8331, 20), # Sao Vincite code (as above)
+        'VXE': (-25.0569, 16.8331, 20),  # Sao Vincite code (as above)
         'Praia Airport': (-23.4939, 14.9242, 70),
-        'RAI' : (-23.4939, 14.9242, 70), # Praia airport code (as above)
+        'RAI': (-23.4939, 14.9242, 70),  # Praia airport code (as above)
         # Other "nearby" airports
-        'Gran Canaria Airport' : (-15.386667, 27.931944, 24),
-        'LPA' : (-15.386667, 27.931944, 24), # Gran Canaria airport code (as above)
-        'Lisbon Airport' : (-9.134167, 38.774167, 114),
-        'LIS' : (-9.134167, 38.774167, 114), # Lisbon airport code (as above)
-        'Paris (Charles de Gaulle) Airport' : (-2.547778, 49.009722, 119),
-        'CDG' : (-2.547778, 49.009722, 119), # Paris airport code (as above)
+        'Gran Canaria Airport': (-15.386667, 27.931944, 24),
+        # Gran Canaria airport code (as above)
+        'LPA': (-15.386667, 27.931944, 24),
+        'Lisbon Airport': (-9.134167, 38.774167, 114),
+        'LIS': (-9.134167, 38.774167, 114),  # Lisbon airport code (as above)
+        'Paris (Charles de Gaulle) Airport': (-2.547778, 49.009722, 119),
+        'CDG': (-2.547778, 49.009722, 119),  # Paris airport code (as above)
     }
     if rtn_dict:
         return loc_dict
@@ -593,8 +594,8 @@ def latex_spec_name(input_x, debug=False):
         'SOAP': 'SOAP', 'SOAS': 'SOAS', 'TOLU': 'TOLU', 'XYLE': 'Xylene',
         # Extra GEOS-chem species - in standard as of v12.9.1
-        'O1D' : 'O($^{1}$D)', 'O': 'O', 'hv': '$h\\nu$',
-    }
+        'O1D': 'O($^{1}$D)', 'O': 'O', 'hv': '$h\\nu$',
+    }
     return spec_dict[input_x]
@@ -698,10 +699,9 @@ def get_spec_properties():
     Get the species properties using the GEOS-Chem json files in GCPy
     """
-
-
     pass
 
+
 def spec_stoich(spec, IO=False, I=False, NO=False, OH=False, N=False,
                 C=False, Br=False, Cl=False, S=False, ref_spec=None,
                 debug=False):
@@ -923,7 +923,7 @@ def spec_stoich(spec, IO=False, I=False, NO=False, OH=False, N=False,
     except:
         prt_str = '!!!!!!! WARNING - Kludge assumming stoichiometry = 1.0, for'
         prt_str += ' {} (ref_spec given as: {})'
-        print(( prt_str.format(spec, ref_spec)) )
+        print((prt_str.format(spec, ref_spec)))
         return 1.0
@@ -1017,7 +1017,7 @@ def tra_unit(x, scale=False, adjustment=False, adjust=True, global_unit=False,
         'HNO3/NOx': 'pptv', 'HNO3+NIT': 'pptv', 'HNO3+NO3': 'pptv',
         'NIT/NOx': 'pptv', 'HNO3/NIT': 'pptv',
         #
-        'Cl-': 'pptv', 'pFe':'pptv',
+        'Cl-': 'pptv', 'pFe': 'pptv',
         # PM
         'PM10': '$\mu$g m$^{-3}$', 'PM2.5': '$\mu$g m$^{-3}$',
         'PM2.5(dust)': '$\mu$g m$^{-3}$',
@@ -1289,7 +1289,7 @@ def GC_var(input_x=None, rtn_dict=False, debug=False):
         'IxOy': ['IO', 'OIO', 'I2O2', 'I2O3', 'I2O4'],
         'Iy+AERO': [ \
             'I2', 'HOI', 'IO', 'OIO', 'HI', 'INO', 'IONO', 'IONO2', 'I2O2', \
-            'I2O3', 'I2O4', 'I', ]+['ICl', 'IBr']+['AERI','ISALA','ISALC'],
+            'I2O3', 'I2O4', 'I', ]+['ICl', 'IBr']+['AERI', 'ISALA', 'ISALC'],
         'Iy1.1': [ \
             'I2', 'HOI', 'IO', 'OIO', 'HI', 'IONO', 'IONO2', 'I2O2', \
             'I2O4', 'I', 'INO'],
diff --git a/scripts/KPP_analyse_vertical_Ox_loss_by_route_NetCDF.py b/scripts/KPP_analyse_vertical_Ox_loss_by_route_NetCDF.py
index f21219c..3735190 100644
--- a/scripts/KPP_analyse_vertical_Ox_loss_by_route_NetCDF.py
+++ b/scripts/KPP_analyse_vertical_Ox_loss_by_route_NetCDF.py
@@ -59,7 +59,7 @@ def main(wd=None, CODE_wd=None, verbose=False, debug=False):
     # Get the dictionary of the KPP mechanism.
     Ox_fam_dict = AC.get_Ox_fam_dicts(fam=fam, ref_spec=ref_spec,
                                       Mechanism=Mechanism,
-#                                      tag_prefix=tag_prefix,
+                                      # tag_prefix=tag_prefix,
                                       wd=wd, CODE_wd=CODE_wd,
                                       StateMet=StateMet,
                                       rm_strat=True,
@@ -79,23 +79,15 @@ def main(wd=None, CODE_wd=None, verbose=False, debug=False):
     # Get the dictionary of the KPP mechanism.
     Ox_fam_dict = AC.get_Ox_fam_dicts(fam=fam, ref_spec=ref_spec,
                                       Mechanism=Mechanism,
-#                                      tag_prefix=tag_prefix,
+                                      # tag_prefix=tag_prefix,
                                       wd=wd, CODE_wd=CODE_wd,
                                       StateMet=StateMet,
                                       rm_strat=True,
                                       weight_by_molecs=False,
                                       )
-
     # Analyse odd oxygen (Ox) loss budget via route (chemical family)
     suffix = 'v12.9.1'
     df = AC.calc_fam_loss_by_route(Ox_fam_dict=Ox_fam_dict,
                                    Mechanism=Mechanism,
                                    suffix=suffix)
-
-
-
-
-
-
-
diff --git a/scripts/KPP_mk_tagged_GC_Mechanism.py b/scripts/KPP_mk_tagged_GC_Mechanism.py
index b10e4b9..e2ebd66 100644
--- a/scripts/KPP_mk_tagged_GC_Mechanism.py
+++ b/scripts/KPP_mk_tagged_GC_Mechanism.py
@@ -100,7 +100,7 @@ def main(folder=None, print_formatted_KPP_file=True, GC_version=None,
     # - Print out input KPP files with updated formatting (prior to tagging)
     # (Uniform formatting required for parsing - this step may not be required)
     if print_formatted_KPP_file:
-        extr_str='EXISTING_MECH_{}_{}'.format(mechanism, GC_version)
+        extr_str = 'EXISTING_MECH_{}_{}'.format(mechanism, GC_version)
         AC.print_out_dfs2KPP_eqn_file(headers=headers, species_df=species_df,
                                       rxn_dicts=rxn_dicts,
                                       extr_str=extr_str, )
@@ -151,12 +151,12 @@ def main(folder=None, print_formatted_KPP_file=True, GC_version=None,
     counter = max(tagged_rxns.keys())
     current_tag = '{}{}'.format(tag_prefix, counter)
     search_strs = 'BrSAL', 'CH3Br', 'CH3Cl', 'CH2Cl2', 'CHCl3', '0.150IBr',
-    search_strs += 'HOBr','ClNO2',
+    search_strs += 'HOBr', 'ClNO2',
     # Add tags for halogen families
     rxn_dicts, tagged_rxns = AC.add_tags4strs2mech(rxn_dicts, counter=counter,
                                                    search_strs=search_strs,
                                                    tagged_rxns=tagged_rxns,
-#                                                  debug=debug
+                                                   # debug=debug
                                                    )
     counter = max(tagged_rxns.keys())
     current_tag = '{}{}'.format(tag_prefix, counter)
@@ -191,8 +191,8 @@ def main(folder=None, print_formatted_KPP_file=True, GC_version=None,
     counter = max(tagged_rxns.keys())
     current_tag = '{}{}'.format(tag_prefix, counter)
     search_strs = [
-    'CH4', 'C2H6', 'C3H8', 'ACET', 'MOH', 'PRPE', 'ALK4', 'CH2O', 'CH2O',
-    'ALD2', 'ISOP'
+        'CH4', 'C2H6', 'C3H8', 'ACET', 'MOH', 'PRPE', 'ALK4', 'CH2O', 'CH2O',
+        'ALD2', 'ISOP'
     ]
     search_strs = [i+' ' for i in search_strs]
     # Add tags for halogen families
@@ -241,8 +241,8 @@ def main(folder=None, print_formatted_KPP_file=True, GC_version=None,
     # - Save out the lines to be pasted into the GC species database yaml file
     range = np.arange(1, int(current_tag[1:]))
-    tags = ['P{}{:0>3}'.format(tag_prefix,i) for i in range ]
-    tags += ['{}{:0>3}'.format(tag_prefix,i) for i in range ]
+    tags = ['P{}{:0>3}'.format(tag_prefix, i) for i in range]
+    tags += ['{}{:0>3}'.format(tag_prefix, i) for i in range]
     AC.prt_lines4species_database_yml(tags, extr_str=extr_str)
diff --git a/scripts/SMVGEAR_process_prod_loss_tags.py b/scripts/SMVGEAR_process_prod_loss_tags.py
index 7a6d6c9..66cd9e3 100644
--- a/scripts/SMVGEAR_process_prod_loss_tags.py
+++ b/scripts/SMVGEAR_process_prod_loss_tags.py
@@ -58,8 +58,8 @@ def main(trop_limit=True, res='4x5', debug=False):
     s_area = get_surface_area(res=res)[..., 0]  # m2 land map
     # convert to mass terms ( in g X )
     fam_loss = convert_molec_cm3_s_2_g_X_s_BPCH(ars=fam_loss,
-                                               ref_spec=ref_spec, wd=wd, conbine_ars=False,
-                                               rm_strat=True, month_eq=True)
+                                                ref_spec=ref_spec, wd=wd, conbine_ars=False,
+                                                rm_strat=True, month_eq=True)
     print([i.shape for i in fam_loss])
     # sum and convert to Gg
diff --git a/setup.py b/setup.py
index db93244..e88e727 100644
--- a/setup.py
+++ b/setup.py
@@ -16,18 +16,18 @@
     INSTALL_REQUIRES = []
 else:
     INSTALL_REQUIRES = [
-        'affine',
-        'cartopy',
-        'geopandas',
-        'matplotlib',
-        'netcdf4',
-        'numpy',
-        'pandas',
-        'pytest',
-        'rasterio',
-        'scipy',
-        'xarray'
-    ]
+        'affine',
+        'cartopy',
+        'geopandas',
+        'matplotlib',
+        'netcdf4',
+        'numpy',
+        'pandas',
+        'pytest',
+        'rasterio',
+        'scipy',
+        'xarray'
+    ]
 
 CLASSIFIERS = [
     'Development Status :: 4 - Beta',