diff --git a/docs/source/conf.py b/docs/source/conf.py index 968269406..4a542fad9 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -130,15 +130,15 @@ def establish_version(): # further. For a list of options available for each theme, see the # documentation. html_theme_options = dict( - github_user='quaquel', - github_repo='EMAworkbench', - github_version='/tree/master/docs/', + github_user="quaquel", + github_repo="EMAworkbench", + github_version="/tree/master/docs/", # On master branch and new branch still in # pre-release status: true; else: false. - in_progress='true', + in_progress="true", # On branches previous to "latest": true; else: false. - outdated='false', - ) + outdated="false", +) # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] @@ -257,12 +257,10 @@ def establish_version(): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - ("index", "emaworkbench", "EMA workbench Documentation", ["J.H. Kwakkel"], 1) -] +man_pages = [("index", "emaworkbench", "EMA workbench Documentation", ["J.H. Kwakkel"], 1)] def setup(app): # copy changelog into source folder for documentation dest = osp.join(HERE, "changelog.md") - shutil.copy(osp.join(HERE, "..", "..", "CHANGELOG.md"), dest) \ No newline at end of file + shutil.copy(osp.join(HERE, "..", "..", "CHANGELOG.md"), dest) diff --git a/docs/source/indepth_tutorial/directed-search.ipynb b/docs/source/indepth_tutorial/directed-search.ipynb index 5ea8e153c..7a1d1bddc 100644 --- a/docs/source/indepth_tutorial/directed-search.ipynb +++ b/docs/source/indepth_tutorial/directed-search.ipynb @@ -111,14 +111,7 @@ "ema_logging.log_to_stderr(ema_logging.INFO)\n", "\n", "with MultiprocessingEvaluator(model) as evaluator:\n", - " results = evaluator.optimize(\n", - " nfe=250,\n", - " searchover=\"levers\",\n", - " epsilons=[\n", - " 0.1,\n", - " ]\n", - " * len(model.outcomes),\n", - " )" + " results = evaluator.optimize(nfe=250, searchover=\"levers\", epsilons=[0.1] * len(model.outcomes))" ] }, { @@ -287,9 +280,7 @@ "source": [ "from ema_workbench import Constraint\n", "\n", - "constraints = [\n", - " Constraint(\"max pollution\", outcome_names=\"max_P\", function=lambda x: max(0, x - 1))\n", - "]" + "constraints = [Constraint(\"max pollution\", outcome_names=\"max_P\", function=lambda x: max(0, x - 1))]" ] }, { @@ -316,13 +307,7 @@ "\n", "with MultiprocessingEvaluator(model) as evaluator:\n", " results = evaluator.optimize(\n", - " nfe=250,\n", - " searchover=\"levers\",\n", - " epsilons=[\n", - " 0.1,\n", - " ]\n", - " * len(model.outcomes),\n", - " constraints=constraints,\n", + " nfe=250, searchover=\"levers\", epsilons=[0.1] * len(model.outcomes), constraints=constraints\n", " )" ] }, @@ -471,10 +456,7 @@ " results, convergence = evaluator.optimize(\n", " nfe=10000,\n", " searchover=\"levers\",\n", - " epsilons=[\n", - " 0.05,\n", - " ]\n", - " * len(model.outcomes),\n", + " epsilons=[0.05] * len(model.outcomes),\n", " convergence=convergence_metrics,\n", " constraints=constraints,\n", " )\n", @@ -550,12 +532,7 @@ "\n", "with MultiprocessingEvaluator(model) as evaluator:\n", " results = evaluator.optimize(\n", - " nfe=1000,\n", - " searchover=\"uncertainties\",\n", - " epsilons=[\n", - " 0.1,\n", - " ]\n", - " * len(model.outcomes),\n", + " nfe=1000, searchover=\"uncertainties\", epsilons=[0.1] * len(model.outcomes)\n", " )" ] }, @@ -628,10 +605,7 @@ "MINIMIZE = ScalarOutcome.MINIMIZE\n", "robustnes_functions = 
[\n", " ScalarOutcome(\n", - " \"90th percentile max_p\",\n", - " kind=MINIMIZE,\n", - " variable_name=\"max_P\",\n", - " function=percentile90,\n", + " \"90th percentile max_p\", kind=MINIMIZE, variable_name=\"max_P\", function=percentile90\n", " ),\n", " ScalarOutcome(\n", " \"10th percentile reliability\",\n", @@ -640,16 +614,10 @@ " function=percentile10,\n", " ),\n", " ScalarOutcome(\n", - " \"10th percentile inertia\",\n", - " kind=MAXIMIZE,\n", - " variable_name=\"inertia\",\n", - " function=percentile10,\n", + " \"10th percentile inertia\", kind=MAXIMIZE, variable_name=\"inertia\", function=percentile10\n", " ),\n", " ScalarOutcome(\n", - " \"10th percentile utility\",\n", - " kind=MAXIMIZE,\n", - " variable_name=\"utility\",\n", - " function=percentile10,\n", + " \"10th percentile utility\", kind=MAXIMIZE, variable_name=\"utility\", function=percentile10\n", " ),\n", "]" ] diff --git a/docs/source/indepth_tutorial/dps_lake_model.py b/docs/source/indepth_tutorial/dps_lake_model.py index 37a7415be..e49f643f6 100644 --- a/docs/source/indepth_tutorial/dps_lake_model.py +++ b/docs/source/indepth_tutorial/dps_lake_model.py @@ -86,7 +86,7 @@ def lake_model( """ np.random.seed(seed) - Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5) + Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5) X = np.zeros((myears,)) average_daily_P = np.zeros((myears,)) @@ -98,12 +98,12 @@ def lake_model( X[0] = 0.0 decision = 0.1 - decisions = np.zeros(myears,) + decisions = np.zeros(myears) decisions[0] = decision natural_inflows = np.random.lognormal( - math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)), - math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)), + math.log(mean**2 / math.sqrt(stdev**2 + mean**2)), + math.sqrt(math.log(1.0 + stdev**2 / mean**2)), size=myears, ) @@ -123,8 +123,6 @@ def lake_model( reliability += np.sum(X < Pcrit) / (nsamples * myears) inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears) - utility += ( - np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples - ) + utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples max_P = np.max(average_daily_P) return max_P, utility, inertia, reliability diff --git a/docs/source/indepth_tutorial/general-introduction.ipynb b/docs/source/indepth_tutorial/general-introduction.ipynb index 060068144..7ebe786cf 100644 --- a/docs/source/indepth_tutorial/general-introduction.ipynb +++ b/docs/source/indepth_tutorial/general-introduction.ipynb @@ -123,9 +123,7 @@ " X[0] = 0.0\n", " decision = 0.1\n", "\n", - " decisions = np.zeros(\n", - " myears,\n", - " )\n", + " decisions = np.zeros(myears)\n", " decisions[0] = decision\n", "\n", " natural_inflows = np.random.lognormal(\n", @@ -150,9 +148,7 @@ "\n", " reliability += np.sum(X < Pcrit) / (nsamples * myears)\n", " inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears)\n", - " utility += (\n", - " np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples\n", - " )\n", + " utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples\n", " max_P = np.max(average_daily_P)\n", " return max_P, utility, inertia, reliability" ] diff --git a/docs/source/indepth_tutorial/open-exploration.ipynb b/docs/source/indepth_tutorial/open-exploration.ipynb index b751d585b..f6ab4df9d 100755 --- a/docs/source/indepth_tutorial/open-exploration.ipynb +++ b/docs/source/indepth_tutorial/open-exploration.ipynb @@ -132,9 +132,7 @@ "source": [ "from 
ema_workbench.analysis import pairs_plotting\n", "\n", - "fig, axes = pairs_plotting.pairs_scatter(\n", - " experiments, outcomes, group_by=\"policy\", legend=False\n", - ")\n", + "fig, axes = pairs_plotting.pairs_scatter(experiments, outcomes, group_by=\"policy\", legend=False)\n", "fig.set_size_inches(8, 8)\n", "plt.show()" ] @@ -458,9 +456,7 @@ "x = experiments\n", "y = outcomes[\"max_P\"] < 0.8\n", "\n", - "fs, alg = feature_scoring.get_ex_feature_scores(\n", - " x, y, mode=RuleInductionType.CLASSIFICATION\n", - ")\n", + "fs, alg = feature_scoring.get_ex_feature_scores(x, y, mode=RuleInductionType.CLASSIFICATION)\n", "fs.sort_values(ascending=False, by=1)" ] }, @@ -599,16 +595,12 @@ "from ema_workbench.em_framework.salib_samplers import get_SALib_problem\n", "\n", "with MultiprocessingEvaluator(model) as evaluator:\n", - " sa_results = evaluator.perform_experiments(\n", - " scenarios=1000, uncertainty_sampling=Samplers.SOBOL\n", - " )\n", + " sa_results = evaluator.perform_experiments(scenarios=1000, uncertainty_sampling=Samplers.SOBOL)\n", "\n", "experiments, outcomes = sa_results\n", "\n", "problem = get_SALib_problem(model.uncertainties)\n", - "Si = sobol.analyze(\n", - " problem, outcomes[\"max_P\"], calc_second_order=True, print_to_console=False\n", - ")" + "Si = sobol.analyze(problem, outcomes[\"max_P\"], calc_second_order=True, print_to_console=False)" ] }, { diff --git a/docs/source/pyplots/basicEnvelope2.py b/docs/source/pyplots/basicEnvelope2.py index f9fab8d02..4568a4ad3 100644 --- a/docs/source/pyplots/basicEnvelope2.py +++ b/docs/source/pyplots/basicEnvelope2.py @@ -9,7 +9,5 @@ from analysis.plotting import envelopes data = load_results(r"../../../src/analysis/1000 flu cases.cPickle", zipped=False) -fig = envelopes( - data, group_by="policy", grouping_specifiers=["static policy", "adaptive policy"] -) +fig = envelopes(data, group_by="policy", grouping_specifiers=["static policy", "adaptive policy"]) plt.show() diff --git a/docs/source/pyplots/basicMultiplotDensity.py b/docs/source/pyplots/basicMultiplotDensity.py index b83e8057e..85d2f7365 100644 --- a/docs/source/pyplots/basicMultiplotDensity.py +++ b/docs/source/pyplots/basicMultiplotDensity.py @@ -5,9 +5,7 @@ from expWorkbench.util import load_results # load the data -experiments, results = load_results( - r"../../../src/analysis/1000 flu cases.cPickle", zipped=False -) +experiments, results = load_results(r"../../../src/analysis/1000 flu cases.cPickle", zipped=False) # transform the results to the required format newResults = {} diff --git a/docs/source/pyplots/primExample.py b/docs/source/pyplots/primExample.py index 55e5d747b..e0407bb6f 100644 --- a/docs/source/pyplots/primExample.py +++ b/docs/source/pyplots/primExample.py @@ -37,9 +37,7 @@ def classify(data): results = (newExperiments, newResults) # perform prim on modified results tuple -prims, uncertainties, x = prim.perform_prim( - results, classify, threshold=0.8, threshold_type=1 -) +prims, uncertainties, x = prim.perform_prim(results, classify, threshold=0.8, threshold_type=1) # visualize diff --git a/ema_workbench/__init__.py b/ema_workbench/__init__.py index 1fc0d198b..b1372ad1a 100644 --- a/ema_workbench/__init__.py +++ b/ema_workbench/__init__.py @@ -21,13 +21,7 @@ ArrayOutcome, Samplers, ) -from .util import ( - save_results, - load_results, - ema_logging, - EMAError, - process_replications, -) +from .util import save_results, load_results, ema_logging, EMAError, process_replications # from . 
import analysis diff --git a/ema_workbench/analysis/b_and_w_plotting.py b/ema_workbench/analysis/b_and_w_plotting.py index fdbe37e7a..2ed71a6a2 100644 --- a/ema_workbench/analysis/b_and_w_plotting.py +++ b/ema_workbench/analysis/b_and_w_plotting.py @@ -194,9 +194,7 @@ def _set_ax_polycollection_to_bw(collection, ax, style, colormap): collection.update({"alpha": 1}) for path in collection.get_paths(): - p1 = mpl.patches.PathPatch( - path, fc="none", hatch=colormap[orig_color]["hatch"] - ) + p1 = mpl.patches.PathPatch(path, fc="none", hatch=colormap[orig_color]["hatch"]) ax.add_patch(p1) p1.set_zorder(collection.get_zorder() - 0.1) @@ -335,9 +333,7 @@ def set_fig_to_bw(fig, style=HATCHING, line_style="continuous"): if len(all_colors) > len(bw_mapping): mapping_cycle = itertools.cycle(bw_mapping) - _logger.warning( - "more colors used than provided in B&W mapping, cycling over mapping" - ) + _logger.warning("more colors used than provided in B&W mapping, cycling over mapping") else: mapping_cycle = bw_mapping colormap = dict(zip(all_colors, mapping_cycle)) diff --git a/ema_workbench/analysis/cart.py b/ema_workbench/analysis/cart.py index 794a40901..a4d793893 100644 --- a/ema_workbench/analysis/cart.py +++ b/ema_workbench/analysis/cart.py @@ -304,7 +304,7 @@ def build_tree(self): def show_tree(self, mplfig=True, format="png"): """return a png (defaults) or svg of the tree - + On Windows, graphviz needs to be installed with conda. Parameters @@ -320,9 +320,7 @@ def show_tree(self, mplfig=True, format="png"): import pydot # dirty hack for read the docs dot_data = StringIO() - tree.export_graphviz( - self.clf, out_file=dot_data, feature_names=self.feature_names - ) + tree.export_graphviz(self.clf, out_file=dot_data, feature_names=self.feature_names) dot_data = dot_data.getvalue() # .encode('ascii') # @UndefinedVariable graphs = pydot.graph_from_dot_data(dot_data) diff --git a/ema_workbench/analysis/dimensional_stacking.py b/ema_workbench/analysis/dimensional_stacking.py index 4bb03ee03..43b31ece8 100644 --- a/ema_workbench/analysis/dimensional_stacking.py +++ b/ema_workbench/analysis/dimensional_stacking.py @@ -72,9 +72,7 @@ def discretize(data, nbins=3, with_labels=False): if with_labels: indices = pd.cut(column_data, n, precision=2, retbins=True)[0] else: - indices = pd.cut( - column_data, n, retbins=False, labels=False, precision=2 - ) + indices = pd.cut(column_data, n, retbins=False, labels=False, precision=2) discretized[column] = indices @@ -165,9 +163,7 @@ def plot_index(ax, ax_plot, axis, index, plot_labels=True, plot_cats=True): ax.set_xticks([]) if plot_labels: - tick_locs = np.linspace( - 1 / (2 * len(names)), 1 - 1 / (2 * len(names)), len(names) - ) + tick_locs = np.linspace(1 / (2 * len(names)), 1 - 1 / (2 * len(names)), len(names)) ax.set_yticks(tick_locs) ax.set_yticklabels(names) else: @@ -183,9 +179,7 @@ def plot_index(ax, ax_plot, axis, index, plot_labels=True, plot_cats=True): ax.spines["right"].set_linewidth(1.0) if plot_labels: - tick_locs = np.linspace( - 1 / (2 * len(names)), 1 - 1 / (2 * len(names)), len(names) - ) + tick_locs = np.linspace(1 / (2 * len(names)), 1 - 1 / (2 * len(names)), len(names)) ax.set_xticks(tick_locs) ax.set_xticklabels(names, rotation="vertical") else: @@ -282,12 +276,7 @@ def plot_pivot_table( height_ratios = dim_ratios(figsize=figsize, axis=0) gs = mpl.gridspec.GridSpec( - 3, - 3, - wspace=0.01, - hspace=0.01, - width_ratios=width_ratios, - height_ratios=height_ratios, + 3, 3, wspace=0.01, hspace=0.01, width_ratios=width_ratios, 
height_ratios=height_ratios ) ax_plot = fig.add_subplot(gs[2, 2]) @@ -297,9 +286,7 @@ def plot_pivot_table( # actual plotting plot_data = table.values - sns.heatmap( - plot_data, ax=ax_plot, cbar_ax=cax, cmap=cmap, vmin=0, vmax=1, **kwargs - ) + sns.heatmap(plot_data, ax=ax_plot, cbar_ax=cax, cmap=cmap, vmin=0, vmax=1, **kwargs) # set the tick labels ax_plot.set_xticks([]) @@ -359,9 +346,7 @@ def _prepare_experiments(experiments): if np.unique(x[column]).shape == (1,): x = x.drop(column, axis=1) _logger.info( - ("{} dropped from analysis " "because only a single category").format( - column - ) + ("{} dropped from analysis " "because only a single category").format(column) ) else: x[column] = x[column].astype("category") @@ -369,9 +354,7 @@ def _prepare_experiments(experiments): return x -def create_pivot_plot( - x, y, nr_levels=3, labels=True, categories=True, nbins=3, bin_labels=False -): +def create_pivot_plot(x, y, nr_levels=3, labels=True, categories=True, nbins=3, bin_labels=False): """convenience function for easily creating a pivot plot Parameters @@ -421,9 +404,7 @@ def create_pivot_plot( ooi = pd.DataFrame(y[:, np.newaxis], columns=[ooi_label]) x_y_concat = pd.concat([discretized_x, ooi], axis=1) - pvt = pd.pivot_table( - x_y_concat, values=ooi_label, index=rows, columns=columns, dropna=False - ) + pvt = pd.pivot_table(x_y_concat, values=ooi_label, index=rows, columns=columns, dropna=False) fig = plot_pivot_table(pvt, plot_labels=labels, plot_cats=categories) diff --git a/ema_workbench/analysis/feature_scoring.py b/ema_workbench/analysis/feature_scoring.py index 07de54bea..4072fbdc7 100644 --- a/ema_workbench/analysis/feature_scoring.py +++ b/ema_workbench/analysis/feature_scoring.py @@ -67,9 +67,7 @@ def _prepare_experiments(experiments): if np.unique(x[column]).shape == (1,): x = x.drop(column, axis=1) _logger.info( - ("{} dropped from analysis " "because only a single category").format( - column - ) + ("{} dropped from analysis " "because only a single category").format(column) ) else: x[column] = x[column].astype("category").cat.codes @@ -354,9 +352,7 @@ def get_ex_feature_scores( } -def get_feature_scores_all( - x, y, alg="extra trees", mode=RuleInductionType.REGRESSION, **kwargs -): +def get_feature_scores_all(x, y, alg="extra trees", mode=RuleInductionType.REGRESSION, **kwargs): """perform feature scoring for all outcomes using the specified feature scoring algorithm diff --git a/ema_workbench/analysis/logistic_regression.py b/ema_workbench/analysis/logistic_regression.py index 24043f196..851f1f6fb 100644 --- a/ema_workbench/analysis/logistic_regression.py +++ b/ema_workbench/analysis/logistic_regression.py @@ -106,9 +106,7 @@ def contours(ax, model, xlabel, ylabel, levels): # rgb = [255*entry for entry in sns.color_palette()[1]] # hsl = 28, 100, 52.7 - cmap = sns.diverging_palette( - 244, 28, s=99.9, l=52.7, n=len(levels) - 1, as_cmap=True - ) + cmap = sns.diverging_palette(244, 28, s=99.9, l=52.7, n=len(levels) - 1, as_cmap=True) ax.contourf(Xgrid, Ygrid, Zgrid, levels, cmap=cmap, zorder=0) @@ -269,9 +267,7 @@ def show_tradeoff(self, cmap=mpl.cm.viridis, annotated=False): # @UndefinedVari a Figure instance """ - return sdutil.plot_tradeoff( - self.peeling_trajectory, cmap=cmap, annotated=annotated - ) + return sdutil.plot_tradeoff(self.peeling_trajectory, cmap=cmap, annotated=annotated) # @UndefinedVariable def show_threshold_tradeoff(self, i, cmap=mpl.cm.viridis_r, step=0.1): @@ -293,9 +289,7 @@ def show_threshold_tradeoff(self, i, cmap=mpl.cm.viridis_r, step=0.1): 
fitted_model = self.models[i] x = self._normalized.loc[:, fitted_model.params.index.values] - coverage, density, thresholds = calculate_covden( - fitted_model, x, self.y, step=step - ) + coverage, density, thresholds = calculate_covden(fitted_model, x, self.y, step=step) fig = plt.figure() ax = fig.add_subplot(111, aspect="equal") @@ -329,9 +323,7 @@ def inspect(self, i, step=0.1): model = self.models[i] x = self._normalized.loc[:, model.params.index.values] coverage, density, thresholds = calculate_covden(model, x, self.y, step=step) - data = pd.DataFrame( - {"coverage": coverage, "density": density, "thresholds": thresholds} - ) + data = pd.DataFrame({"coverage": coverage, "density": density, "thresholds": thresholds}) print(data) print() diff --git a/ema_workbench/analysis/pairs_plotting.py b/ema_workbench/analysis/pairs_plotting.py index 201772f5a..cb5287031 100644 --- a/ema_workbench/analysis/pairs_plotting.py +++ b/ema_workbench/analysis/pairs_plotting.py @@ -87,9 +87,7 @@ def pairs_lines( figure = plt.figure() axes_dict = {} - combis = [ - (field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show - ] + combis = [(field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show] for field1, field2 in combis: i = list(outcomes_to_show).index(field1) @@ -120,13 +118,7 @@ def pairs_lines( for ax in figure.axes: gs2 = ax._subplotspec - if all( - ( - gs1._gridspec == gs2._gridspec, - gs1.num1 == gs2.num1, - gs1.num2 == gs2.num2, - ) - ): + if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)): break make_legend(grouping_labels, ax, legend_type=LegendEnum.LINE) @@ -255,11 +247,7 @@ def pairs_density( figures.append(figure) # harmonize the color scaling across figures - combis = [ - (field1, field2) - for field1 in outcomes_to_show - for field2 in outcomes_to_show - ] + combis = [(field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show] for combi in combis: if combi[0] == combi[1]: continue @@ -276,9 +264,7 @@ def pairs_density( return figures, axes_dicts else: - return simple_pairs_density( - outcomes, outcomes_to_show, log, colormap, gridsize, ylabels - ) + return simple_pairs_density(outcomes, outcomes_to_show, log, colormap, gridsize, ylabels) def determine_extents(outcomes, outcomes_to_show): @@ -311,9 +297,7 @@ def determine_extents(outcomes, outcomes_to_show): except KeyError: limits[entry] = (minimum, maximum) extents = {} - combis = [ - (field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show - ] + combis = [(field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show] for field1, field2 in combis: limits_1 = limits[field1] limits_2 = limits[field2] @@ -322,14 +306,7 @@ def determine_extents(outcomes, outcomes_to_show): def simple_pairs_density( - outcomes, - outcomes_to_show, - log, - colormap, - gridsize, - ylabels, - extents=None, - title=None, + outcomes, outcomes_to_show, log, colormap, gridsize, ylabels, extents=None, title=None ): """ @@ -358,9 +335,7 @@ def simple_pairs_density( # the plotting figure = plt.figure() - combis = [ - (field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show - ] + combis = [(field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show] axes_dict = {} for field1, field2 in combis: i = list(outcomes_to_show).index(field1) @@ -485,9 +460,7 @@ def pairs_scatter( figure = plt.figure() axes_dict = {} - combis = [ - (field1, field2) for field1 in outcomes_to_show for field2 in 
outcomes_to_show - ] + combis = [(field1, field2) for field1 in outcomes_to_show for field2 in outcomes_to_show] for field1, field2 in combis: i = list(outcomes_to_show).index(field1) @@ -523,13 +496,7 @@ def pairs_scatter( for ax in figure.axes: gs2 = ax._subplotspec - if all( - ( - gs1._gridspec == gs2._gridspec, - gs1.num1 == gs2.num1, - gs1.num2 == gs2.num2, - ) - ): + if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)): break make_legend(grouping_labels, ax, legend_type=LegendEnum.SCATTER) diff --git a/ema_workbench/analysis/parcoords.py b/ema_workbench/analysis/parcoords.py index 2a84d7590..1fb71f94f 100644 --- a/ema_workbench/analysis/parcoords.py +++ b/ema_workbench/analysis/parcoords.py @@ -231,9 +231,7 @@ def plot(self, data, color=None, label=None, **kwargs): recoded[key] = data[key].astype(value).cat.codes # normalize the data - normalized_data = pd.DataFrame( - self.normalizer.transform(recoded), columns=recoded.columns - ) + normalized_data = pd.DataFrame(self.normalizer.transform(recoded), columns=recoded.columns) # plot the data self._plot(normalized_data, color=color, **kwargs) @@ -270,9 +268,7 @@ def _plot(self, data, **kwargs): """ j = -1 - for ax, label_i, label_j in zip( - self.axes, self.axis_labels[:-1], self.axis_labels[1::] - ): + for ax, label_i, label_j in zip(self.axes, self.axis_labels[:-1], self.axis_labels[1::]): plotdata = data.loc[:, [label_i, label_j]] j += 1 lines = ax.plot([j + 1, j + 2], plotdata.values.T, **kwargs) diff --git a/ema_workbench/analysis/plotting.py b/ema_workbench/analysis/plotting.py index 0a445d8cd..14aba3e3b 100644 --- a/ema_workbench/analysis/plotting.py +++ b/ema_workbench/analysis/plotting.py @@ -145,20 +145,10 @@ def envelopes( if group_by: group_by_envelopes( - outcomes, - outcome_to_plot, - time, - density, - ax, - ax_d, - fill, - grouping_labels, - log, + outcomes, outcome_to_plot, time, density, ax, ax_d, fill, grouping_labels, log ) else: - single_envelope( - outcomes, outcome_to_plot, time, density, ax, ax_d, fill, log - ) + single_envelope(outcomes, outcome_to_plot, time, density, ax, ax_d, fill, log) if ax_d: for tl in ax_d.get_yticklabels(): @@ -173,13 +163,7 @@ def envelopes( for ax in figure.axes: gs2 = ax._subplotspec - if all( - ( - gs1._gridspec == gs2._gridspec, - gs1.num1 == gs2.num1, - gs1.num2 == gs2.num2, - ) - ): + if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)): break if fill: make_legend(grouping_labels, ax, alpha=0.3, legend_type=LegendEnum.PATCH) @@ -189,9 +173,7 @@ def envelopes( return figure, axes_dict -def group_by_envelopes( - outcomes, outcome_to_plot, time, density, ax, ax_d, fill, group_labels, log -): +def group_by_envelopes(outcomes, outcome_to_plot, time, density, ax, ax_d, fill, group_labels, log): """Helper function responsible for generating an envelope plot based on a grouping. 
@@ -358,12 +340,7 @@ def lines( ) data = prepare_data( - experiments, - experiments_to_show, - outcomes, - outcomes_to_show, - group_by, - grouping_specifiers, + experiments, experiments_to_show, outcomes, outcomes_to_show, group_by, grouping_specifiers ) experiments, outcomes, outcomes_to_show, time, grouping_labels = data @@ -384,9 +361,7 @@ def lines( tl.set_visible(False) if group_by: - group_by_lines( - outcomes, outcome_to_plot, time, density, ax, ax_d, grouping_labels, log - ) + group_by_lines(outcomes, outcome_to_plot, time, density, ax, ax_d, grouping_labels, log) else: simple_lines(outcomes, outcome_to_plot, time, density, ax, ax_d, log) ax.set_xlabel(TIME_LABEL) @@ -398,13 +373,7 @@ def lines( for ax in figure.axes: gs2 = ax._subplotspec - if all( - ( - gs1._gridspec == gs2._gridspec, - gs1.num1 == gs2.num1, - gs1.num2 == gs2.num2, - ) - ): + if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)): break make_legend(grouping_labels, ax) @@ -478,12 +447,7 @@ def plot_lines_with_envelopes( experiments, None, outcomes, outcomes_to_show, group_by, grouping_specifiers )[1] data = prepare_data( - experiments, - experiments_to_show, - outcomes, - outcomes_to_show, - group_by, - grouping_specifiers, + experiments, experiments_to_show, outcomes, outcomes_to_show, group_by, grouping_specifiers ) experiments, outcomes, outcomes_to_show, time, grouping_labels = data @@ -513,13 +477,10 @@ def plot_lines_with_envelopes( ax.plot(time.T[:, np.newaxis], value.T, c=get_color(j)) if density: - group_density( - ax_d, density, full_outcomes, outcome_to_plot, grouping_labels, log - ) + group_density(ax_d, density, full_outcomes, outcome_to_plot, grouping_labels, log) ax_d.get_yaxis().set_view_interval( - ax.get_yaxis().get_view_interval()[0], - ax.get_yaxis().get_view_interval()[1], + ax.get_yaxis().get_view_interval()[0], ax.get_yaxis().get_view_interval()[1] ) else: @@ -541,22 +502,14 @@ def plot_lines_with_envelopes( for ax in figure.axes: gs2 = ax._subplotspec - if all( - ( - gs1._gridspec == gs2._gridspec, - gs1.num1 == gs2.num1, - gs1.num2 == gs2.num2, - ) - ): + if all((gs1._gridspec == gs2._gridspec, gs1.num1 == gs2.num1, gs1.num2 == gs2.num2)): break make_legend(grouping_labels, ax) return figure, axes_dict -def group_by_lines( - outcomes, outcome_to_plot, time, density, ax, ax_d, group_by_labels, log -): +def group_by_lines(outcomes, outcome_to_plot, time, density, ax, ax_d, group_by_labels, log): """ Helper function responsible for generating a grouped lines plot. 
@@ -681,9 +634,7 @@ def kde_over_time( figures = [] axes_dicts = {} for key, value in outcomes.items(): - fig, axes_dict = simple_kde( - value, outcomes_to_show, colormap, log, minima, maxima - ) + fig, axes_dict = simple_kde(value, outcomes_to_show, colormap, log, minima, maxima) fig.suptitle(key) figures.append(fig) axes_dicts[key] = axes_dict @@ -835,14 +786,7 @@ def multiple_densities( ax4 = plt.subplot2grid((2, 6), (1, 3), sharex=ax1, sharey=ax_env) ax5 = plt.subplot2grid((2, 6), (1, 4), sharex=ax1, sharey=ax_env) ax6 = plt.subplot2grid((2, 6), (1, 5), sharex=ax1, sharey=ax_env) - kde_axes = [ - ax1, - ax2, - ax3, - ax4, - ax5, - ax6, - ] + kde_axes = [ax1, ax2, ax3, ax4, ax5, ax6] else: raise EMAError("too many points in time provided") @@ -887,13 +831,7 @@ def multiple_densities( # TODO grouping labels, boxplots, and sharex # create a problem group_density( - ax, - density, - outcomes, - outcome_to_show, - grouping_labels, - index=index, - log=log, + ax, density, outcomes, outcome_to_show, grouping_labels, index=index, log=log ) min_y, max_y = ax_env.get_ylim() diff --git a/ema_workbench/analysis/plotting_util.py b/ema_workbench/analysis/plotting_util.py index d1c33d1dd..f74b342b0 100644 --- a/ema_workbench/analysis/plotting_util.py +++ b/ema_workbench/analysis/plotting_util.py @@ -101,9 +101,7 @@ def plot_envelope(ax, j, time, value, fill=False): if fill: # ax.plot(time, minimum, color=color, alpha=0.3) # ax.plot(time, maximum, color=color, alpha=0.3) - ax.fill_between( - time, minimum, maximum, facecolor=color, alpha=0.3, - ) + ax.fill_between(time, minimum, maximum, facecolor=color, alpha=0.3) else: ax.plot(time, minimum, c=color) ax.plot(time, maximum, c=color) @@ -252,9 +250,7 @@ def plot_boxenplot(ax, values, log, group_labels=None): sns.boxenplot(x="variable", y="value", data=data, order=group_labels, ax=ax) -def group_density( - ax_d, density, outcomes, outcome_to_plot, group_labels, log=False, index=-1 -): +def group_density(ax_d, density, outcomes, outcome_to_plot, group_labels, log=False, index=-1): """ helper function for plotting densities in case of grouped data @@ -326,8 +322,7 @@ def simple_density(density, value, ax_d, ax, log): ax.get_yaxis().get_view_interval()[0], ax.get_yaxis().get_view_interval()[1] ) ax_d.set_ylim( - bottom=ax.get_yaxis().get_view_interval()[0], - top=ax.get_yaxis().get_view_interval()[1], + bottom=ax.get_yaxis().get_view_interval()[0], top=ax.get_yaxis().get_view_interval()[1] ) ax_d.set_xlabel("") @@ -425,9 +420,7 @@ def make_legend(categories, ax, ncol=3, legend_type=LegendEnum.LINE, alpha=1): artist = mpl.lines.Line2D([0], [0], linestyle="none", c=color, marker="o") elif legend_type == LegendEnum.PATCH: - artist = plt.Rectangle( - (0, 0), 1, 1, edgecolor=color, facecolor=color, alpha=alpha - ) + artist = plt.Rectangle((0, 0), 1, 1, edgecolor=color, facecolor=color, alpha=alpha) some_identifiers.append(artist) @@ -515,9 +508,7 @@ def filter_scalar_outcomes(outcomes): temp = {} for key, value in outcomes.items(): if value.ndim < 2: - _logger.info( - ("{} not shown because it is " "not time series data").format(key) - ) + _logger.info(("{} not shown because it is " "not time series data").format(key)) else: temp[key] = value return temp @@ -556,9 +547,7 @@ def determine_time_dimension(outcomes): return time, outcomes -def group_results( - experiments, outcomes, group_by, grouping_specifiers, grouping_labels -): +def group_results(experiments, outcomes, group_by, grouping_specifiers, grouping_labels): """ Helper function that takes the 
experiments and results and returns a list based on groupings. Each element in the dictionary contains the experiments @@ -608,13 +597,9 @@ def group_results( if grouping_specifiers.index(specifier) == len(grouping_specifiers) - 1: # last case - logical = (column_to_group_by >= lower_limit) & ( - column_to_group_by <= upper_limit - ) + logical = (column_to_group_by >= lower_limit) & (column_to_group_by <= upper_limit) else: - logical = (column_to_group_by >= lower_limit) & ( - column_to_group_by < upper_limit - ) + logical = (column_to_group_by >= lower_limit) & (column_to_group_by < upper_limit) elif group_by == "index": # the grouping is based on indices logical = specifier @@ -687,18 +672,10 @@ def prepare_pairs_data( """ if isinstance(outcomes_to_show, str): - raise EMAError( - "for pair wise plotting, more than one outcome needs to be provided" - ) + raise EMAError("for pair wise plotting, more than one outcome needs to be provided") experiments, outcomes, outcomes_to_show, time, grouping_labels = prepare_data( - experiments, - None, - outcomes, - outcomes_to_show, - group_by, - grouping_specifiers, - filter_scalar, + experiments, None, outcomes, outcomes_to_show, group_by, grouping_specifiers, filter_scalar ) def filter_outcomes(outcomes, point_in_time): @@ -778,9 +755,7 @@ def prepare_data( if not grouping_specifiers: # no grouping specifier, so infer from the data if group_by == "index": - raise EMAError( - "no grouping specifiers provided while " "trying to group on index" - ) + raise EMAError("no grouping specifiers provided while " "trying to group on index") else: column_to_group_by = experiments[group_by] if column_to_group_by.dtype in (object, "category"): @@ -796,9 +771,7 @@ def prepare_data( grouping_labels = grouping_specifiers elif isinstance(grouping_specifiers, dict): grouping_labels = sorted(grouping_specifiers.keys()) - grouping_specifiers = [ - grouping_specifiers[key] for key in grouping_labels - ] + grouping_specifiers = [grouping_specifiers[key] for key in grouping_labels] else: grouping_labels = grouping_specifiers @@ -837,9 +810,7 @@ def do_titles(ax, titles, outcome): try: ax.set_title(titles[outcome]) except KeyError: - _logger.warning( - f"key error in do_titles, no title provided for `{outcome}`" - ) + _logger.warning(f"key error in do_titles, no title provided for `{outcome}`") ax.set_title(outcome) @@ -864,9 +835,7 @@ def do_ylabels(ax, ylabels, outcome): try: ax.set_ylabel(ylabels[outcome]) except KeyError: - _logger.warning( - f"key error in do_ylabels, no ylabel provided for `{outcome}`" - ) + _logger.warning(f"key error in do_ylabels, no ylabel provided for `{outcome}`") ax.set_ylabel(outcome) diff --git a/ema_workbench/analysis/prim.py b/ema_workbench/analysis/prim.py index 41ef90e5f..3a88dbcba 100644 --- a/ema_workbench/analysis/prim.py +++ b/ema_workbench/analysis/prim.py @@ -27,9 +27,7 @@ import altair as alt except ImportError: alt = None - warnings.warn( - ("altair based interactive " "inspection not available"), ImportWarning - ) + warnings.warn(("altair based interactive " "inspection not available"), ImportWarning) from ..util import EMAError, temporary_filter, INFO, get_module_logger from . 
import scenario_discovery_util as sdutil @@ -159,9 +157,7 @@ def pca_preprocess(experiments, y, subsets=None, exclude=set()): rotated_experiments[name] = subset_experiments[:, i] [column_names.append(name)] - rotation_matrix = pd.DataFrame( - rotation_matrix, index=row_names, columns=column_names - ) + rotation_matrix = pd.DataFrame(rotation_matrix, index=row_names, columns=column_names) return rotated_experiments, rotation_matrix @@ -372,16 +368,7 @@ def __init__(self, prim, box_lims, indices): self._resampled = [] self.yi_initial = indices[:] - columns = [ - "name", - "lower", - "upper", - "minimum", - "maximum", - "qp_lower", - "qp_upper", - "id", - ] + columns = ["name", "lower", "upper", "minimum", "maximum", "qp_lower", "qp_upper", "id"] self.boxes_quantitative = pd.DataFrame(columns=columns) columns = ["item", "name", "n_items", "x", "id"] @@ -475,9 +462,7 @@ def _inspect_data(self, i, uncs, qp_values): stats = self.peeling_trajectory.iloc[i] # make the box definition - columns = pd.MultiIndex.from_product( - [[f"box {i}"], ["min", "max", "qp value", "qp value"]] - ) + columns = pd.MultiIndex.from_product([[f"box {i}"], ["min", "max", "qp value", "qp value"]]) box_lim = pd.DataFrame(np.zeros((len(uncs), 4)), index=uncs, columns=columns) for unc in uncs: @@ -565,11 +550,7 @@ def inspect_tradeoff(self): # unless we can force a selection? name = f"{dim}, {qp.loc[qp.index[0], dim]: .2g}" entry = dict( - name=name, - n_items=len(items) + 1, - item=item, - id=int(i), - x=j / len(items), + name=name, n_items=len(items) + 1, item=item, id=int(i), x=j / len(items) ) nominal_vars.append(entry) @@ -592,9 +573,7 @@ def inspect_tradeoff(self): y=alt.Y("density:Q", scale=alt.Scale(domain=(0, 1.1))), color=alt.Color( "res_dim:O", - scale=alt.Scale( - range=sns.color_palette("YlGnBu", n_colors=8).as_hex() - ), + scale=alt.Scale(range=sns.color_palette("YlGnBu", n_colors=8).as_hex()), ), opacity=alt.condition(point_selector, alt.value(1), alt.value(0.4)), tooltip=[ @@ -624,7 +603,7 @@ def inspect_tradeoff(self): x_upper="(datum.x2-datum.minimum)/(datum.maximum-datum.minimum)", ) .transform_filter(point_selector) - .properties(width=width,) + .properties(width=width) ) lines = base.mark_rule() @@ -654,7 +633,7 @@ def inspect_tradeoff(self): ) data = pd.DataFrame([dict(start=0, end=1)]) - rect = alt.Chart(data).mark_rect(opacity=0.05).encode(x="start:Q", x2="end:Q",) + rect = alt.Chart(data).mark_rect(opacity=0.05).encode(x="start:Q", x2="end:Q") # TODO:: for qp can we do something with the y encoding here and # connecting this to a selection? 
@@ -663,14 +642,12 @@ def inspect_tradeoff(self): nominal = ( alt.Chart(nominal_vars) .mark_point() - .encode(x="x:Q", y="name:N",) + .encode(x="x:Q", y="name:N") .transform_filter(point_selector) - .properties(width=width,) + .properties(width=width) ) - texts3 = nominal.mark_text(baseline="top", dy=5, align="center").encode( - text="item:N" - ) + texts3 = nominal.mark_text(baseline="top", dy=5, align="center").encode(text="item:N") layered = alt.layer(lines, texts1, texts2, rect, nominal, texts3) @@ -701,9 +678,7 @@ def resample(self, i=None, iterations=10, p=1 / 2): with temporary_filter(__name__, INFO, "find_box"): for j in range(len(self._resampled), iterations): _logger.info(f"resample {j}") - index = np.random.choice( - x.index, size=int(x.shape[0] * p), replace=False - ) + index = np.random.choice(x.index, size=int(x.shape[0] * p), replace=False) x_temp = x.loc[index, :].reset_index(drop=True) y_temp = y[index] @@ -739,9 +714,7 @@ def resample(self, i=None, iterations=10, p=1 / 2): ).T * 100 ) - return scores.sort_values( - by=["reproduce coverage", "reproduce density"], ascending=False - ) + return scores.sort_values(by=["reproduce coverage", "reproduce density"], ascending=False) def select(self, i): """ @@ -756,14 +729,10 @@ def select(self, i): """ if self._frozen: raise PrimException( - "box has been frozen because PRIM " - "has found at least one more recent " - "box" + "box has been frozen because PRIM " "has found at least one more recent " "box" ) - res_dim = sdutil._determine_restricted_dims( - self.box_lims[i], self.prim.box_init - ) + res_dim = sdutil._determine_restricted_dims(self.box_lims[i], self.prim.box_init) indices = sdutil._in_box( self.prim.x.loc[self.prim.yi_remaining, res_dim], self.box_lims[i][res_dim] @@ -792,9 +761,7 @@ def drop_restriction(self, uncertainty="", i=-1): new_box_lim = self.box_lims[i].copy() new_box_lim.loc[:, uncertainty] = self.box_lims[0].loc[:, uncertainty] - indices = sdutil._in_box( - self.prim.x.loc[self.prim.yi_remaining, :], new_box_lim - ) + indices = sdutil._in_box(self.prim.x.loc[self.prim.yi_remaining, :], new_box_lim) indices = self.prim.yi_remaining[indices] self.update(new_box_lim, indices) @@ -817,9 +784,7 @@ def update(self, box_lims, indices): y = self.prim.y[self.yi] coi = self.prim.determine_coi(self.yi) - restricted_dims = sdutil._determine_restricted_dims( - self.box_lims[-1], self.prim.box_init - ) + restricted_dims = sdutil._determine_restricted_dims(self.box_lims[-1], self.prim.box_init) data = { "coverage": coi / self.prim.t_coi, @@ -860,9 +825,7 @@ def show_tradeoff(self, cmap=mpl.cm.viridis, annotated=False): # @UndefinedVari a Figure instance """ - return sdutil.plot_tradeoff( - self.peeling_trajectory, cmap=cmap, annotated=annotated - ) + return sdutil.plot_tradeoff(self.peeling_trajectory, cmap=cmap, annotated=annotated) def show_pairs_scatter(self, i=None, dims=None, cdf=False): """Make a pair wise scatter plot of all the restricted @@ -886,9 +849,7 @@ def show_pairs_scatter(self, i=None, dims=None, cdf=False): i = self._cur_box if dims is None: - dims = sdutil._determine_restricted_dims( - self.box_lims[i], self.prim.box_init - ) + dims = sdutil._determine_restricted_dims(self.box_lims[i], self.prim.box_init) # x = # y = self.prim.y[self.yi_initial] @@ -1014,10 +975,7 @@ def __init__( mode=sdutil.RuleInductionType.BINARY, update_function="default", ): - assert mode in { - sdutil.RuleInductionType.BINARY, - sdutil.RuleInductionType.REGRESSION, - } + assert mode in {sdutil.RuleInductionType.BINARY, 
sdutil.RuleInductionType.REGRESSION} assert self._assert_mode(y, mode, update_function) # preprocess x try: @@ -1034,9 +992,7 @@ def __init__( self.x_int = x_int.values self.x_int_columns = x_int.columns.values - self.x_numeric_columns = np.concatenate( - [self.x_float_colums, self.x_int_columns] - ) + self.x_numeric_columns = np.concatenate([self.x_float_colums, self.x_int_columns]) x_nominal = x.select_dtypes(exclude=np.number) @@ -1044,9 +1000,7 @@ def __init__( for column in x_nominal.columns.values: if np.unique(x[column]).shape == (1,): x = x.drop(column, axis=1) - _logger.info( - f"{column} dropped from analysis " "because only a single category" - ) + _logger.info(f"{column} dropped from analysis " "because only a single category") x_nominal = x.select_dtypes(exclude=np.number) self.x_nominal = x_nominal.values @@ -1125,9 +1079,7 @@ def find_box(self): # log how much data and how many coi are remaining _logger.info( - self.message.format( - self.yi_remaining.shape[0], self.determine_coi(self.yi_remaining) - ) + self.message.format(self.yi_remaining.shape[0], self.determine_coi(self.yi_remaining)) ) # make a new box that contains all the remaining data points @@ -1141,13 +1093,8 @@ def find_box(self): box = self._paste(box) _logger.debug("pasting completed") - message = ( - "mean: {0}, mass: {1}, coverage: {2}, " - "density: {3} restricted_dimensions: {4}" - ) - message = message.format( - box.mean, box.mass, box.coverage, box.density, box.res_dim - ) + message = "mean: {0}, mass: {1}, coverage: {2}, " "density: {3} restricted_dimensions: {4}" + message = message.format(box.mean, box.mass, box.coverage, box.density, box.res_dim) if (self.threshold_type == ABOVE) & (box.mean >= self.threshold): _logger.info(message) @@ -1244,7 +1191,7 @@ def _peel(self, box): # identify all possible peels possible_peels = [] - for x, columns, dtype, in [ + for x, columns, dtype in [ (x_float, self.x_float_colums, "float"), (x_int, self.x_int_columns, "int"), (x_nominal, self.x_nominal_columns, "object"), @@ -1263,9 +1210,7 @@ def _peel(self, box): for entry in possible_peels: i, box_lim = entry obj = self.obj_func(self, self.y[box.yi], self.y[i]) - non_res_dim = self.n_cols - sdutil._determine_nr_restricted_dims( - box_lim, self.box_init - ) + non_res_dim = self.n_cols - sdutil._determine_nr_restricted_dims(box_lim, self.box_init) score = (obj, non_res_dim, box_lim, i) scores.append(score) @@ -1453,16 +1398,14 @@ def _paste(self, box): mass_old = box.yi.shape[0] / self.n # need to break this down by dtype - restricted_dims = sdutil._determine_restricted_dims( - box.box_lims[-1], self.box_init - ) + restricted_dims = sdutil._determine_restricted_dims(box.box_lims[-1], self.box_init) res_dim = set(restricted_dims) x = self.x.loc[self.yi_remaining, :] # identify all possible pastes possible_pastes = [] - for columns, dtype, in [ + for columns, dtype in [ (self.x_float_colums, "float"), (self.x_int_columns, "int"), (self.x_nominal_columns, "object"), @@ -1497,12 +1440,7 @@ def _paste(self, box): mean_old = np.mean(self.y[box.yi]) mean_new = np.mean(self.y[indices]) - if ( - (mass_new >= self.mass_min) - & (mass_new > mass_old) - & (obj > 0) - & (mean_new > mean_old) - ): + if (mass_new >= self.mass_min) & (mass_new > mass_old) & (obj > 0) & (mean_new > mean_old): box.update(box_new, indices) return self._paste(box) else: diff --git a/ema_workbench/analysis/prim_util.py b/ema_workbench/analysis/prim_util.py index 78f8dbc4d..4912c205b 100644 --- a/ema_workbench/analysis/prim_util.py +++ 
b/ema_workbench/analysis/prim_util.py @@ -60,9 +60,7 @@ def get_quantile(data, quantile): value = (data[index_lower] + data[index_higher]) / 2 else: # lower - while (data[index_lower] == data[index_higher]) & ( - index_higher < len(data) - 1 - ): + while (data[index_lower] == data[index_higher]) & (index_higher < len(data) - 1): index_higher += 1 value = (data[index_lower] + data[index_higher]) / 2 @@ -170,9 +168,7 @@ def determine_rotation(experiments): # make the eigen vectors unit length for i in range(eigen_vectors.shape[1]): - eigen_vectors[:, i] / np.linalg.norm(eigen_vectors[:, i]) * np.sqrt( - eigen_vals[i] - ) + eigen_vectors[:, i] / np.linalg.norm(eigen_vectors[:, i]) * np.sqrt(eigen_vals[i]) return eigen_vectors diff --git a/ema_workbench/analysis/regional_sa.py b/ema_workbench/analysis/regional_sa.py index a72181fc6..4db637ab2 100644 --- a/ema_workbench/analysis/regional_sa.py +++ b/ema_workbench/analysis/regional_sa.py @@ -80,7 +80,7 @@ def plot_discrete_cdf(ax, unc, x, y, xticklabels_on, ccdf): freq = 1 - cum_freq x_plot = [j * 1, j * 1 + 1] - y_plot = [freq,] * 2 + y_plot = [freq] * 2 ax.plot(x_plot, y_plot, c=cp[i + 1], label=i == 1) ax.scatter( @@ -105,9 +105,7 @@ def plot_discrete_cdf(ax, unc, x, y, xticklabels_on, ccdf): if ccdf: cum_freq_un = (len(freqs) - j - 1) / n_cat - ax.plot( - x_plot, [cum_freq_un,] * 2, lw=1, c="darkgrey", zorder=1, label="x==y", - ) + ax.plot(x_plot, [cum_freq_un] * 2, lw=1, c="darkgrey", zorder=1, label="x==y") ax.scatter( x_plot[0], cum_freq_un, diff --git a/ema_workbench/analysis/scenario_discovery_util.py b/ema_workbench/analysis/scenario_discovery_util.py index 0bd63dc63..140575cda 100644 --- a/ema_workbench/analysis/scenario_discovery_util.py +++ b/ema_workbench/analysis/scenario_discovery_util.py @@ -155,9 +155,7 @@ def _determine_restricted_dims(box_limits, box_init): """ cols = box_init.columns.values - restricted_dims = cols[ - np.all(box_init.values == box_limits.values, axis=0) == False - ] + restricted_dims = cols[np.all(box_init.values == box_limits.values, axis=0) == False] # restricted_dims = [column for column in box_init.columns if not # np.all(box_init[column].values == box_limits[column].values)] return restricted_dims @@ -190,12 +188,10 @@ def _compare(a, b): """compare two boxes, for each dimension return True if the same and false otherwise""" dtypesDesc = a.dtype.descr - logical = np.ones((len(dtypesDesc,)), dtype=bool) + logical = np.ones((len(dtypesDesc)), dtype=bool) for i, entry in enumerate(dtypesDesc): name = entry[0] - logical[i] = ( - logical[i] & (a[name][0] == b[name][0]) & (a[name][1] == b[name][1]) - ) + logical[i] = logical[i] & (a[name][0] == b[name][0]) & (a[name][1] == b[name][1]) return logical @@ -399,9 +395,7 @@ def plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims, cdf=False): width = xlim[1] - xlim[0] xy = x, y - box = patches.Rectangle( - xy, width, height, edgecolor="red", facecolor="none", lw=3 - ) + box = patches.Rectangle(xy, width, height, edgecolor="red", facecolor="none", lw=3) ax.add_patch(box) # do the yticklabeling for categorical rows @@ -642,15 +636,11 @@ def plot_ppt(peeling_trajectory): fig = plt.gcf() - make_legend( - ["mean", "mass", "coverage", "density", "restricted_dim"], ax, ncol=5, alpha=1 - ) + make_legend(["mean", "mass", "coverage", "density", "restricted_dim"], ax, ncol=5, alpha=1) return fig -def plot_tradeoff( - peeling_trajectory, cmap=mpl.cm.viridis, annotated=False -): # @UndefinedVariable +def plot_tradeoff(peeling_trajectory, cmap=mpl.cm.viridis, 
annotated=False): # @UndefinedVariable """Visualize the trade off between coverage and density. Color is used to denote the number of restricted dimensions. @@ -695,9 +685,7 @@ def plot_tradeoff( return fig -def plot_unc( - box_init, xi, i, j, norm_box_lim, box_lim, u, ax, color=sns.color_palette()[0] -): +def plot_unc(box_init, xi, i, j, norm_box_lim, box_lim, u, ax, color=sns.color_palette()[0]): """ Parameters: @@ -819,12 +807,9 @@ def boxes_to_dataframe(self): dtype = object break - columns = pd.MultiIndex.from_product([index, ["min", "max",],]) + columns = pd.MultiIndex.from_product([index, ["min", "max"]]) df_boxes = pd.DataFrame( - np.zeros((len(uncs), nr_boxes * 2)), - index=uncs, - dtype=dtype, - columns=columns, + np.zeros((len(uncs), nr_boxes * 2)), index=uncs, dtype=dtype, columns=columns ) # TODO should be possible to make more efficient diff --git a/ema_workbench/connectors/excel.py b/ema_workbench/connectors/excel.py index 4388fc6a0..e2a461a27 100644 --- a/ema_workbench/connectors/excel.py +++ b/ema_workbench/connectors/excel.py @@ -72,9 +72,7 @@ class BaseExcelModel(FileModel): com_warning_msg = "com error: no cell(s) named %s found" - def __init__( - self, name, wd=None, model_file=None, default_sheet=None, pointers=None - ): + def __init__(self, name, wd=None, model_file=None, default_sheet=None, pointers=None): super().__init__(name, wd=wd, model_file=model_file) #: Reference to the Excel application. This attribute is `None` until #: model_init has been invoked. @@ -191,14 +189,14 @@ def cleanup(self): try: self.wb.Close(False) except com_error as err: - _logger.warning(f"com error on wb.Close: {err}",) + _logger.warning(f"com error on wb.Close: {err}") del self.wb if self.xl: try: self.xl.DisplayAlerts = False self.xl.Quit() except com_error as err: - _logger.warning(f"com error on xl.Quit: {err}",) + _logger.warning(f"com error on xl.Quit: {err}") del self.xl self.xl = None @@ -266,9 +264,7 @@ def get_wb_value(self, name): value = sheet.Range(this_range).Value except com_error: _logger.warning( - "com error: no cell(s) named {} found on sheet {}".format( - this_range, this_sheet - ), + "com error: no cell(s) named {} found on sheet {}".format(this_range, this_sheet) ) value = None @@ -307,9 +303,7 @@ def set_wb_value(self, name, value): sheet.Range(this_range).Value = value except com_error: _logger.warning( - "com error: no cell(s) named {} found on sheet {}".format( - this_range, this_sheet - ), + "com error: no cell(s) named {} found on sheet {}".format(this_range, this_sheet) ) def get_wb_sheetnames(self): diff --git a/ema_workbench/connectors/netlogo.py b/ema_workbench/connectors/netlogo.py index 7c804a398..1de34eba4 100644 --- a/ema_workbench/connectors/netlogo.py +++ b/ema_workbench/connectors/netlogo.py @@ -60,9 +60,7 @@ def ts_output_variables(self): if self._ts_output_variables is None: timeseries = [o for o in self.outcomes if isinstance(o, ArrayOutcome)] - self._ts_output_variables = [ - var for o in timeseries for var in o.variable_name - ] + self._ts_output_variables = [var for o in timeseries for var in o.variable_name] return self._ts_output_variables @@ -185,9 +183,7 @@ def run_experiment(self, experiment): commands = [] fns = {} for variable in self.ts_output_variables: - fn = r"{0}{3}{1}{2}".format( - self.working_directory, variable, ".txt", os.sep - ) + fn = r"{0}{3}{1}{2}".format(self.working_directory, variable, ".txt", os.sep) fns[variable] = fn fn = f'"{fn}"' fn = fn.replace(os.sep, "/") @@ -195,7 +191,7 @@ def run_experiment(self, 
experiment): if self.netlogo.report(f"is-agentset? {variable}"): # if name is name of an agentset, we # assume that we should count the total number of agents - nc = r"file-open {} file-write count {}".format(fn, variable,) + nc = r"file-open {} file-write count {}".format(fn, variable) else: # it is not an agentset, so assume that it is # a reporter / global variable diff --git a/ema_workbench/connectors/simio_connector.py b/ema_workbench/connectors/simio_connector.py index b1ed267b5..2fb08c003 100644 --- a/ema_workbench/connectors/simio_connector.py +++ b/ema_workbench/connectors/simio_connector.py @@ -63,9 +63,7 @@ class SimioModel(FileModel, SingleReplication): """ @method_logger(__name__) - def __init__( - self, name, wd=None, model_file=None, main_model=None, n_replications=10 - ): + def __init__(self, name, wd=None, model_file=None, main_model=None, n_replications=10): """interface to the model Parameters @@ -126,9 +124,7 @@ def model_init(self, policy): # set up new EMA specific experiment on model _logger.debug("setting up EMA experiment") - self.experiment = SimioAPI.IExperiment( - model.Experiments.Create("ema experiment") - ) + self.experiment = SimioAPI.IExperiment(model.Experiments.Create("ema experiment")) SimioAPI.IExperimentResponses(self.experiment.Responses).Clear() # use all available responses as template for experiment responses @@ -142,9 +138,7 @@ def model_init(self, policy): except KeyError: raise EMAError(f"response with name '{name}' not found") - response = SimioAPI.IExperimentResponse( - self.experiment.Responses.Create(name) - ) + response = SimioAPI.IExperimentResponse(self.experiment.Responses.Create(name)) response.set_Expression(value.Expression) response.set_Objective(value.Objective) diff --git a/ema_workbench/connectors/vadere.py b/ema_workbench/connectors/vadere.py index efabdfb31..34b015666 100644 --- a/ema_workbench/connectors/vadere.py +++ b/ema_workbench/connectors/vadere.py @@ -200,9 +200,7 @@ def run_experiment(self, experiment): scalar_res = [] for file in self.processor_files: if file.endswith(".csv"): - timeseries_res[file] = pd.read_csv( - os.path.join(output_dir, file), sep=" " - ) + timeseries_res[file] = pd.read_csv(os.path.join(output_dir, file), sep=" ") if file.endswith(".txt"): scalar_res.append(os.path.join(output_dir, file)) diff --git a/ema_workbench/connectors/vensim.py b/ema_workbench/connectors/vensim.py index 91c8d72fb..e68569551 100644 --- a/ema_workbench/connectors/vensim.py +++ b/ema_workbench/connectors/vensim.py @@ -188,9 +188,7 @@ def get_data(filename, varname, step=1): def VensimModelStructureInterface(name, wd=None, model_file=None): - warnings.warn( - "VensimModelStructureInterface is deprecated use " "VensimModel instead" - ) + warnings.warn("VensimModelStructureInterface is deprecated use " "VensimModel instead") return VensimModel(name, wd=wd, model_file=model_file) @@ -347,9 +345,7 @@ def run_experiment(self, experiment): for lookup_uncertainty in self._lookup_uncertainties: # ask the lookup to transform the retrieved uncertainties to the # proper lookup value - experiment[lookup_uncertainty.name] = lookup_uncertainty.transform( - experiment - ) + experiment[lookup_uncertainty.name] = lookup_uncertainty.transform(experiment) for key, value in experiment.items(): set_value(key, value) @@ -395,9 +391,7 @@ def _delete_lookup_uncertainties(self): deleting lookup uncertainties from the uncertainty list """ self._lookup_uncertainties = self._lookup_uncertainties[:] - self.uncertainties = [ - x for x in self.uncertainties 
if x not in self._lookup_uncertainties - ] + self.uncertainties = [x for x in self.uncertainties if x not in self._lookup_uncertainties] class LookupUncertainty(Parameter): @@ -495,9 +489,7 @@ def __init__(self, lookup_type, values, name, msi, ymin=None, ymax=None): } if self.lookup_type == "categories": - msi.uncertainties.append( - CategoricalParameter("c-" + self.name), range(len(values)) - ) + msi.uncertainties.append(CategoricalParameter("c-" + self.name), range(len(values))) msi._lookup_uncertainties.append(self) elif self.lookup_type == "hearne1": msi.uncertainties.append(RealParameter("m-" + self.name, *values[0])) @@ -624,9 +616,7 @@ def _hearne1(self, case): df.append(l + m - ((m + l - u) * (i - p) / (self.x_min - p))) new_lookup = [] for i in range(len(self.x)): - new_lookup.append( - (self.x[i], max(min(df[i] * self.y[i], self.y_max), self.y_min)) - ) + new_lookup.append((self.x[i], max(min(df[i] * self.y[i], self.y_max), self.y_min))) return new_lookup def _hearne2(self, case): @@ -651,9 +641,7 @@ def _hearne2(self, case): df.append(u + m2 - (m2 * (i - p2) / (self.x_max - p2))) new_lookup = [] for i in range(len(self.x)): - new_lookup.append( - (self.x[i], max(min(df[i] * self.y[i], self.y_max), self.y_min)) - ) + new_lookup.append((self.x[i], max(min(df[i] * self.y[i], self.y_max), self.y_min))) return new_lookup def _approx(self, case): @@ -670,25 +658,12 @@ def _approx(self, case): if self.x_max > 10: for i in range(int(self.x_min), int(self.x_max + 1)): new_lookup.append( - ( - i, - max( - min(self._gen_log(i, A, K, B, Q, M), self.y_max), self.y_min - ), - ) + (i, max(min(self._gen_log(i, A, K, B, Q, M), self.y_max), self.y_min)) ) else: - for i in range( - int(self.x_min * 10), int(self.x_max * 10 + 1), max(int(self.x_max), 1) - ): + for i in range(int(self.x_min * 10), int(self.x_max * 10 + 1), max(int(self.x_max), 1)): new_lookup.append( - ( - i / 10, - max( - min(self._gen_log(i / 10, A, K, B, Q, M), self.y_max), - self.y_min, - ), - ) + (i / 10, max(min(self._gen_log(i / 10, A, K, B, Q, M), self.y_max), self.y_min)) ) return new_lookup diff --git a/ema_workbench/connectors/vensimDLLwrapper.py b/ema_workbench/connectors/vensimDLLwrapper.py index 9e2918592..cd6df88ca 100644 --- a/ema_workbench/connectors/vensimDLLwrapper.py +++ b/ema_workbench/connectors/vensimDLLwrapper.py @@ -322,9 +322,7 @@ def get_varattrib(varname, attribute): buf = ctypes.create_string_buffer("", 10) maxBuf = ctypes.c_int(10) - bufferlength = vensim.vensim_get_varattrib( - varname.encode("utf-8"), attribute, buf, maxBuf - ) + bufferlength = vensim.vensim_get_varattrib(varname.encode("utf-8"), attribute, buf, maxBuf) if bufferlength == -1: raise VensimWarning("variable not found") diff --git a/ema_workbench/em_framework/callbacks.py b/ema_workbench/em_framework/callbacks.py index 248f8b9f9..0daa39721 100644 --- a/ema_workbench/em_framework/callbacks.py +++ b/ema_workbench/em_framework/callbacks.py @@ -78,9 +78,7 @@ def __init__( super().__init__(nr_experiments, reporting_frequency, _logger, log_progress) if reporting_interval is None: - reporting_interval = max( - 1, int(round(nr_experiments / reporting_frequency)) - ) + reporting_interval = max(1, int(round(nr_experiments / reporting_frequency))) self.reporting_interval = reporting_interval @@ -124,9 +122,7 @@ class DefaultCallback(AbstractCallback): """ shape_error_msg = "can only save up to 2d arrays, this array is {}d" - constraint_error_msg = ( - "can only save 1d arrays for constraint, " "this array is {}d" - ) + constraint_error_msg = "can 
only save 1d arrays for constraint, " "this array is {}d" def __init__( self, @@ -198,8 +194,7 @@ def __init__( # FIXME:: issue with fragmented data frame warning index = np.arange(nr_experiments) column_dict = { - name: pd.Series(dtype=dtype, index=index) - for name, dtype in zip(columns, dtypes) + name: pd.Series(dtype=dtype, index=index) for name, dtype in zip(columns, dtypes) } df = pd.concat(column_dict, axis=1).copy() @@ -210,9 +205,7 @@ def __init__( shape = outcome.shape if shape is not None: shape = (nr_experiments,) + shape - self.results[outcome.name] = self._setup_outcomes_array( - shape, dtype=float - ) + self.results[outcome.name] = self._setup_outcomes_array(shape, dtype=float) def _store_case(self, experiment): scenario = experiment.scenario @@ -241,7 +234,7 @@ def _store_outcomes(self, case_id, outcomes): _logger.debug(message) else: try: - self.results[outcome][case_id,] = outcome_res + self.results[outcome][case_id] = outcome_res except KeyError: data = np.asarray(outcome_res) @@ -254,10 +247,8 @@ def _store_outcomes(self, case_id, outcomes): shape = list(shape) shape.insert(0, self.nr_experiments) - self.results[outcome] = self._setup_outcomes_array( - shape, data.dtype - ) - self.results[outcome][case_id,] = outcome_res + self.results[outcome] = self._setup_outcomes_array(shape, data.dtype) + self.results[outcome][case_id] = outcome_res def __call__(self, experiment, outcomes): """ @@ -348,9 +339,7 @@ def __init__( self.outcome_fhs = {} for outcome in self.outcomes: - self.outcome_fhs[outcome] = open( - os.path.join(self.directory, outcome + ".csv"), "w" - ) + self.outcome_fhs[outcome] = open(os.path.join(self.directory, outcome + ".csv"), "w") def _store_case(self, experiment): scenario = experiment.scenario diff --git a/ema_workbench/em_framework/ema_ipyparallel.py b/ema_workbench/em_framework/ema_ipyparallel.py index c752ebe18..0bdb451a8 100644 --- a/ema_workbench/em_framework/ema_ipyparallel.py +++ b/ema_workbench/em_framework/ema_ipyparallel.py @@ -162,10 +162,7 @@ class LogWatcher(LoggingConfigurable): # configurables topics = List( - [""], - help=( - "The ZMQ topics to subscribe to. Default is to" "subscribe to all messages" - ), + [""], help=("The ZMQ topics to subscribe to. 
Default is to" "subscribe to all messages") ).tag(config=True) url = Unicode(help="ZMQ url on which to listen for log messages").tag(config=True) diff --git a/ema_workbench/em_framework/evaluators.py b/ema_workbench/em_framework/evaluators.py index ab9532187..fad1af482 100644 --- a/ema_workbench/em_framework/evaluators.py +++ b/ema_workbench/em_framework/evaluators.py @@ -346,9 +346,7 @@ def __init__(self, msis, n_processes=None, maxtasksperchild=None, **kwargs): if isinstance(n_processes, int): if n_processes > 0: if max_processes > n_processes: - warnings.warn( - f"the number of processes cannot be more then {max_processes}" - ) + warnings.warn(f"the number of processes cannot be more then {max_processes}") self.n_processes = min(n_processes, max_processes) else: self.n_processes = max(max_processes + self.n_processes, 1) @@ -378,9 +376,7 @@ def initialize(self): self.root_dir = None break else: - random_part = [ - random.choice(string.ascii_letters + string.digits) for _ in range(5) - ] + random_part = [random.choice(string.ascii_letters + string.digits) for _ in range(5)] random_part = "".join(random_part) self.root_dir = os.path.abspath("tmp" + random_part) os.makedirs(self.root_dir) @@ -521,16 +517,12 @@ def perform_experiments( # unreadable in this form if not scenarios and not policies: - raise EMAError( - "no experiments possible since both " "scenarios and policies are 0" - ) + raise EMAError("no experiments possible since both " "scenarios and policies are 0") scenarios, uncertainties, n_scenarios = setup_scenarios( scenarios, uncertainty_sampling, uncertainty_union, models ) - policies, levers, n_policies = setup_policies( - policies, lever_sampling, lever_union, models - ) + policies, levers, n_policies = setup_policies(policies, lever_sampling, lever_union, models) try: n_models = len(models) @@ -545,20 +537,18 @@ def perform_experiments( # TODO:: change to 0 policies / 0 scenarios is sampling set to 0 for # it _logger.info( - ( - "performing {} scenarios * {} policies * {} model(s) = " - "{} experiments" - ).format(n_scenarios, n_policies, n_models, nr_of_exp) + ("performing {} scenarios * {} policies * {} model(s) = " "{} experiments").format( + n_scenarios, n_policies, n_models, nr_of_exp + ) ) else: nr_of_exp = n_models * max(n_scenarios, n_policies) # TODO:: change to 0 policies / 0 scenarios is sampling set to 0 for # it _logger.info( - ( - "performing max({} scenarios, {} policies) * {} model(s) = " - "{} experiments" - ).format(n_scenarios, n_policies, n_models, nr_of_exp) + ("performing max({} scenarios, {} policies) * {} model(s) = " "{} experiments").format( + n_scenarios, n_policies, n_models, nr_of_exp + ) ) callback = setup_callback( @@ -730,37 +720,25 @@ def optimize( """ if searchover not in ("levers", "uncertainties"): raise EMAError( - "searchover should be one of 'levers' or" - "'uncertainties' not {}".format(searchover) + "searchover should be one of 'levers' or" "'uncertainties' not {}".format(searchover) ) try: if len(models) == 1: models = models[0] else: - raise NotImplementedError( - "optimization over multiple" "models yet supported" - ) + raise NotImplementedError("optimization over multiple" "models yet supported") except TypeError: pass - problem = to_problem( - models, searchover, constraints=constraints, reference=reference - ) + problem = to_problem(models, searchover, constraints=constraints, reference=reference) # solve the optimization problem if not evaluator: evaluator = SequentialEvaluator(models) return _optimize( - problem, - evaluator, 
- algorithm, - convergence, - nfe, - convergence_freq, - logging_freq, - **kwargs, + problem, evaluator, algorithm, convergence, nfe, convergence_freq, logging_freq, **kwargs ) @@ -812,10 +790,7 @@ def robust_optimize( assert rf.function is not None problem = to_robust_problem( - model, - scenarios, - constraints=constraints, - robustness_functions=robustness_functions, + model, scenarios, constraints=constraints, robustness_functions=robustness_functions ) # solve the optimization problem @@ -831,4 +806,4 @@ convergence_freq, logging_freq, **kwargs, - ) \ No newline at end of file + ) diff --git a/ema_workbench/em_framework/experiment_runner.py b/ema_workbench/em_framework/experiment_runner.py index 54f05650f..d7aa25e9a 100644 --- a/ema_workbench/em_framework/experiment_runner.py +++ b/ema_workbench/em_framework/experiment_runner.py @@ -39,8 +39,7 @@ class ExperimentRunner: def __init__(self, msis): self.msis = msis self.log_message = ( - "running scenario {scenario_id} for policy " - "{policy_name} on model {model_name}" + "running scenario {scenario_id} for policy " "{policy_name} on model {model_name}" ) @method_logger(__name__) diff --git a/ema_workbench/em_framework/model.py b/ema_workbench/em_framework/model.py index ad218bfb3..3e7c7a849 100644 --- a/ema_workbench/em_framework/model.py +++ b/ema_workbench/em_framework/model.py @@ -70,9 +70,7 @@ def outcomes_output(self, outputs): @property def output_variables(self): if self._output_variables is None: - self._output_variables = [ - var for o in self.outcomes for var in o.variable_name - ] + self._output_variables = [var for o in self.outcomes for var in o.variable_name] return self._output_variables @@ -98,9 +96,7 @@ def __init__(self, name): super().__init__(name) if not self.name.isalnum(): - raise EMAError( - "name of model should only contain " "alpha numerical characters" - ) + raise EMAError("name of model should only contain alphanumeric characters") self._output_variables = None self._outcomes_output = {} @@ -242,10 +238,7 @@ def as_dict(self): def join_attr(field): joined = ", ".join( - [ - repr(entry) - for entry in sorted(field, key=operator.attrgetter("name")) - ] + [repr(entry) for entry in sorted(field, key=operator.attrgetter("name"))] ) return f"[{joined}]" @@ -293,9 +286,7 @@ def replications(self, replications): self._replications = [MyDict(**entry) for entry in replications] self.nreplications = len(replications) else: - raise TypeError( - f"replications should be int or list not {type(replications)}" - ) + raise TypeError(f"replications should be int or list, not {type(replications)}") @method_logger(__name__) def run_model(self, scenario, policy): diff --git a/ema_workbench/em_framework/optimization.py b/ema_workbench/em_framework/optimization.py index e94d1c5b8..30de81641 100644 --- a/ema_workbench/em_framework/optimization.py +++ b/ema_workbench/em_framework/optimization.py @@ -14,12 +14,7 @@ from . 
import callbacks, evaluators from .points import Scenario, Policy from .outcomes import AbstractOutcome -from .parameters import ( - IntegerParameter, - RealParameter, - CategoricalParameter, - BooleanParameter, -) +from .parameters import IntegerParameter, RealParameter, CategoricalParameter, BooleanParameter from .samplers import determine_parameters from .util import determine_objects, ProgressTrackingMixIn from ..util import get_module_logger, EMAError, temporary_filter, INFO @@ -107,9 +102,7 @@ class Problem(PlatypusProblem): def parameter_names(self): return [e.name for e in self.parameters] - def __init__( - self, searchover, parameters, outcome_names, constraints, reference=None - ): + def __init__(self, searchover, parameters, outcome_names, constraints, reference=None): if constraints is None: constraints = [] @@ -136,9 +129,7 @@ class RobustProblem(Problem): """small extension to Problem object for robust optimization, adds the scenarios and the robustness functions""" - def __init__( - self, parameters, outcome_names, scenarios, robustness_functions, constraints - ): + def __init__(self, parameters, outcome_names, scenarios, robustness_functions, constraints): super().__init__("robust", parameters, outcome_names, constraints) assert len(robustness_functions) == len(outcome_names) self.scenarios = scenarios @@ -172,9 +163,7 @@ def to_problem(model, searchover, reference=None, constraints=None): outcome_names = [outcome.name for outcome in outcomes] if not outcomes: - raise EMAError( - "no outcomes specified to optimize over, " "all outcomes are of kind=INFO" - ) + raise EMAError("no outcomes specified to optimize over, " "all outcomes are of kind=INFO") problem = Problem( searchover, decision_variables, outcome_names, constraints, reference=reference @@ -211,9 +200,7 @@ def to_robust_problem(model, scenarios, robustness_functions, constraints=None): outcome_names = [outcome.name for outcome in outcomes] if not outcomes: - raise EMAError( - "no outcomes specified to optimize over, " "all outcomes are of kind=INFO" - ) + raise EMAError("no outcomes specified to optimize over, " "all outcomes are of kind=INFO") problem = RobustProblem( decision_variables, outcome_names, scenarios, robustness_functions, constraints @@ -267,9 +254,7 @@ def to_dataframe(optimizer, dvnames, outcome_names): solutions = [] for solution in platypus.unique(platypus.nondominated(optimizer.result)): - vars = transform_variables( - solution.problem, solution.variables # @ReservedAssignment - ) + vars = transform_variables(solution.problem, solution.variables) # @ReservedAssignment decision_vars = dict(zip(dvnames, vars)) decision_out = dict(zip(outcome_names, solution.objectives)) @@ -407,9 +392,7 @@ def evaluate(jobs_collection, experiments, outcomes, problem): # TODO:: only retain uncertainties job_experiment = experiments[logical] - job_constraints = _evaluate_constraints( - job_experiment, job_outputs, constraints - ) + job_constraints = _evaluate_constraints(job_experiment, job_outputs, constraints) job_outcomes = [job_outputs[key] for key in outcome_names] if job_constraints: @@ -439,9 +422,7 @@ def evaluate_robust(jobs_collection, experiments, outcomes, problem): # TODO:: only retain levers job_experiment = experiments[logical].iloc[0] - job_constraints = _evaluate_constraints( - job_experiment, job_outcomes_dict, constraints - ) + job_constraints = _evaluate_constraints(job_experiment, job_outcomes_dict, constraints) if job_constraints: job.solution.problem.function = lambda _: (job_outcomes, 
job_constraints) @@ -511,9 +492,7 @@ def __init__(self, minimum, maximum): self.hypervolume_func = Hypervolume(minimum=minimum, maximum=maximum) def __call__(self, optimizer): - self.results.append( - self.hypervolume_func.calculate(optimizer.algorithm.archive) - ) + self.results.append(self.hypervolume_func.calculate(optimizer.algorithm.archive)) @classmethod def from_outcomes(cls, outcomes): @@ -537,9 +516,7 @@ class ArchiveLogger(AbstractConvergenceMetric): """ - def __init__( - self, directory, decision_varnames, outcome_varnames, base_filename="archive" - ): + def __init__(self, directory, decision_varnames, outcome_varnames, base_filename="archive"): super().__init__("archive_logger") self.directory = os.path.abspath(directory) self.base = base_filename @@ -574,21 +551,13 @@ class Convergence(ProgressTrackingMixIn): valid_metrics = {"hypervolume", "epsilon_progress", "archive_logger"} - def __init__( - self, - metrics, - max_nfe, - convergence_freq=1000, - logging_freq=5, - log_progress=False, - ): + def __init__(self, metrics, max_nfe, convergence_freq=1000, logging_freq=5, log_progress=False): super().__init__( max_nfe, logging_freq, _logger, log_progress=log_progress, - log_func=lambda self: f"generation" - f" {self.generation}, {self.i}/{self.max_nfe}", + log_func=lambda self: f"generation" f" {self.generation}, {self.i}/{self.max_nfe}", ) self.max_nfe = max_nfe @@ -607,9 +576,7 @@ def __init__( assert isinstance(metric, AbstractConvergenceMetric) metric.reset() - def __call__( - self, optimizer, - ): + def __call__(self, optimizer): nfe = optimizer.algorithm.nfe super().__call__(nfe - self.i) @@ -623,9 +590,7 @@ def __call__( metric(optimizer) def to_dataframe(self): - progress = { - metric.name: metric.results for metric in self.metrics if metric.results - } + progress = {metric.name: metric.results for metric in self.metrics if metric.results} progress = pd.DataFrame.from_dict(progress) @@ -729,9 +694,7 @@ def mutate_real(self, child, i, type, distribution_index=20): # @ReservedAssign delta = pow(b, 1.0 / (distribution_index + 1.0)) - 1.0 else: bu = (upper - x) / dx - b = 2.0 * (1.0 - u) + 2.0 * (u - 0.5) * pow( - 1.0 - bu, distribution_index + 1.0 - ) + b = 2.0 * (1.0 - u) + 2.0 * (u - 0.5) * pow(1.0 - bu, distribution_index + 1.0) delta = 1.0 - pow(b, 1.0 / (distribution_index + 1.0)) x = x + delta * dx @@ -773,7 +736,11 @@ def mutate_categorical(self, child, i, type): # @ReservedAssignment Subset: crossover_categorical, } - _mutate = {Real: mutate_real, Integer: mutate_integer, Subset: mutate_categorical} + _mutate = { + Real: mutate_real, + Integer: mutate_integer, + Subset: mutate_categorical, + } class CombinedMutator(CombinedVariator): @@ -840,9 +807,7 @@ def _optimize( pass else: if len(eps_values) != len(problem.outcome_names): - raise EMAError( - "number of epsilon values does not match number " "of outcomes" - ) + raise EMAError("number of epsilon values does not match number " "of outcomes") if all(isinstance(t, klass) for t in problem.types): variator = None @@ -962,9 +927,7 @@ def __init__( PM(probability=self.pm_p, distribution_index=self.pm_dist), ), GAOperator( - DifferentialEvolution( - crossover_rate=self.de_rate, step_size=self.de_stepsize - ), + DifferentialEvolution(crossover_rate=self.de_rate, step_size=self.de_stepsize), PM(probability=self.pm_p, distribution_index=self.pm_dist), ), GAOperator( diff --git a/ema_workbench/em_framework/outcomes.py b/ema_workbench/em_framework/outcomes.py index 4c7e5b46c..82923d35a 100644 --- 
a/ema_workbench/em_framework/outcomes.py +++ b/ema_workbench/em_framework/outcomes.py @@ -50,9 +50,7 @@ def __call__(self, outcome): if outcome.name not in self.outcomes: self.outcomes[outcome.name] = outcome.__class__ elif not isinstance(outcome, self.outcomes[outcome.name]): - raise ValueError( - "outcome with this name but of different class " "already exists" - ) + raise ValueError("outcome with this name but of different class " "already exists") else: pass # multiple instances of the same class and name is fine @@ -73,9 +71,7 @@ def serialize(self, name, values): try: stream, extension = self.outcomes[name].to_disk(values) except KeyError: - _logger.warning( - "outcome not defined, falling back on " "ArrayOutcome.to_disk" - ) + _logger.warning("outcome not defined, falling back on " "ArrayOutcome.to_disk") stream, extension = ArrayOutcome.to_disk(values) return stream, f"{name}.{extension}" @@ -126,13 +122,7 @@ class AbstractOutcome(Variable): INFO = 0 def __init__( - self, - name, - kind=INFO, - variable_name=None, - function=None, - expected_range=None, - shape=None, + self, name, kind=INFO, variable_name=None, function=None, expected_range=None, shape=None ): super().__init__(name) @@ -152,9 +142,7 @@ def __init__( if variable_name: if isinstance(variable_name, str): - variable_name = [ - variable_name, - ] + variable_name = [variable_name] self.variable_name = tuple(variable_name) else: @@ -186,9 +174,7 @@ def process(self, values): return self.function(*values) else: if len(values) > 1: - raise EMAError( - "more than one value returned without " "processing function" - ) + raise EMAError("more than one value returned without " "processing function") return values[0] @@ -321,8 +307,7 @@ def process(self, values): values = super().process(values) if not isinstance(values, numbers.Number): raise EMAError( - f"outcome {self.name} should be a scalar, but is" - f" {type(values)}: {values}" + f"outcome {self.name} should be a scalar, but is" f" {type(values)}: {values}" ) return values @@ -389,9 +374,7 @@ class ArrayOutcome(AbstractOutcome): """ - def __init__( - self, name, variable_name=None, function=None, expected_range=None, shape=None - ): + def __init__(self, name, variable_name=None, function=None, expected_range=None, shape=None): super().__init__( name, variable_name=variable_name, @@ -480,9 +463,7 @@ class TimeSeriesOutcome(ArrayOutcome): """ - def __init__( - self, name, variable_name=None, function=None, expected_range=None, shape=None - ): + def __init__(self, name, variable_name=None, function=None, expected_range=None, shape=None): super().__init__( name, variable_name=variable_name, @@ -563,10 +544,7 @@ def __init__(self, name, parameter_names=None, outcome_names=None, function=None variable_names = parameter_names + outcome_names super().__init__( - name, - kind=AbstractOutcome.INFO, - variable_name=variable_names, - function=function, + name, kind=AbstractOutcome.INFO, variable_name=variable_names, function=function ) self.parameter_names = parameter_names diff --git a/ema_workbench/em_framework/parameters.py b/ema_workbench/em_framework/parameters.py index 2a6c0674f..f4d1b8dfb 100644 --- a/ema_workbench/em_framework/parameters.py +++ b/ema_workbench/em_framework/parameters.py @@ -124,9 +124,7 @@ def resolution(self): def resolution(self, value): if value: if (min(value) < self.lower_bound) or (max(value) > self.upper_bound): - raise ValueError( - "resolution not consistent with lower and " "upper bound" - ) + raise ValueError("resolution not consistent with lower 
and " "upper bound") self._resolution = value def __init__( @@ -158,9 +156,7 @@ def from_dist(cls, name, dist, **kwargs): **kwargs : valid keyword arguments for Parameter instance """ - assert isinstance( - dist, sp.stats._distn_infrastructure.rv_frozen - ) # @UndefinedVariable + assert isinstance(dist, sp.stats._distn_infrastructure.rv_frozen) # @UndefinedVariable self = cls.__new__(cls) self.dist = dist self.name = name @@ -246,9 +242,7 @@ def __init__( pff=pff, ) - self.dist = sp.stats.uniform( - lower_bound, upper_bound - lower_bound - ) # @UndefinedVariable + self.dist = sp.stats.uniform(lower_bound, upper_bound - lower_bound) # @UndefinedVariable @classmethod def from_dist(cls, name, dist, **kwargs): @@ -309,9 +303,7 @@ def __init__( self.lower_bound = int(lower_bound) self.upper_bound = int(upper_bound) - self.dist = sp.stats.randint( - self.lower_bound, self.upper_bound + 1 - ) # @UndefinedVariable + self.dist = sp.stats.randint(self.lower_bound, self.upper_bound + 1) # @UndefinedVariable try: for idx, entry in enumerate(self.resolution): @@ -355,13 +347,7 @@ def categories(self, values): self._categories.extend(values) def __init__( - self, - name, - categories, - default=None, - variable_name=None, - pff=False, - multivalue=False, + self, name, categories, default=None, variable_name=None, pff=False, multivalue=False ): lower_bound = 0 upper_bound = len(categories) - 1 @@ -434,9 +420,7 @@ def from_dist(self, name, dist): # TODO:: how to handle this # probably need to pass categories as list and zip # categories to integers implied by dist - raise NotImplementedError( - "custom distributions over categories " "not supported yet" - ) + raise NotImplementedError("custom distributions over categories " "not supported yet") class BooleanParameter(CategoricalParameter): @@ -454,11 +438,7 @@ class BooleanParameter(CategoricalParameter): def __init__(self, name, default=None, variable_name=None, pff=False): super().__init__( - name, - categories=[True, False], - default=default, - variable_name=variable_name, - pff=pff, + name, categories=[True, False], default=default, variable_name=variable_name, pff=pff ) diff --git a/ema_workbench/em_framework/salib_samplers.py b/ema_workbench/em_framework/salib_samplers.py index bfc036ed9..33d65dbaa 100644 --- a/ema_workbench/em_framework/salib_samplers.py +++ b/ema_workbench/em_framework/salib_samplers.py @@ -154,9 +154,7 @@ class MorrisSampler(SALibSampler): Stating this variable to be true causes the function to ignore gurobi. 
""" - def __init__( - self, num_levels=4, optimal_trajectories=None, local_optimization=True - ): + def __init__(self, num_levels=4, optimal_trajectories=None, local_optimization=True): super().__init__() self.num_levels = num_levels self.optimal_trajectories = optimal_trajectories @@ -164,11 +162,7 @@ def __init__( def sample(self, problem, size): return morris.sample( - problem, - size, - self.num_levels, - self.optimal_trajectories, - self.local_optimization, + problem, size, self.num_levels, self.optimal_trajectories, self.local_optimization ) diff --git a/ema_workbench/em_framework/samplers.py b/ema_workbench/em_framework/samplers.py index ef00680d2..80f71ae32 100644 --- a/ema_workbench/em_framework/samplers.py +++ b/ema_workbench/em_framework/samplers.py @@ -605,9 +605,7 @@ def from_experiments(models, experiments): # we sample ff over models and policies so we need to ensure # we only get the experiments for a single model policy combination - logical = (experiments["model"] == model_names[0]) & ( - experiments["policy"] == policy_names[0] - ) + logical = (experiments["model"] == model_names[0]) & (experiments["policy"] == policy_names[0]) experiments = experiments[logical] diff --git a/ema_workbench/em_framework/util.py b/ema_workbench/em_framework/util.py index fac4405e0..15a34e38c 100644 --- a/ema_workbench/em_framework/util.py +++ b/ema_workbench/em_framework/util.py @@ -198,9 +198,7 @@ def combine(*args): for entry in args[1::]: overlap = set(experiment.keys()).intersection(set(entry.keys())) if overlap: - raise EMAError( - f"parameters exist in {experiment} and {entry}, overlap is {overlap}" - ) + raise EMAError(f"parameters exist in {experiment} and {entry}, overlap is {overlap}") experiment.update(entry) return experiment diff --git a/ema_workbench/examples/boostedtrees_flu_example.py b/ema_workbench/examples/boostedtrees_flu_example.py index b8f503be7..073624d68 100644 --- a/ema_workbench/examples/boostedtrees_flu_example.py +++ b/ema_workbench/examples/boostedtrees_flu_example.py @@ -58,9 +58,7 @@ def plot_diag(x1, ax): nominal = minima + (maxima - minima) / 2 # fit the boosted tree -bdt = AdaBoostClassifier( - DecisionTreeClassifier(max_depth=3), algorithm="SAMME", n_estimators=200 -) +bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), algorithm="SAMME", n_estimators=200) bdt.fit(x, y) # determine which dimensions are most important diff --git a/ema_workbench/examples/eijgenraam_example.py b/ema_workbench/examples/eijgenraam_example.py index 28950fed9..f07d39c2e 100644 --- a/ema_workbench/examples/eijgenraam_example.py +++ b/ema_workbench/examples/eijgenraam_example.py @@ -14,13 +14,7 @@ import numpy as np import scipy as sp -from ema_workbench import ( - Model, - RealParameter, - ScalarOutcome, - ema_logging, - MultiprocessingEvaluator, -) +from ema_workbench import Model, RealParameter, ScalarOutcome, ema_logging, MultiprocessingEvaluator ##============================================================================== ## Implement the model described by Eijgenraam et al. 
(2012) @@ -32,77 +26,17 @@ raw_data = { 10: (16.6939, 0.6258, 0.0014, 0.033027, 0.320, 0.003774, 1564.9, 0.00044, 1 / 2000), 11: (42.6200, 1.7068, 0.0000, 0.032000, 0.320, 0.003469, 1700.1, 0.00117, 1 / 2000), - 15: ( - 125.6422, - 1.1268, - 0.0098, - 0.050200, - 0.760, - 0.003764, - 11810.4, - 0.00137, - 1 / 2000, - ), - 16: ( - 324.6287, - 2.1304, - 0.0100, - 0.057400, - 0.760, - 0.002032, - 22656.5, - 0.00110, - 1 / 2000, - ), - 22: ( - 154.4388, - 0.9325, - 0.0066, - 0.070000, - 0.620, - 0.002893, - 9641.1, - 0.00055, - 1 / 2000, - ), + 15: (125.6422, 1.1268, 0.0098, 0.050200, 0.760, 0.003764, 11810.4, 0.00137, 1 / 2000), + 16: (324.6287, 2.1304, 0.0100, 0.057400, 0.760, 0.002032, 22656.5, 0.00110, 1 / 2000), + 22: (154.4388, 0.9325, 0.0066, 0.070000, 0.620, 0.002893, 9641.1, 0.00055, 1 / 2000), 23: (26.4653, 0.5250, 0.0034, 0.053400, 0.800, 0.002031, 61.6, 0.00137, 1 / 2000), 24: (71.6923, 1.0750, 0.0059, 0.043900, 1.060, 0.003733, 2706.4, 0.00188, 1 / 2000), 35: (49.7384, 0.6888, 0.0088, 0.036000, 1.060, 0.004105, 4534.7, 0.00196, 1 / 2000), 38: (24.3404, 0.7000, 0.0040, 0.025321, 0.412, 0.004153, 3062.6, 0.00171, 1 / 1250), - 41: ( - 58.8110, - 0.9250, - 0.0033, - 0.025321, - 0.422, - 0.002749, - 10013.1, - 0.00171, - 1 / 1250, - ), + 41: (58.8110, 0.9250, 0.0033, 0.025321, 0.422, 0.002749, 10013.1, 0.00171, 1 / 1250), 42: (21.8254, 0.4625, 0.0019, 0.026194, 0.442, 0.001241, 1090.8, 0.00171, 1 / 1250), - 43: ( - 340.5081, - 4.2975, - 0.0043, - 0.025321, - 0.448, - 0.002043, - 19767.6, - 0.00171, - 1 / 1250, - ), - 44: ( - 24.0977, - 0.7300, - 0.0054, - 0.031651, - 0.316, - 0.003485, - 37596.3, - 0.00033, - 1 / 1250, - ), + 43: (340.5081, 4.2975, 0.0043, 0.025321, 0.448, 0.002043, 19767.6, 0.00171, 1 / 1250), + 44: (24.0977, 0.7300, 0.0054, 0.031651, 0.316, 0.003485, 37596.3, 0.00033, 1 / 1250), 45: (3.4375, 0.1375, 0.0069, 0.033027, 0.320, 0.002397, 10421.2, 0.00016, 1 / 1250), 47: (8.7813, 0.3513, 0.0026, 0.029000, 0.358, 0.003257, 1369.0, 0.00171, 1 / 1250), 48: (35.6250, 1.4250, 0.0063, 0.023019, 0.496, 0.003076, 7046.4, 0.00171, 1 / 1250), @@ -216,9 +150,7 @@ def eijgenraam_model( investment = 0 for i in range(len(Xs)): - step_cost = exponential_investment_cost( - Xs[i], 0 if i == 0 else sum(Xs[:i]), c, b, lam - ) + step_cost = exponential_investment_cost(Xs[i], 0 if i == 0 else sum(Xs[:i]), c, b, lam) step_discount = math.exp(-delta * Ts[i]) investment += step_cost * step_discount @@ -238,13 +170,7 @@ def eijgenraam_model( losses = losses * S0 / (beta - delta) # salvage term - losses += ( - S0 - * math.exp(beta * T) - * math.exp(-theta * sum(Xs)) - * math.exp(-delta * T) - / delta - ) + losses += S0 * math.exp(beta * T) * math.exp(-theta * sum(Xs)) * math.exp(-delta * T) / delta def find_height(t): if t < Ts[0]: @@ -255,23 +181,13 @@ def find_height(t): return sum(Xs[: bisect.bisect_right(Ts, t)]) failure_probability = [ - P0 * np.exp(alpha * eta * t) * np.exp(-alpha * find_height(t)) - for t in range(T + 1) + P0 * np.exp(alpha * eta * t) * np.exp(-alpha * find_height(t)) for t in range(T + 1) ] - total_failure = 1 - functools.reduce( - operator.mul, [1 - p for p in failure_probability], 1 - ) + total_failure = 1 - functools.reduce(operator.mul, [1 - p for p in failure_probability], 1) mean_failure = sum(failure_probability) / (T + 1) max_failure = max(failure_probability) - return ( - investment, - losses, - investment + losses, - total_failure, - mean_failure, - max_failure, - ) + return (investment, losses, investment + losses, total_failure, mean_failure, max_failure) if 
__name__ == "__main__": diff --git a/ema_workbench/examples/excel_example.py b/ema_workbench/examples/excel_example.py index dc863e06b..528c38aa9 100644 --- a/ema_workbench/examples/excel_example.py +++ b/ema_workbench/examples/excel_example.py @@ -10,12 +10,7 @@ .. codeauthor:: jhkwakkel """ -from ema_workbench import ( - RealParameter, - TimeSeriesOutcome, - ema_logging, - perform_experiments, -) +from ema_workbench import RealParameter, TimeSeriesOutcome, ema_logging, perform_experiments from ema_workbench.connectors.excel import ExcelModel from ema_workbench.em_framework.evaluators import MultiprocessingEvaluator @@ -23,9 +18,7 @@ if __name__ == "__main__": ema_logging.log_to_stderr(level=ema_logging.INFO) - model = ExcelModel( - "predatorPrey", wd="./models/excelModel", model_file="excel example.xlsx" - ) + model = ExcelModel("predatorPrey", wd="./models/excelModel", model_file="excel example.xlsx") model.uncertainties = [ RealParameter("K2", 0.01, 0.2), # we can refer to a cell in the normal way @@ -48,6 +41,4 @@ model.default_sheet = "Sheet1" with MultiprocessingEvaluator(model) as evaluator: - results = perform_experiments( - model, 100, reporting_interval=1, evaluator=evaluator - ) + results = perform_experiments(model, 100, reporting_interval=1, evaluator=evaluator) diff --git a/ema_workbench/examples/feature_scoring_flu_confidence.py b/ema_workbench/examples/feature_scoring_flu_confidence.py index 6a9094908..ea3a947d8 100644 --- a/ema_workbench/examples/feature_scoring_flu_confidence.py +++ b/ema_workbench/examples/feature_scoring_flu_confidence.py @@ -9,10 +9,7 @@ import seaborn as sns from ema_workbench import ema_logging, load_results -from ema_workbench.analysis.feature_scoring import ( - get_ex_feature_scores, - RuleInductionType, -) +from ema_workbench.analysis.feature_scoring import get_ex_feature_scores, RuleInductionType ema_logging.log_to_stderr(level=ema_logging.INFO) @@ -29,9 +26,7 @@ selected_x = x.iloc[indices, :] selected_y = y[indices] - scores = get_ex_feature_scores( - selected_x, selected_y, mode=RuleInductionType.REGRESSION - )[0] + scores = get_ex_feature_scores(selected_x, selected_y, mode=RuleInductionType.REGRESSION)[0] all_scores.append(scores) all_scores = pd.concat(all_scores, axis=1, sort=False) diff --git a/ema_workbench/examples/flu_example.py b/ema_workbench/examples/flu_example.py index 729ac31eb..7b0184dc4 100644 --- a/ema_workbench/examples/flu_example.py +++ b/ema_workbench/examples/flu_example.py @@ -16,13 +16,7 @@ from numpy import sin, min from scipy import exp -from ema_workbench import ( - Model, - RealParameter, - TimeSeriesOutcome, - perform_experiments, - ema_logging, -) +from ema_workbench import Model, RealParameter, TimeSeriesOutcome, perform_experiments, ema_logging from ema_workbench import MultiprocessingEvaluator from ema_workbench.analysis import lines, Density @@ -43,9 +37,7 @@ def LookupFunctionX(variable, start, end, step, skew, growth, v=0.5): - return start + ( - (end - start) / ((1 + skew * exp(-growth * (variable - step))) ** (1 / v)) - ) + return start + ((end - start) / ((1 + skew * exp(-growth * (variable - step))) ** (1 / v))) def flu_model( @@ -103,8 +95,8 @@ def flu_model( susceptible_to_immune_population_flow_region_2 = 0.0 ###### - initial_value_population_region_1 = 6.0 * 10 ** 8 - initial_value_population_region_2 = 3.0 * 10 ** 9 + initial_value_population_region_1 = 6.0 * 10**8 + initial_value_population_region_2 = 3.0 * 10**9 initial_value_infected_population_region_1 = 10.0 
initial_value_infected_population_region_2 = 10.0 @@ -165,12 +157,8 @@ def flu_model( infected_population_region_1 = max(0, infected_population_region_1) infected_population_region_2 = max(0, infected_population_region_2) - infected_fraction_region_1 = ( - infected_population_region_1 / total_population_region_1 - ) - infected_fraction_region_2 = ( - infected_population_region_2 / total_population_region_2 - ) + infected_fraction_region_1 = infected_population_region_1 / total_population_region_1 + infected_fraction_region_2 = infected_population_region_2 / total_population_region_2 impact_infected_population_on_contact_rate_region_1 = 1 - ( infected_fraction_region_1 ** (1 / root_contact_rate_region_1) @@ -188,12 +176,10 @@ def flu_model( ) contact_rate_region_1 = ( - normal_contact_rate_region_1 - * impact_infected_population_on_contact_rate_region_1 + normal_contact_rate_region_1 * impact_infected_population_on_contact_rate_region_1 ) contact_rate_region_2 = ( - normal_contact_rate_region_2 - * impact_infected_population_on_contact_rate_region_2 + normal_contact_rate_region_2 * impact_infected_population_on_contact_rate_region_2 ) recoveries_region_1 = ( @@ -244,18 +230,13 @@ def flu_model( ) infected_population_region_1_NEXT = infected_population_region_1 + ( - TIME_STEP - * (infections_region_1 - flu_deaths_region_1 - recoveries_region_1) + TIME_STEP * (infections_region_1 - flu_deaths_region_1 - recoveries_region_1) ) infected_population_region_2_NEXT = infected_population_region_2 + ( - TIME_STEP - * (infections_region_2 - flu_deaths_region_2 - recoveries_region_2) + TIME_STEP * (infections_region_2 - flu_deaths_region_2 - recoveries_region_2) ) - if ( - infected_population_region_1_NEXT < 0 - or infected_population_region_2_NEXT < 0 - ): + if infected_population_region_1_NEXT < 0 or infected_population_region_2_NEXT < 0: pass recovered_population_region_1_NEXT = recovered_population_region_1 + ( @@ -353,8 +334,7 @@ def flu_model( / susceptible_to_immune_population_delay_time_region_1 ) susmaxreg1 = -( - immune_population_region_1 - / susceptible_to_immune_population_delay_time_region_1 + immune_population_region_1 / susceptible_to_immune_population_delay_time_region_1 ) if (susmaxreg1 >= susminreg1_1) or (susmaxreg1 >= susminreg1_2): susceptible_to_immune_population_flow_region_1 = susmaxreg1 @@ -374,8 +354,7 @@ def flu_model( / susceptible_to_immune_population_delay_time_region_2 ) susmaxreg2 = -( - immune_population_region_2 - / susceptible_to_immune_population_delay_time_region_2 + immune_population_region_2 / susceptible_to_immune_population_delay_time_region_2 ) if (susmaxreg2 >= susminreg2_1) or (susmaxreg2 >= susminreg2_2): susceptible_to_immune_population_flow_region_2 = susmaxreg2 @@ -387,12 +366,10 @@ def flu_model( susceptible_to_immune_population_flow_region_2 = 0 susceptible_population_region_1_NEXT = susceptible_population_region_1 - ( - TIME_STEP - * (infections_region_1 + susceptible_to_immune_population_flow_region_1) + TIME_STEP * (infections_region_1 + susceptible_to_immune_population_flow_region_1) ) susceptible_population_region_2_NEXT = susceptible_population_region_2 - ( - TIME_STEP - * (infections_region_2 + susceptible_to_immune_population_flow_region_2) + TIME_STEP * (infections_region_2 + susceptible_to_immune_population_flow_region_2) ) immune_population_region_1_NEXT = immune_population_region_1 + ( @@ -443,10 +420,7 @@ def flu_model( # End of main code - return { - "TIME": runTime, - "deceased_population_region_1": deceased_population_region_1, - } + 
return {"TIME": runTime, "deceased_population_region_1": deceased_population_region_1} if __name__ == "__main__": @@ -473,10 +447,7 @@ def flu_model( RealParameter("x102", 0, 200), ] - model.outcomes = [ - TimeSeriesOutcome("TIME"), - TimeSeriesOutcome("deceased_population_region_1"), - ] + model.outcomes = [TimeSeriesOutcome("TIME"), TimeSeriesOutcome("deceased_population_region_1")] nr_experiments = 500 diff --git a/ema_workbench/examples/flu_pairsplot.py b/ema_workbench/examples/flu_pairsplot.py index 33483b782..9e2ff92a7 100644 --- a/ema_workbench/examples/flu_pairsplot.py +++ b/ema_workbench/examples/flu_pairsplot.py @@ -7,11 +7,7 @@ import numpy as np from ema_workbench import load_results, ema_logging -from ema_workbench.analysis.pairs_plotting import ( - pairs_lines, - pairs_scatter, - pairs_density, -) +from ema_workbench.analysis.pairs_plotting import pairs_lines, pairs_scatter, pairs_density ema_logging.log_to_stderr(level=ema_logging.DEFAULT_LEVEL) diff --git a/ema_workbench/examples/flu_vensim_example.py b/ema_workbench/examples/flu_vensim_example.py index 3a6071b21..035fab3c1 100644 --- a/ema_workbench/examples/flu_vensim_example.py +++ b/ema_workbench/examples/flu_vensim_example.py @@ -23,18 +23,14 @@ if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) - model = VensimModel( - "fluCase", wd=r"./models/flu", model_file=r"FLUvensimV1basecase.vpm" - ) + model = VensimModel("fluCase", wd=r"./models/flu", model_file=r"FLUvensimV1basecase.vpm") # outcomes model.outcomes = [ TimeSeriesOutcome("deceased population region 1"), TimeSeriesOutcome("infected fraction R1"), ScalarOutcome( - "max infection fraction", - variable_name="infected fraction R1", - function=np.max, + "max infection fraction", variable_name="infected fraction R1", function=np.max ), ] diff --git a/ema_workbench/examples/flu_vensim_example_advanced.py b/ema_workbench/examples/flu_vensim_example_advanced.py index 91e60b78c..735d81892 100644 --- a/ema_workbench/examples/flu_vensim_example_advanced.py +++ b/ema_workbench/examples/flu_vensim_example_advanced.py @@ -31,23 +31,17 @@ def time_of_max(infected_fraction, time): if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) - model = VensimModel( - "fluCase", wd="./models/flu", model_file="FLUvensimV1basecase.vpm" - ) + model = VensimModel("fluCase", wd="./models/flu", model_file="FLUvensimV1basecase.vpm") # outcomes model.outcomes = [ TimeSeriesOutcome("deceased population region 1"), TimeSeriesOutcome("infected fraction R1"), ScalarOutcome( - "max infection fraction", - variable_name="infected fraction R1", - function=np.max, + "max infection fraction", variable_name="infected fraction R1", function=np.max ), ScalarOutcome( - "time of max", - variable_name=["infected fraction R1", "TIME"], - function=time_of_max, + "time of max", variable_name=["infected fraction R1", "TIME"], function=time_of_max ), ] diff --git a/ema_workbench/examples/flu_vensim_no_policy_example.py b/ema_workbench/examples/flu_vensim_no_policy_example.py index aa4498b76..ab9ad70d3 100644 --- a/ema_workbench/examples/flu_vensim_no_policy_example.py +++ b/ema_workbench/examples/flu_vensim_no_policy_example.py @@ -21,9 +21,7 @@ if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.INFO) - model = VensimModel( - "fluCase", wd="./models/flu", model_file="FLUvensimV1basecase.vpm" - ) + model = VensimModel("fluCase", wd="./models/flu", model_file="FLUvensimV1basecase.vpm") # outcomes model.outcomes = [ diff --git a/ema_workbench/examples/lake_model.py 
b/ema_workbench/examples/lake_model.py index 8a2bfed08..4f406c1d3 100644 --- a/ema_workbench/examples/lake_model.py +++ b/ema_workbench/examples/lake_model.py @@ -34,9 +34,9 @@ def lake_problem( try: decisions = [kwargs[str(i)] for i in range(100)] except KeyError: - decisions = [0,] * 100 + decisions = [0] * 100 - Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5) + Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5) nvars = len(decisions) X = np.zeros((nvars,)) average_daily_P = np.zeros((nvars,)) @@ -47,8 +47,8 @@ def lake_problem( X[0] = 0.0 natural_inflows = np.random.lognormal( - math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)), - math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)), + math.log(mean**2 / math.sqrt(stdev**2 + mean**2)), + math.sqrt(math.log(1.0 + stdev**2 / mean**2)), size=nvars, ) @@ -87,9 +87,7 @@ def lake_problem( ] # set levers, one for each time step - lake_model.levers = [ - RealParameter(str(i), 0, 0.1) for i in range(lake_model.time_horizon) - ] + lake_model.levers = [RealParameter(str(i), 0, 0.1) for i in range(lake_model.time_horizon)] # specify outcomes lake_model.outcomes = [ @@ -107,6 +105,4 @@ def lake_problem( n_policies = 4 with MultiprocessingEvaluator(lake_model) as evaluator: - res = evaluator.perform_experiments( - n_scenarios, n_policies, lever_sampling=Samplers.MC - ) + res = evaluator.perform_experiments(n_scenarios, n_policies, lever_sampling=Samplers.MC) diff --git a/ema_workbench/examples/lake_model_dps.py b/ema_workbench/examples/lake_model_dps.py index ce87ab079..08c02372a 100644 --- a/ema_workbench/examples/lake_model_dps.py +++ b/ema_workbench/examples/lake_model_dps.py @@ -76,10 +76,10 @@ def lake_problem( r2=0.5, w1=0.5, ): - Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5) + Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5) - X = np.zeros((myears,)) - average_daily_P = np.zeros((myears,)) + X = np.zeros((myears)) + average_daily_P = np.zeros((myears)) reliability = 0.0 inertia = 0 utility = 0 @@ -88,12 +88,12 @@ def lake_problem( X[0] = 0.0 decision = 0.1 - decisions = np.zeros(myears,) + decisions = np.zeros(myears) decisions[0] = decision natural_inflows = np.random.lognormal( - math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)), - math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)), + math.log(mean**2 / math.sqrt(stdev**2 + mean**2)), + math.sqrt(math.log(1.0 + stdev**2 / mean**2)), size=myears, ) @@ -112,9 +112,7 @@ def lake_problem( reliability += np.sum(X < Pcrit) / (nsamples * myears) inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears) - utility += ( - np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples - ) + utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples max_P = np.max(average_daily_P) return max_P, utility, inertia, reliability @@ -166,6 +164,6 @@ def lake_problem( evaluator.optimize( searchover="levers", nfe=100000, - epsilons=[0.1,] * len(lake_model.outcomes), + epsilons=[0.1] * len(lake_model.outcomes), reference=reference, ) diff --git a/ema_workbench/examples/lake_model_dps_robust.py b/ema_workbench/examples/lake_model_dps_robust.py index bbf7e9542..5d29725c5 100644 --- a/ema_workbench/examples/lake_model_dps_robust.py +++ b/ema_workbench/examples/lake_model_dps_robust.py @@ -75,10 +75,10 @@ def lake_problem( r2=0.5, w1=0.5, ): - Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5) + Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5) - X = 
np.zeros((myears,)) - average_daily_P = np.zeros((myears,)) + X = np.zeros((myears)) + average_daily_P = np.zeros((myears)) reliability = 0.0 inertia = 0 utility = 0 @@ -87,12 +87,12 @@ def lake_problem( X[0] = 0.0 decision = 0.1 - decisions = np.zeros(myears,) + decisions = np.zeros(myears) decisions[0] = decision natural_inflows = np.random.lognormal( - math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)), - math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)), + math.log(mean**2 / math.sqrt(stdev**2 + mean**2)), + math.sqrt(math.log(1.0 + stdev**2 / mean**2)), size=myears, ) @@ -111,9 +111,7 @@ def lake_problem( reliability += np.sum(X < Pcrit) / (nsamples * myears) inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears) - utility += ( - np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples - ) + utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples max_P = np.max(average_daily_P) return max_P, utility, inertia, reliability @@ -170,10 +168,7 @@ def signal_to_noise(data): ScalarOutcome("mean p", kind=MINIMIZE, variable_name="max_P", function=np.mean), ScalarOutcome("std p", kind=MINIMIZE, variable_name="max_P", function=np.std), ScalarOutcome( - "sn reliability", - kind=MAXIMIZE, - variable_name="reliability", - function=signal_to_noise, + "sn reliability", kind=MAXIMIZE, variable_name="reliability", function=signal_to_noise ), ] n_scenarios = 10 @@ -185,6 +180,6 @@ def signal_to_noise(data): robustnes_functions, scenarios, nfe=nfe, - epsilons=[0.1,] * len(robustnes_functions), + epsilons=[0.1] * len(robustnes_functions), population_size=5, ) diff --git a/ema_workbench/examples/lake_model_intertemporal.py b/ema_workbench/examples/lake_model_intertemporal.py index 22b7b8dd6..dd0a2538b 100644 --- a/ema_workbench/examples/lake_model_intertemporal.py +++ b/ema_workbench/examples/lake_model_intertemporal.py @@ -235,7 +235,7 @@ def lake_problem( ] ) - Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5) + Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5) nvars = len(decisions) X = np.zeros((nvars,)) average_daily_P = np.zeros((nvars,)) @@ -246,8 +246,8 @@ def lake_problem( X[0] = 0.0 natural_inflows = np.random.lognormal( - math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)), - math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)), + math.log(mean**2 / math.sqrt(stdev**2 + mean**2)), + math.sqrt(math.log(1.0 + stdev**2 / mean**2)), size=nvars, ) @@ -286,9 +286,7 @@ def lake_problem( ] # set levers, one for each time step - lake_model.levers = [ - RealParameter(f"l{i}", 0, 0.1) for i in range(lake_model.time_horizon) - ] + lake_model.levers = [RealParameter(f"l{i}", 0, 0.1) for i in range(lake_model.time_horizon)] # specify outcomes # specify outcomes @@ -296,20 +294,13 @@ def lake_problem( ScalarOutcome("max_P", kind=ScalarOutcome.MINIMIZE, expected_range=(0, 5)), ScalarOutcome("utility", kind=ScalarOutcome.MAXIMIZE, expected_range=(0, 2)), ScalarOutcome("inertia", kind=ScalarOutcome.MAXIMIZE, expected_range=(0, 1)), - ScalarOutcome( - "reliability", kind=ScalarOutcome.MAXIMIZE, expected_range=(0, 1) - ), + ScalarOutcome("reliability", kind=ScalarOutcome.MAXIMIZE, expected_range=(0, 1)), ] - convergence_metrics = [ - HyperVolume.from_outcomes(lake_model.outcomes), - EpsilonProgress(), - ] + convergence_metrics = [HyperVolume.from_outcomes(lake_model.outcomes), EpsilonProgress()] constraints = [ - Constraint( - "max pollution", outcome_names="max_P", function=lambda x: max(0, x - 5) - ) + 
Constraint("max pollution", outcome_names="max_P", function=lambda x: max(0, x - 5)) ] with MultiprocessingEvaluator(lake_model) as evaluator: diff --git a/ema_workbench/examples/lake_model_sample_jointly.py b/ema_workbench/examples/lake_model_sample_jointly.py index 88c2dc9c9..f4a309ad4 100644 --- a/ema_workbench/examples/lake_model_sample_jointly.py +++ b/ema_workbench/examples/lake_model_sample_jointly.py @@ -71,7 +71,7 @@ def lake_problem( r2=0.5, w1=0.5, ): - Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5) + Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5) X = np.zeros((myears,)) average_daily_P = np.zeros((myears,)) @@ -83,12 +83,12 @@ def lake_problem( X[0] = 0.0 decision = 0.1 - decisions = np.zeros(myears,) + decisions = np.zeros(myears) decisions[0] = decision natural_inflows = np.random.lognormal( - math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)), - math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)), + math.log(mean**2 / math.sqrt(stdev**2 + mean**2)), + math.sqrt(math.log(1.0 + stdev**2 / mean**2)), size=myears, ) @@ -107,9 +107,7 @@ def lake_problem( reliability += np.sum(X < Pcrit) / (nsamples * myears) inertia += np.sum(np.absolute(np.diff(decisions) < 0.02)) / (nsamples * myears) - utility += ( - np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples - ) + utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples max_P = np.max(average_daily_P) return max_P, utility, inertia, reliability @@ -125,14 +123,10 @@ def analyze(results, ooi): problem = get_SALib_problem(parameters) y = outcomes[ooi] sobol_indices = sobol.analyze(problem, y) - sobol_stats = { - key: sobol_indices[key] for key in ["ST", "ST_conf", "S1", "S1_conf"] - } + sobol_stats = {key: sobol_indices[key] for key in ["ST", "ST_conf", "S1", "S1_conf"]} sobol_stats = pd.DataFrame(sobol_stats, index=problem["names"]) sobol_stats.sort_values(by="ST", ascending=False) - s2 = pd.DataFrame( - sobol_indices["S2"], index=problem["names"], columns=problem["names"] - ) + s2 = pd.DataFrame(sobol_indices["S2"], index=problem["names"], columns=problem["names"]) s2_conf = pd.DataFrame( sobol_indices["S2_conf"], index=problem["names"], columns=problem["names"] ) diff --git a/ema_workbench/examples/lake_model_sobol.py b/ema_workbench/examples/lake_model_sobol.py index bd7d81a4f..4afffa2af 100644 --- a/ema_workbench/examples/lake_model_sobol.py +++ b/ema_workbench/examples/lake_model_sobol.py @@ -38,9 +38,9 @@ def lake_problem( try: decisions = [kwargs[str(i)] for i in range(100)] except KeyError: - decisions = [0,] * 100 + decisions = [0] * 100 - Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5) + Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5) nvars = len(decisions) X = np.zeros((nvars,)) average_daily_P = np.zeros((nvars,)) @@ -51,8 +51,8 @@ def lake_problem( X[0] = 0.0 natural_inflows = np.random.lognormal( - math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)), - math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)), + math.log(mean**2 / math.sqrt(stdev**2 + mean**2)), + math.sqrt(math.log(1.0 + stdev**2 / mean**2)), size=nvars, ) @@ -82,14 +82,10 @@ def analyze(results, ooi): problem = get_SALib_problem(lake_model.uncertainties) y = outcomes[ooi] sobol_indices = sobol.analyze(problem, y) - sobol_stats = { - key: sobol_indices[key] for key in ["ST", "ST_conf", "S1", "S1_conf"] - } + sobol_stats = {key: sobol_indices[key] for key in ["ST", "ST_conf", "S1", "S1_conf"]} sobol_stats = pd.DataFrame(sobol_stats, 
index=problem["names"]) sobol_stats.sort_values(by="ST", ascending=False) - s2 = pd.DataFrame( - sobol_indices["S2"], index=problem["names"], columns=problem["names"] - ) + s2 = pd.DataFrame(sobol_indices["S2"], index=problem["names"], columns=problem["names"]) s2_conf = pd.DataFrame( sobol_indices["S2_conf"], index=problem["names"], columns=problem["names"] ) @@ -114,13 +110,11 @@ def analyze(results, ooi): ] # set levers, one for each time step - lake_model.levers = [ - RealParameter(str(i), 0, 0.1) for i in range(lake_model.time_horizon) - ] + lake_model.levers = [RealParameter(str(i), 0, 0.1) for i in range(lake_model.time_horizon)] # specify outcomes lake_model.outcomes = [ - ScalarOutcome("max_P",), + ScalarOutcome("max_P"), ScalarOutcome("utility"), ScalarOutcome("inertia"), ScalarOutcome("reliability"), diff --git a/ema_workbench/examples/lookup_example.py b/ema_workbench/examples/lookup_example.py index 965f86ccd..6e2bb815f 100644 --- a/ema_workbench/examples/lookup_example.py +++ b/ema_workbench/examples/lookup_example.py @@ -38,14 +38,7 @@ def __init__(self, working_directory, name): ), LookupUncertainty( "hearne2", - [ - (-0.75, 0.75), - (-0.75, 0.75), - (0, 1.5), - (0.1, 1.6), - (-0.3, 1.5), - (0.25, 2.5), - ], + [(-0.75, 0.75), (-0.75, 0.75), (0, 1.5), (0.1, 1.6), (-0.3, 1.5), (0.25, 2.5)], "fractional change in expectations from perceived adequacy lookup", self, -1, diff --git a/ema_workbench/examples/netlogo_example.py b/ema_workbench/examples/netlogo_example.py index 99e7f1ebb..28cfdf8c3 100644 --- a/ema_workbench/examples/netlogo_example.py +++ b/ema_workbench/examples/netlogo_example.py @@ -28,9 +28,7 @@ ema_logging.log_to_stderr(ema_logging.INFO) model = NetLogoModel( - "predprey", - wd="./models/predatorPreyNetlogo", - model_file="Wolf Sheep Predation.nlogo", + "predprey", wd="./models/predatorPreyNetlogo", model_file="Wolf Sheep Predation.nlogo" ) model.run_length = 100 model.replications = 10 @@ -52,9 +50,7 @@ # perform experiments n = 10 - with MultiprocessingEvaluator( - model, n_processes=2, maxtasksperchild=4 - ) as evaluator: + with MultiprocessingEvaluator(model, n_processes=2, maxtasksperchild=4) as evaluator: results = evaluator.perform_experiments(n) print() diff --git a/ema_workbench/examples/plotting_flu_envelopes.py b/ema_workbench/examples/plotting_flu_envelopes.py index 15d48bb86..b8d612ece 100644 --- a/ema_workbench/examples/plotting_flu_envelopes.py +++ b/ema_workbench/examples/plotting_flu_envelopes.py @@ -14,9 +14,7 @@ experiments, outcomes = load_results(file_name) # the plotting functions return the figure and a dict of axes -fig, axes = envelopes( - experiments, outcomes, group_by="policy", density=Density.KDE, fill=True -) +fig, axes = envelopes(experiments, outcomes, group_by="policy", density=Density.KDE, fill=True) # we can access each of the axes and make changes for key, value in axes.items(): diff --git a/ema_workbench/examples/prim_PCA_flu_example.py b/ema_workbench/examples/prim_PCA_flu_example.py index 2fb5e9285..1982500fa 100644 --- a/ema_workbench/examples/prim_PCA_flu_example.py +++ b/ema_workbench/examples/prim_PCA_flu_example.py @@ -25,9 +25,7 @@ # specify y y = outcomes["deceased population region 1"][:, -1] > 1000000 -rotated_experiments, rotation_matrix = prim.pca_preprocess( - x, y, exclude=["model", "policy"] -) +rotated_experiments, rotation_matrix = prim.pca_preprocess(x, y, exclude=["model", "policy"]) # perform prim on modified results tuple prim_obj = prim.Prim(rotated_experiments, y, threshold=0.8) diff --git 
a/ema_workbench/examples/python_example.py b/ema_workbench/examples/python_example.py index 96917242d..810c69379 100644 --- a/ema_workbench/examples/python_example.py +++ b/ema_workbench/examples/python_example.py @@ -6,13 +6,7 @@ .. codeauthor:: jhkwakkel """ -from ema_workbench import ( - Model, - RealParameter, - ScalarOutcome, - ema_logging, - perform_experiments, -) +from ema_workbench import Model, RealParameter, ScalarOutcome, ema_logging, perform_experiments def some_model(x1=None, x2=None, x3=None): diff --git a/ema_workbench/examples/scarcity_example.py b/ema_workbench/examples/scarcity_example.py index 4839ab65c..2a326dd0f 100644 --- a/ema_workbench/examples/scarcity_example.py +++ b/ema_workbench/examples/scarcity_example.py @@ -57,9 +57,7 @@ def run_model(self, scenario, policy): scale = kwargs.pop("lookup approximated learning speed") speed = kwargs.pop("lookup approximated learning scale") start = kwargs.pop("lookup approximated learning start") - lookup = [ - self.approxLearning(x, speed, scale, start) for x in range(0, 101, 10) - ] + lookup = [self.approxLearning(x, speed, scale, start) for x in range(0, 101, 10)] kwargs["approximated learning effect lookup"] = lookup super().run_model(kwargs, policy) @@ -68,9 +66,7 @@ def run_model(self, scenario, policy): if __name__ == "__main__": ema_logging.log_to_stderr(ema_logging.DEBUG) - model = ScarcityModel( - "scarcity", wd=r"./models/scarcity", model_file=r"\MetalsEMA.vpm" - ) + model = ScarcityModel("scarcity", wd=r"./models/scarcity", model_file=r"\MetalsEMA.vpm") model.outcomes = [ TimeSeriesOutcome("relative market price"), diff --git a/ema_workbench/examples/simio_example.py b/ema_workbench/examples/simio_example.py index 4805387bc..36268dcf3 100644 --- a/ema_workbench/examples/simio_example.py +++ b/ema_workbench/examples/simio_example.py @@ -3,12 +3,7 @@ @author: jhkwakkel """ -from ema_workbench import ( - ema_logging, - CategoricalParameter, - MultiprocessingEvaluator, - ScalarOutcome, -) +from ema_workbench import ema_logging, CategoricalParameter, MultiprocessingEvaluator, ScalarOutcome from ema_workbench.connectors.simio_connector import SimioModel @@ -16,17 +11,12 @@ ema_logging.log_to_stderr(ema_logging.INFO) model = SimioModel( - "simioDemo", - wd="./model_bahareh", - model_file="SupplyChainV3.spfx", - main_model="Model", + "simioDemo", wd="./model_bahareh", model_file="SupplyChainV3.spfx", main_model="Model" ) model.uncertainties = [ CategoricalParameter("DemandDistributionParameter", (20, 30, 40, 50, 60)), - CategoricalParameter( - "DemandInterarrivalTime", (0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2) - ), + CategoricalParameter("DemandInterarrivalTime", (0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2)), ] model.levers = [ @@ -36,10 +26,7 @@ CategoricalParameter("ReviewPeriod", (3, 4, 5, 6, 7)), ] - model.outcomes = [ - ScalarOutcome("AverageInventory"), - ScalarOutcome("AverageServiceLevel"), - ] + model.outcomes = [ScalarOutcome("AverageInventory"), ScalarOutcome("AverageServiceLevel")] n_scenarios = 10 n_policies = 2 diff --git a/ema_workbench/examples/teacup_example.py b/ema_workbench/examples/teacup_example.py index 97f64c8a9..bc3240836 100644 --- a/ema_workbench/examples/teacup_example.py +++ b/ema_workbench/examples/teacup_example.py @@ -7,12 +7,7 @@ # # .. 
codeauthor::jhkwakkel -from ema_workbench import ( - RealParameter, - TimeSeriesOutcome, - ema_logging, - perform_experiments, -) +from ema_workbench import RealParameter, TimeSeriesOutcome, ema_logging, perform_experiments from ema_workbench.connectors.pysd_connector import PysdModel diff --git a/ema_workbench/examples/vadere_demo.ipynb b/ema_workbench/examples/vadere_demo.ipynb index 6099b90f6..186c1cb4a 100644 --- a/ema_workbench/examples/vadere_demo.ipynb +++ b/ema_workbench/examples/vadere_demo.ipynb @@ -22,9 +22,16 @@ "metadata": {}, "outputs": [], "source": [ - "from ema_workbench import (perform_experiments, RealParameter, ema_logging,\n", - " MultiprocessingEvaluator, Samplers,\n", - " ScalarOutcome, IntegerParameter, RealParameter)\n", + "from ema_workbench import (\n", + " perform_experiments,\n", + " RealParameter,\n", + " ema_logging,\n", + " MultiprocessingEvaluator,\n", + " Samplers,\n", + " ScalarOutcome,\n", + " IntegerParameter,\n", + " RealParameter,\n", + ")\n", "from ema_workbench.connectors.vadere import VadereModel\n", "import pandas as pd\n", "import numpy as np" @@ -52,14 +59,13 @@ "# This model saves scalar results to a density.txt and speed.txt file.\n", "# Please acquire your own copy of Vadere, and place the vadere-console.jar in your model/scenarios directory\n", "# Note that the vadere model files and the console.jar should always be placed in a separate wd as the python runfile\n", - "model = VadereModel('demoModel', \n", - " vadere_jar='vadere-console.jar',\n", - " processor_files=[\n", - " 'evacuationTime.txt',\n", - " 'speed.csv'\n", - " ],\n", - " model_file='demo.scenario',\n", - " wd='models/vadereModel/scenarios/')" + "model = VadereModel(\n", + " \"demoModel\",\n", + " vadere_jar=\"vadere-console.jar\",\n", + " processor_files=[\"evacuationTime.txt\", \"speed.csv\"],\n", + " model_file=\"demo.scenario\",\n", + " wd=\"models/vadereModel/scenarios/\",\n", + ")" ] }, { @@ -92,29 +98,25 @@ "source": [ "model.uncertainties = [\n", " IntegerParameter(\n", - " name='spawnNumberA',\n", + " name=\"spawnNumberA\",\n", " lower_bound=1,\n", " upper_bound=50,\n", - " variable_name=[\n", - " '(\"scenario\", \"topography\", \"sources\", 0, \"spawnNumber\")',\n", - " ]\n", + " variable_name=['(\"scenario\", \"topography\", \"sources\", 0, \"spawnNumber\")'],\n", " ),\n", " IntegerParameter(\n", - " name='spawnNumberB',\n", + " name=\"spawnNumberB\",\n", " lower_bound=1,\n", " upper_bound=50,\n", - " variable_name=[\n", - " '(\"scenario\", \"topography\", \"sources\", 1, \"spawnNumber\")',\n", - " ]\n", + " variable_name=['(\"scenario\", \"topography\", \"sources\", 1, \"spawnNumber\")'],\n", " ),\n", " RealParameter(\n", - " name='μFreeFlowSpeed',\n", + " name=\"μFreeFlowSpeed\",\n", " lower_bound=0.7,\n", " upper_bound=1.5,\n", " variable_name=[\n", - " '(\"scenario\", \"topography\", \"attributesPedestrian\", \"speedDistributionMean\")',\n", - " ]\n", - " )\n", + " '(\"scenario\", \"topography\", \"attributesPedestrian\", \"speedDistributionMean\")'\n", + " ],\n", + " ),\n", "]" ] }, @@ -132,11 +134,7 @@ "outputs": [], "source": [ "model.outcomes = [\n", - " ScalarOutcome(\n", - " name='evacuationTime',\n", - " variable_name='meanEvacuationTime-PID8',\n", - " function=np.mean\n", - " )\n", + " ScalarOutcome(name=\"evacuationTime\", variable_name=\"meanEvacuationTime-PID8\", function=np.mean)\n", "]" ] }, @@ -187,11 +185,7 @@ ], "source": [ "# run in sequential 2 experiments\n", - "results_sequential = perform_experiments(\n", - " model,\n", - " scenarios=2,\n", - 
" uncertainty_sampling=Samplers.LHS\n", - ")" + "results_sequential = perform_experiments(model, scenarios=2, uncertainty_sampling=Samplers.LHS)" ] }, { @@ -218,10 +212,9 @@ "source": [ "# run 6 experiments in parallel\n", "with MultiprocessingEvaluator(model, n_processes=4) as evaluator:\n", - " experiments, outcomes = evaluator.perform_experiments(\n", - " scenarios=4,\n", - " uncertainty_sampling=Samplers.LHS\n", - ")" + " experiments, outcomes = evaluator.perform_experiments(\n", + " scenarios=4, uncertainty_sampling=Samplers.LHS\n", + " )" ] }, { @@ -420,14 +413,13 @@ "# This model saves scalar results to a density.txt and speed.txt file.\n", "# Please acquire your own copy of Vadere, and place the vadere-console.jar in your model/scenarios directory\n", "# Note that the vadere model files should always be placed in a separate wd as the python runfile\n", - "model = SingleReplicationVadereModel('demoModel', \n", - " vadere_jar='vadere-console.jar',\n", - " processor_files=[\n", - " 'evacuationTime.txt',\n", - " 'speed.csv'\n", - " ],\n", - " model_file='demo.scenario',\n", - " wd='models/vadereModel/scenarios/')" + "model = SingleReplicationVadereModel(\n", + " \"demoModel\",\n", + " vadere_jar=\"vadere-console.jar\",\n", + " processor_files=[\"evacuationTime.txt\", \"speed.csv\"],\n", + " model_file=\"demo.scenario\",\n", + " wd=\"models/vadereModel/scenarios/\",\n", + ")" ] }, { @@ -438,29 +430,25 @@ "source": [ "model.uncertainties = [\n", " IntegerParameter(\n", - " name='spawnNumberA',\n", + " name=\"spawnNumberA\",\n", " lower_bound=1,\n", " upper_bound=50,\n", - " variable_name=[\n", - " '(\"scenario\", \"topography\", \"sources\", 0, \"spawnNumber\")',\n", - " ]\n", + " variable_name=['(\"scenario\", \"topography\", \"sources\", 0, \"spawnNumber\")'],\n", " ),\n", " IntegerParameter(\n", - " name='spawnNumberB',\n", + " name=\"spawnNumberB\",\n", " lower_bound=1,\n", " upper_bound=50,\n", - " variable_name=[\n", - " '(\"scenario\", \"topography\", \"sources\", 1, \"spawnNumber\")',\n", - " ]\n", + " variable_name=['(\"scenario\", \"topography\", \"sources\", 1, \"spawnNumber\")'],\n", " ),\n", " RealParameter(\n", - " name='μFreeFlowSpeed',\n", + " name=\"μFreeFlowSpeed\",\n", " lower_bound=0.7,\n", " upper_bound=1.5,\n", " variable_name=[\n", - " '(\"scenario\", \"topography\", \"attributesPedestrian\", \"speedDistributionMean\")',\n", - " ]\n", - " )\n", + " '(\"scenario\", \"topography\", \"attributesPedestrian\", \"speedDistributionMean\")'\n", + " ],\n", + " ),\n", "]" ] }, @@ -470,12 +458,7 @@ "metadata": {}, "outputs": [], "source": [ - "model.outcomes = [\n", - " TimeSeriesOutcome(\n", - " name='speedTime',\n", - " variable_name='areaSpeed-PID5',\n", - " )\n", - "]" + "model.outcomes = [TimeSeriesOutcome(name=\"speedTime\", variable_name=\"areaSpeed-PID5\")]" ] }, { @@ -502,10 +485,9 @@ "source": [ "# run 4 experiments in parallel\n", "with MultiprocessingEvaluator(model, n_processes=4) as evaluator:\n", - " experiments, outcomes = evaluator.perform_experiments(\n", - " scenarios=4,\n", - " uncertainty_sampling=Samplers.LHS\n", - ")" + " experiments, outcomes = evaluator.perform_experiments(\n", + " scenarios=4, uncertainty_sampling=Samplers.LHS\n", + " )" ] }, { @@ -539,7 +521,8 @@ ], "source": [ "from ema_workbench.analysis.plotting import lines\n", - "lines(experiments, outcomes, outcomes_to_show='speedTime')" + "\n", + "lines(experiments, outcomes, outcomes_to_show=\"speedTime\")" ] } ], diff --git a/ema_workbench/examples/vensim_example.py 
b/ema_workbench/examples/vensim_example.py index d43975f27..176edd18d 100644 --- a/ema_workbench/examples/vensim_example.py +++ b/ema_workbench/examples/vensim_example.py @@ -8,12 +8,7 @@ .. codeauthor:: jhkwakkel chamarat """ -from ema_workbench import ( - TimeSeriesOutcome, - perform_experiments, - RealParameter, - ema_logging, -) +from ema_workbench import TimeSeriesOutcome, perform_experiments, RealParameter, ema_logging from ema_workbench.connectors.vensim import VensimModel @@ -24,10 +19,7 @@ # instantiate a model wd = "./models/vensim example" vensimModel = VensimModel("simpleModel", wd=wd, model_file="model.vpm") - vensimModel.uncertainties = [ - RealParameter("x11", 0, 2.5), - RealParameter("x12", -2.5, 2.5), - ] + vensimModel.uncertainties = [RealParameter("x11", 0, 2.5), RealParameter("x12", -2.5, 2.5)] vensimModel.outcomes = [TimeSeriesOutcome("a")] diff --git a/ema_workbench/util/ema_logging.py b/ema_workbench/util/ema_logging.py index 0a10aff84..c7ac80044 100644 --- a/ema_workbench/util/ema_logging.py +++ b/ema_workbench/util/ema_logging.py @@ -112,11 +112,11 @@ def temporary_filter(name=LOGGER_NAME, level=0, functname=None): # make a list equal lengths? if len(names) < max_length: - names = [name,] * max_length + names = [name] * max_length if len(levels) < max_length: - levels = [level,] * max_length + levels = [level] * max_length if len(functnames) < max_length: - functnames = [functname,] * max_length + functnames = [functname] * max_length filters = {} for name, level, functname in zip(names, levels, functnames): @@ -196,9 +196,7 @@ def log_to_stderr(level=None): # avoid creation of multiple stream handlers for logging to console for entry in logger.handlers: - if (isinstance(entry, logging.StreamHandler)) and ( - entry.formatter._fmt == LOG_FORMAT - ): + if (isinstance(entry, logging.StreamHandler)) and (entry.formatter._fmt == LOG_FORMAT): return logger formatter = logging.Formatter(LOG_FORMAT) diff --git a/ema_workbench/util/utilities.py b/ema_workbench/util/utilities.py index 7a9cdccd3..09e413bbc 100644 --- a/ema_workbench/util/utilities.py +++ b/ema_workbench/util/utilities.py @@ -210,9 +210,7 @@ def add_file(tararchive, stream, filename): with tarfile.open(file_name, "w:gz") as z: # store experiments stream = BytesIO() - stream.write( - experiments.to_csv(header=True, encoding="UTF-8", index=False).encode() - ) + stream.write(experiments.to_csv(header=True, encoding="UTF-8", index=False).encode()) add_file(z, stream, "experiments.csv") # store outcomes @@ -345,14 +343,10 @@ def process_replications(data, aggregation_func=np.mean): if isinstance(data, dict): # replications are the second dimension of the outcome arrays - outcomes_processed = { - key: aggregation_func(data[key], axis=1) for key in data.keys() - } + outcomes_processed = {key: aggregation_func(data[key], axis=1) for key in data.keys()} return outcomes_processed elif ( - isinstance(data, tuple) - and isinstance(data[0], pd.DataFrame) - and isinstance(data[1], dict) + isinstance(data, tuple) and isinstance(data[0], pd.DataFrame) and isinstance(data[1], dict) ): experiments, outcomes = data # split results outcomes_processed = { @@ -362,6 +356,4 @@ def process_replications(data, aggregation_func=np.mean): return results_processed else: - raise EMAError( - f"data should be a dict or tuple, but is a {type(data)}".format() - ) + raise EMAError(f"data should be a dict or tuple, but is a {type(data)}".format()) diff --git a/test/models/Sales_Agent_Market_Building_Dynamics.py 
b/test/models/Sales_Agent_Market_Building_Dynamics.py index 763f7225c..3ec674320 100644 --- a/test/models/Sales_Agent_Market_Building_Dynamics.py +++ b/test/models/Sales_Agent_Market_Building_Dynamics.py @@ -246,9 +246,7 @@ def effort_devoted_to_tier_2_leads(): """ return np.minimum( effort_remaining_after_servicing_existing_clients(), - effort_required_to_make_a_sale() - * tier_2_leads() - / minimum_time_to_make_a_sale(), + effort_required_to_make_a_sale() * tier_2_leads() / minimum_time_to_make_a_sale(), ) @@ -297,9 +295,7 @@ def effort_remaining_after_servicing_tier_2_leads(): activities are complete? """ return np.maximum( - effort_remaining_after_servicing_existing_clients() - - effort_devoted_to_tier_2_leads(), - 0, + effort_remaining_after_servicing_existing_clients() - effort_devoted_to_tier_2_leads(), 0 ) @@ -1061,23 +1057,17 @@ def time_step(): _integ_months_of_buffer = Integ(lambda: income() - expenses(), initial_buffer) -_integ_tier_2_clients = Integ( - lambda: tier_2_sales() - tier_2_client_turnover(), lambda: 0 -) +_integ_tier_2_clients = Integ(lambda: tier_2_sales() - tier_2_client_turnover(), lambda: 0) _integ_tier_2_leads = Integ( - lambda: tier_2_lead_aquisition() + tier_2_sales() - tier_2_leads_going_stale(), - lambda: 0, + lambda: tier_2_lead_aquisition() + tier_2_sales() - tier_2_leads_going_stale(), lambda: 0 ) _integ_tier_1_leads = Integ( - lambda: tier_1_lead_aquisition() + tier_1_sales() - tier_1_leads_going_stale(), - lambda: 100, + lambda: tier_1_lead_aquisition() + tier_1_sales() - tier_1_leads_going_stale(), lambda: 100 ) -_integ_tier_1_clients = Integ( - lambda: tier_1_sales() - tier_1_client_turnover(), lambda: 0 -) +_integ_tier_1_clients = Integ(lambda: tier_1_sales() - tier_1_client_turnover(), lambda: 0) diff --git a/test/models/Sales_Agent_Motivation_Dynamics.py b/test/models/Sales_Agent_Motivation_Dynamics.py index 57fe55c6f..db1970ac0 100644 --- a/test/models/Sales_Agent_Motivation_Dynamics.py +++ b/test/models/Sales_Agent_Motivation_Dynamics.py @@ -270,17 +270,7 @@ def impact_of_motivation_on_effort(x): return lookup( x, [0, 0.285132, 0.448065, 0.570265, 0.733198, 0.95723, 1.4664, 3.19756, 4.03259], - [ - 0, - 0.0616114, - 0.232228, - 0.492891, - 0.772512, - 0.862559, - 0.914692, - 0.952607, - 0.957346, - ], + [0, 0.0616114, 0.232228, 0.492891, 0.772512, 0.862559, 0.914692, 0.952607, 0.957346], ) diff --git a/test/test_analysis/test_b_and_w_plotting.py b/test/test_analysis/test_b_and_w_plotting.py index da22c4f61..5ae5e036c 100644 --- a/test/test_analysis/test_b_and_w_plotting.py +++ b/test/test_analysis/test_b_and_w_plotting.py @@ -6,11 +6,7 @@ import matplotlib.pyplot as plt import numpy as np -from ema_workbench.analysis.b_and_w_plotting import ( - set_fig_to_bw, - HATCHING, - GREYSCALE, -) +from ema_workbench.analysis.b_and_w_plotting import set_fig_to_bw, HATCHING, GREYSCALE def test_scatter(): diff --git a/test/test_analysis/test_cart.py b/test/test_analysis/test_cart.py index eb4589d41..256568b5a 100644 --- a/test/test_analysis/test_cart.py +++ b/test/test_analysis/test_cart.py @@ -57,9 +57,7 @@ def test_setup_cart(self): y[k] = v[:, -1] temp_results = (x, y) - alg = cart.setup_cart( - temp_results, "deceased population region 1", mass_min=0.05 - ) + alg = cart.setup_cart(temp_results, "deceased population region 1", mass_min=0.05) self.assertTrue(alg.mode == RuleInductionType.REGRESSION) n_cols = 5 @@ -165,9 +163,7 @@ def test_build_tree(self): y[k] = v temp_results = (x, y) - alg = cart.setup_cart( - temp_results, "deceased population region 
1", mass_min=0.05 - ) + alg = cart.setup_cart(temp_results, "deceased population region 1", mass_min=0.05) alg.build_tree() self.assertTrue(isinstance(alg.clf, cart.tree.DecisionTreeRegressor)) diff --git a/test/test_analysis/test_dimensional_stacking.py b/test/test_analysis/test_dimensional_stacking.py index e98f74895..b6c5cf388 100644 --- a/test/test_analysis/test_dimensional_stacking.py +++ b/test/test_analysis/test_dimensional_stacking.py @@ -15,7 +15,7 @@ class DimStackTestCase(unittest.TestCase): def test_discretize(self): - float = np.random.rand(100,) # @ReservedAssignment + float = np.random.rand(100) # @ReservedAssignment integer = np.random.randint(0, 5, size=(100,)) categorical = [str(i) for i in np.random.randint(0, 3, size=(100,))] data = {"float": float, "integer": integer, "categorical": categorical} diff --git a/test/test_analysis/test_feature_scoring.py b/test/test_analysis/test_feature_scoring.py index d9fe6444a..e819d0796 100644 --- a/test/test_analysis/test_feature_scoring.py +++ b/test/test_analysis/test_feature_scoring.py @@ -9,11 +9,7 @@ import pandas as pd from sklearn.ensemble import RandomForestClassifier -from sklearn.ensemble import ( - RandomForestRegressor, - ExtraTreesClassifier, - ExtraTreesRegressor, -) +from sklearn.ensemble import RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor from ema_workbench.analysis import feature_scoring as fs from ema_workbench.analysis.feature_scoring import F_CLASSIFICATION, CHI2, F_REGRESSION @@ -27,9 +23,7 @@ class FeatureScoringTestCase(unittest.TestCase): def test_prepare_experiments(self): - x = pd.DataFrame( - [(0, 1, 2, 1), (2, 5, 6, 1), (3, 2, 1, 1)], columns=["a", "b", "c", "d"] - ) + x = pd.DataFrame([(0, 1, 2, 1), (2, 5, 6, 1), (3, 2, 1, 1)], columns=["a", "b", "c", "d"]) x, _ = fs._prepare_experiments(x) correct = np.array([[0, 1, 2, 1], [2, 5, 6, 1], [3, 2, 1, 1]], dtype=float) @@ -42,7 +36,7 @@ def test_prepare_experiments(self): { "a": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], "b": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], - "c": ["a", "b", "a", "b", "a", "a", "b", "a", "b", "a",], + "c": ["a", "b", "a", "b", "a", "a", "b", "a", "b", "a"], } ) x, _ = fs._prepare_experiments(x) @@ -151,9 +145,7 @@ def classify(data): self.assertEqual(len(scores), len(x.columns) - 3) self.assertTrue(isinstance(forest, RandomForestClassifier)) - self.assertRaises( - ValueError, fs.get_rf_feature_scores, x, y, mode="illegal argument" - ) + self.assertRaises(ValueError, fs.get_rf_feature_scores, x, y, mode="illegal argument") y = outcomes["deceased population region 1"][:, -1] scores, forest = fs.get_rf_feature_scores( @@ -174,9 +166,7 @@ def test_get_ex_feature_scores(self): self.assertEqual(len(scores), len(x.columns) - 3) self.assertTrue(isinstance(forest, ExtraTreesClassifier)) - self.assertRaises( - ValueError, fs.get_ex_feature_scores, x, y, mode="illegal argument" - ) + self.assertRaises(ValueError, fs.get_ex_feature_scores, x, y, mode="illegal argument") y = outcomes["deceased population region 1"][:, -1] scores, forest = fs.get_ex_feature_scores( diff --git a/test/test_analysis/test_logistic_regression.py b/test/test_analysis/test_logistic_regression.py index 9d20ea25b..272263cc6 100644 --- a/test/test_analysis/test_logistic_regression.py +++ b/test/test_analysis/test_logistic_regression.py @@ -27,9 +27,7 @@ def test_logit(self): logitmodel = lr.Logit(experiments, y) columns = set( - experiments.drop( - ["scenario", "policy", "model"], axis=1 - ).columns.values.tolist() + experiments.drop(["scenario", 
"policy", "model"], axis=1).columns.values.tolist() ) # check init diff --git a/test/test_analysis/test_pairs_plotting.py b/test/test_analysis/test_pairs_plotting.py index de924ceae..a18ebfe56 100644 --- a/test/test_analysis/test_pairs_plotting.py +++ b/test/test_analysis/test_pairs_plotting.py @@ -5,11 +5,7 @@ """ import matplotlib.pyplot as plt -from ema_workbench.analysis.pairs_plotting import ( - pairs_density, - pairs_lines, - pairs_scatter, -) +from ema_workbench.analysis.pairs_plotting import pairs_density, pairs_lines, pairs_scatter from test import utilities @@ -30,9 +26,7 @@ def test_pairs_density(): pairs_density(experiments, outcomes, colormap="binary") plt.draw() - pairs_density( - experiments, outcomes, group_by="policy", grouping_specifiers=["no policy"] - ) + pairs_density(experiments, outcomes, group_by="policy", grouping_specifiers=["no policy"]) plt.draw() plt.close("all") @@ -43,11 +37,7 @@ def test_pairs_scatter(): pairs_scatter(experiments, outcomes) pairs_scatter( - experiments, - outcomes, - group_by="policy", - grouping_specifiers="basic policy", - legend=False, + experiments, outcomes, group_by="policy", grouping_specifiers="basic policy", legend=False ) pairs_scatter( diff --git a/test/test_analysis/test_parcoords.py b/test/test_analysis/test_parcoords.py index a638aeba1..10a617fff 100644 --- a/test/test_analysis/test_parcoords.py +++ b/test/test_analysis/test_parcoords.py @@ -13,45 +13,31 @@ class TestParcoords(unittest.TestCase): def test_parallelaxis(self): - x = pd.DataFrame( - [[0.1, 0, {"a", "b"}], [1.0, 9, {"a", "b"}]], columns=["a", "b", "c"], - ) + x = pd.DataFrame([[0.1, 0, {"a", "b"}], [1.0, 9, {"a", "b"}]], columns=["a", "b", "c"]) axes = ParallelAxes(x) self.assertEqual(2, len(axes.axes)) def test_invert_axis(self): - x = pd.DataFrame( - [[0.1, 0, {"a", "b"}], [1.0, 9, {"a", "b"}]], columns=["a", "b", "c"], - ) + x = pd.DataFrame([[0.1, 0, {"a", "b"}], [1.0, 9, {"a", "b"}]], columns=["a", "b", "c"]) axes = ParallelAxes(x) axes.invert_axis("a") - self.assertEqual( - axes.flipped_axes, {"a"}, - ) + self.assertEqual(axes.flipped_axes, {"a"}) axes.invert_axis("a") - self.assertEqual( - axes.flipped_axes, set(), - ) + self.assertEqual(axes.flipped_axes, set()) axes.invert_axis("c") - self.assertEqual( - axes.flipped_axes, {"c"}, - ) + self.assertEqual(axes.flipped_axes, {"c"}) axes.invert_axis("c") - self.assertEqual( - axes.flipped_axes, set(), - ) + self.assertEqual(axes.flipped_axes, set()) axes.invert_axis(["a", "b"]) - self.assertEqual( - axes.flipped_axes, {"a", "b"}, - ) + self.assertEqual(axes.flipped_axes, {"a", "b"}) def test_plot(self): x = pd.DataFrame( diff --git a/test/test_analysis/test_plotting.py b/test/test_analysis/test_plotting.py index 3f15f0db5..e24a91797 100644 --- a/test/test_analysis/test_plotting.py +++ b/test/test_analysis/test_plotting.py @@ -89,9 +89,7 @@ def test_group_results(self): # test integer type array = experiments["seed PR T1"] - grouping_specifiers = make_continuous_grouping_specifiers( - array, nr_of_groups=10 - ) + grouping_specifiers = make_continuous_grouping_specifiers(array, nr_of_groups=10) groups = group_results( experiments, outcomes, @@ -271,20 +269,14 @@ def test_lines(self): set_fig_to_bw(lines(experiments, new_outcomes, density=Density.VIOLIN)[0]) # grouping and density - set_fig_to_bw( - lines(experiments, new_outcomes, group_by="policy", density=Density.KDE)[0] - ) + set_fig_to_bw(lines(experiments, new_outcomes, group_by="policy", density=Density.KDE)[0]) # grouping, density as histograms # grouping 
and density set_fig_to_bw( - lines( - experiments, - new_outcomes, - group_by="policy", - density=Density.HIST, - legend=False, - )[0] + lines(experiments, new_outcomes, group_by="policy", density=Density.HIST, legend=False)[ + 0 + ] ) plt.draw() @@ -300,10 +292,7 @@ def test_envelopes(self): envelopes(experiments, outcomes, density=None, titles=None) envelopes(experiments, outcomes, density=None, titles={}) envelopes( - experiments, - outcomes, - density=None, - titles={"total fraction new technologies": "a"}, + experiments, outcomes, density=None, titles={"total fraction new technologies": "a"} ) plt.draw() @@ -313,10 +302,7 @@ def test_envelopes(self): envelopes(experiments, outcomes, density=None, ylabels=None) envelopes(experiments, outcomes, density=None, ylabels={}) envelopes( - experiments, - outcomes, - density=None, - ylabels={"total fraction new technologies": "a"}, + experiments, outcomes, density=None, ylabels={"total fraction new technologies": "a"} ) plt.draw() @@ -375,25 +361,11 @@ def test_envelopes(self): plt.draw() plt.close("all") - envelopes( - experiments, outcomes, group_by="policy", density=Density.VIOLIN, log=True - ) - envelopes( - experiments, outcomes, group_by="policy", density=Density.BOXPLOT, log=True - ) - envelopes( - experiments, outcomes, group_by="policy", density=Density.KDE, log=True - ) - envelopes( - experiments, outcomes, group_by="policy", density=Density.HIST, log=True - ) - envelopes( - experiments, - outcomes, - group_by="policy", - density=Density.BOXENPLOT, - log=True, - ) + envelopes(experiments, outcomes, group_by="policy", density=Density.VIOLIN, log=True) + envelopes(experiments, outcomes, group_by="policy", density=Density.BOXPLOT, log=True) + envelopes(experiments, outcomes, group_by="policy", density=Density.KDE, log=True) + envelopes(experiments, outcomes, group_by="policy", density=Density.HIST, log=True) + envelopes(experiments, outcomes, group_by="policy", density=Density.BOXENPLOT, log=True) plt.draw() plt.close("all") @@ -402,18 +374,12 @@ def test_envelopes(self): envelopes(experiments, outcomes, group_by="policy", density=Density.HIST) envelopes(experiments, outcomes, group_by="policy", density=Density.HIST) - set_fig_to_bw( - envelopes(experiments, outcomes, group_by="policy", density=Density.KDE)[0] - ) + set_fig_to_bw(envelopes(experiments, outcomes, group_by="policy", density=Density.KDE)[0]) # grouping and density - envelopes( - experiments, outcomes, group_by="policy", density=Density.KDE, fill=True - ) + envelopes(experiments, outcomes, group_by="policy", density=Density.KDE, fill=True) set_fig_to_bw( - envelopes( - experiments, outcomes, group_by="policy", density=Density.KDE, fill=True - )[0] + envelopes(experiments, outcomes, group_by="policy", density=Density.KDE, fill=True)[0] ) plt.draw() @@ -437,15 +403,9 @@ def test_multiple_densities(self): experiments, outcomes = utilities.load_eng_trans_data() ooi = "total fraction new technologies" + multiple_densities(experiments, outcomes, group_by="policy", points_in_time=[2010]) multiple_densities( - experiments, outcomes, group_by="policy", points_in_time=[2010] - ) - multiple_densities( - experiments, - outcomes, - outcomes_to_show=ooi, - group_by="policy", - points_in_time=[2010], + experiments, outcomes, outcomes_to_show=ooi, group_by="policy", points_in_time=[2010] ) multiple_densities( experiments, diff --git a/test/test_analysis/test_prim.py b/test/test_analysis/test_prim.py index ed507097d..93b3fdaa8 100644 --- a/test/test_analysis/test_prim.py +++ 
b/test/test_analysis/test_prim.py @@ -171,10 +171,7 @@ def test_setup_prim(self): results = experiments, outcomes threshold = 10000 prim_obj = prim.setup_prim( - results, - classify="death toll", - threshold_type=prim.ABOVE, - threshold=threshold, + results, classify="death toll", threshold_type=prim.ABOVE, threshold=threshold ) value = np.ones((experiments.shape[0],)) @@ -185,10 +182,7 @@ def test_setup_prim(self): # for results equal to or lower than the threshold threshold = 1000 prim_obj = prim.setup_prim( - results, - classify="death toll", - threshold_type=prim.BELOW, - threshold=threshold, + results, classify="death toll", threshold_type=prim.BELOW, threshold=threshold ) value = np.ones((experiments.shape[0],)) @@ -208,9 +202,7 @@ def test_boxes(self): self.assertEqual(len(boxes), 1, "box length not correct") # real data test case - prim_obj = prim.setup_prim( - utilities.load_flu_data(), flu_classify, threshold=0.8 - ) + prim_obj = prim.setup_prim(utilities.load_flu_data(), flu_classify, threshold=0.8) prim_obj.find_box() boxes = prim_obj.boxes self.assertEqual(len(boxes), 1, "box length not correct") @@ -244,10 +236,7 @@ def test_prim_init_select(self): # for results equal to or lower than the threshold threshold = 1000 prim_obj = prim.setup_prim( - results, - classify="death toll", - threshold_type=prim.BELOW, - threshold=threshold, + results, classify="death toll", threshold_type=prim.BELOW, threshold=threshold ) value = np.ones((experiments.shape[0],)) @@ -326,12 +315,7 @@ def test_prim_exceptions(self): y = outcomes["deceased population region 1"] self.assertRaises( - prim.PrimException, - prim.Prim, - x, - y, - threshold=0.8, - mode=RuleInductionType.REGRESSION, + prim.PrimException, prim.Prim, x, y, threshold=0.8, mode=RuleInductionType.REGRESSION ) def test_find_box(self): @@ -348,15 +332,11 @@ def test_find_box(self): box_2 = prim_obj.find_box() prim_obj._update_yi_remaining(prim_obj) - after_find = ( - box_1.yi.shape[0] + box_2.yi.shape[0] + prim_obj.yi_remaining.shape[0] - ) + after_find = box_1.yi.shape[0] + box_2.yi.shape[0] + prim_obj.yi_remaining.shape[0] self.assertEqual(after_find, prim_obj.y.shape[0]) def test_discrete_peel(self): - x = pd.DataFrame( - np.random.randint(0, 10, size=(100,), dtype=int), columns=["a"] - ) + x = pd.DataFrame(np.random.randint(0, 10, size=(100,), dtype=int), columns=["a"]) y = np.zeros(100) y[x.a > 5] = 1 @@ -413,12 +393,7 @@ def test_discrete_peel(self): def test_categorical_peel(self): x = pd.DataFrame( - list( - zip( - np.random.rand(10,), - ["a", "b", "a", "b", "a", "a", "b", "a", "b", "a",], - ) - ), + list(zip(np.random.rand(10), ["a", "b", "a", "b", "a", "a", "b", "a", "b", "a"])), columns=["a", "b"], ) @@ -447,8 +422,7 @@ def test_categorical_peel(self): a = ("a",) b = ("b",) x = pd.DataFrame( - list(zip(np.random.rand(10,), [a, b, a, b, a, a, b, a, b, a])), - columns=["a", "b"], + list(zip(np.random.rand(10), [a, b, a, b, a, a, b, a, b, a])), columns=["a", "b"] ) y = np.random.randint(0, 2, (10,)) @@ -474,19 +448,8 @@ def test_categorical_peel(self): self.assertEqual(len(pl[1]), 1) def test_categorical_paste(self): - a = np.random.rand(10,) - b = [ - "a", - "b", - "a", - "b", - "a", - "a", - "b", - "a", - "b", - "a", - ] + a = np.random.rand(10) + b = ["a", "b", "a", "b", "a", "a", "b", "a", "b", "a"] x = pd.DataFrame(list(zip(a, b)), columns=["a", "b"]) x["b"] = x["b"].astype("category") @@ -497,7 +460,7 @@ def test_categorical_paste(self): classify = "y" prim_obj = prim.setup_prim(results, classify, threshold=0.8) - box_lims 
= pd.DataFrame([(0, {"a",},), (1, {"a",},),], columns=x.columns,) + box_lims = pd.DataFrame([(0, {"a"}), (1, {"a"})], columns=x.columns) yi = np.where(x.loc[:, "b"] == "a") diff --git a/test/test_analysis/test_regional_sa.py b/test/test_analysis/test_regional_sa.py index 0b776cce2..b95bec5b8 100644 --- a/test/test_analysis/test_regional_sa.py +++ b/test/test_analysis/test_regional_sa.py @@ -44,14 +44,7 @@ def test_plot__individual_cdf(self): unc = "model" regional_sa.plot_individual_cdf( - ax, - unc, - x[unc], - y, - discrete=True, - legend=True, - xticklabels_on=True, - yticklabels_on=True, + ax, unc, x[unc], y, discrete=True, legend=True, xticklabels_on=True, yticklabels_on=True ) diff --git a/test/test_analysis/test_scenario_discovery_util.py b/test/test_analysis/test_scenario_discovery_util.py index 101bc433c..0d9e60bd8 100644 --- a/test/test_analysis/test_scenario_discovery_util.py +++ b/test/test_analysis/test_scenario_discovery_util.py @@ -35,18 +35,7 @@ def test_in_box(self): self.assertTrue(np.all(correct_result == result.values)) x = pd.DataFrame( - [ - (0, 0), - (1, 1), - (2, 2), - (3, 3), - (4, 4), - (5, 5), - (6, 6), - (7, 7), - (8, 8), - (9, 9), - ], + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9)], columns=["a", "b"], ) boxlim = pd.DataFrame([(1, 0), (8, 7)], columns=["a", "b"]) @@ -70,9 +59,7 @@ def test_in_box(self): ], columns=["a", "b", "c"], ) - boxlim = pd.DataFrame( - [(1.2, 0, {"a", "b"}), (8.0, 7, {"a", "b"})], columns=["a", "b", "c"], - ) + boxlim = pd.DataFrame([(1.2, 0, {"a", "b"}), (8.0, 7, {"a", "b"})], columns=["a", "b", "c"]) x["c"] = x["c"].astype("category") correct_result = x.loc[[2, 3], :] @@ -81,7 +68,7 @@ def test_in_box(self): self.assertTrue(np.all(correct_result == result)) boxlim = pd.DataFrame( - [(0.1, 0, {"a", "b", "c", "d", "e"}), (9.1, 9, {"a", "b", "c", "d", "e"}),], + [(0.1, 0, {"a", "b", "c", "d", "e"}), (9.1, 9, {"a", "b", "c", "d", "e"})], columns=["a", "b", "c"], ) correct_result = x.loc[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], :] @@ -115,12 +102,8 @@ def test_normalize(self): for i, lims in enumerate([(0, 2 / 3), (0, 1), (0, 0.2)]): lower, upper = lims - self.assertAlmostEqual( - normalized[i, 0], lower, msg="lower unequal for " + uncs[i] - ) - self.assertAlmostEqual( - normalized[i, 1], upper, msg="upper unequal for " + uncs[i] - ) + self.assertAlmostEqual(normalized[i, 0], lower, msg="lower unequal for " + uncs[i]) + self.assertAlmostEqual(normalized[i, 1], upper, msg="upper unequal for " + uncs[i]) def test_determine_restricted_dims(self): x = np.random.rand(5, 2) @@ -197,7 +180,7 @@ def test_plot_box(self): box_init = sdutil._make_box(x) boxlim = box_init.copy() boxlim.a = [0.5, 1.0] - boxlim.c = [set("b",),] * 2 + boxlim.c = [set("b")] * 2 restricted_dims = ["a", "c"] qp_values = {"a": [0.05, 0.9], "c": [0.05, -1]} @@ -232,7 +215,7 @@ def test_plot_pairwise_scatter(self): box_init = sdutil._make_box(x) boxlim = box_init.copy() boxlim.a = [0.5, 1.0] - boxlim.c = [set("b",),] * 2 + boxlim.c = [set("b")] * 2 restricted_dims = ["a", "c"] sdutil.plot_pair_wise_scatter(x, y, boxlim, box_init, restricted_dims) @@ -267,11 +250,11 @@ def test_plot_boxes(self): box_init = sdutil._make_box(x) boxlim1 = box_init.copy() boxlim1.a = [0.5, 1] - boxlim1.c = [set("b",),] * 2 + boxlim1.c = [set("b")] * 2 boxlim2 = box_init.copy() boxlim2.a = [0.1, 0.5] - boxlim2.c = [set("a",),] * 2 + boxlim2.c = [set("a")] * 2 sdutil.plot_boxes(x, [boxlim1, boxlim2], together=True) sdutil.plot_boxes(x, [boxlim1, boxlim2], together=False) @@ 
-306,11 +289,11 @@ def test_OutputFormatterMixin(self): box_init = sdutil._make_box(x) boxlim1 = box_init.copy() boxlim1.a = [0.5, 1] - boxlim1.c = [set("b",),] * 2 + boxlim1.c = [set("b")] * 2 boxlim2 = box_init.copy() boxlim2.a = [0.1, 0.5] - boxlim2.c = [set("a",),] * 2 + boxlim2.c = [set("a")] * 2 with self.assertRaises(AttributeError): @@ -346,8 +329,7 @@ class TestFormatter(sdutil.OutputFormatterMixin): [[{"b"}, {"b"}, {"a"}, {"a"}], [0.5, 1, 0.1, 0.5]], index=["c", "a"], columns=pd.MultiIndex( - levels=[["box 1", "box 2"], ["max", "min"]], - codes=[[0, 0, 1, 1], [1, 0, 1, 0]], + levels=[["box 1", "box 2"], ["max", "min"]], codes=[[0, 0, 1, 1], [1, 0, 1, 0]] ), ) self.assertTrue(expected_boxes.equals(boxes)) @@ -355,9 +337,7 @@ class TestFormatter(sdutil.OutputFormatterMixin): # check stats stats = formatter.stats_to_dataframe() expected_stats = pd.DataFrame( - [[0.5, 1], [0.5, 1]], - index=["box 1", "box 2"], - columns=["coverage", "density"], + [[0.5, 1], [0.5, 1]], index=["box 1", "box 2"], columns=["coverage", "density"] ) self.assertTrue(expected_stats.equals(stats)) diff --git a/test/test_connectors/test_netlogo.py b/test/test_connectors/test_netlogo.py index 57e9d8ba9..81ab7fca4 100644 --- a/test/test_connectors/test_netlogo.py +++ b/test/test_connectors/test_netlogo.py @@ -10,11 +10,7 @@ # should be made conditional on the presence of jpype __test__ = False -from ema_workbench.em_framework import ( - RealParameter, - CategoricalParameter, - TimeSeriesOutcome, -) +from ema_workbench.em_framework import RealParameter, CategoricalParameter, TimeSeriesOutcome from ema_workbench.em_framework.parameters import Policy from ema_workbench.connectors import netlogo diff --git a/test/test_connectors/test_pysd_interface.py b/test/test_connectors/test_pysd_interface.py index eafddb18b..35da55ef6 100644 --- a/test/test_connectors/test_pysd_interface.py +++ b/test/test_connectors/test_pysd_interface.py @@ -4,16 +4,9 @@ import os import unittest -from ema_workbench.em_framework import ( - perform_experiments, - RealParameter, - TimeSeriesOutcome, -) +from ema_workbench.em_framework import perform_experiments, RealParameter, TimeSeriesOutcome from ema_workbench.connectors.pysd_connector import PysdModel -from ema_workbench.em_framework.evaluators import ( - MultiprocessingEvaluator, - SequentialEvaluator, -) +from ema_workbench.em_framework.evaluators import MultiprocessingEvaluator, SequentialEvaluator # TODO:: model classes should be tested for their pickleability prior to # initialization diff --git a/test/test_connectors/test_vensim.py b/test/test_connectors/test_vensim.py index 1756c7cc5..7d9762489 100644 --- a/test/test_connectors/test_vensim.py +++ b/test/test_connectors/test_vensim.py @@ -69,12 +69,7 @@ def __init__(self, working_directory, name): # self.uncertainties.pop() self.uncertainties.append( LookupUncertainty( - "approximation", - [(0, 4), (1, 5), (1, 5), (0, 2), (0, 2)], - "TF2", - self, - 0, - 10, + "approximation", [(0, 4), (1, 5), (1, 5), (0, 2), (0, 2)], "TF2", self, 0, 10 ) ) # self.uncertainties.pop() diff --git a/test/test_em_framework/test_callback.py b/test/test_em_framework/test_callback.py index a1e59a9b8..a17afddf9 100644 --- a/test/test_em_framework/test_callback.py +++ b/test/test_em_framework/test_callback.py @@ -17,15 +17,10 @@ ) from ema_workbench.em_framework.points import Policy, Scenario, Experiment from ema_workbench.util import EMAError -from ema_workbench.em_framework.outcomes import ( - ScalarOutcome, - ArrayOutcome, - TimeSeriesOutcome, -) +from 
ema_workbench.em_framework.outcomes import ScalarOutcome, ArrayOutcome, TimeSeriesOutcome from ema_workbench.em_framework.util import NamedObject - class TestDefaultCallback(unittest.TestCase): def test_init(self): # let's add some uncertainties to this @@ -195,4 +190,4 @@ def test_store_cases(self): if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/test/test_em_framework/test_ema_ipyparallel.py b/test/test_em_framework/test_ema_ipyparallel.py index b103ccd49..9520adfa6 100644 --- a/test/test_em_framework/test_ema_ipyparallel.py +++ b/test/test_em_framework/test_ema_ipyparallel.py @@ -50,11 +50,7 @@ def start(self): # Store stdout & stderr to show with failing tests. # This is defined in IPython.testing.iptest self.process = Popen( - self.args, - stdout=blackhole, - stderr=STDOUT, - env=os.environ, - cwd=self.work_dir, + self.args, stdout=blackhole, stderr=STDOUT, env=os.environ, cwd=self.work_dir ) self.notify_start(self.process.pid) self.poll = self.process.poll @@ -284,18 +280,14 @@ def test_log_message(self): mocked.return_value = mocked_logger raw = [b"engine.1.INFO.EMA", b"test"] self.watcher.log_message(raw) - mocked_logger.log.assert_called_once_with( - ema_logging.INFO, "[engine.1] test" - ) + mocked_logger.log.assert_called_once_with(ema_logging.INFO, "[engine.1] test") with mock.patch("logging.getLogger") as mocked: mocked_logger = mock.Mock(spec=logging.Logger) mocked.return_value = mocked_logger raw = [b"engine.1.DEBUG.EMA", b"test"] self.watcher.log_message(raw) - mocked_logger.log.assert_called_once_with( - ema_logging.DEBUG, "[engine.1] test" - ) + mocked_logger.log.assert_called_once_with(ema_logging.DEBUG, "[engine.1] test") with mock.patch("logging.getLogger") as mocked: mocked_logger = mock.Mock(spec=logging.Logger) @@ -328,16 +320,12 @@ def tearDownClass(cls): @mock.patch("ema_workbench.em_framework.ema_ipyparallel.get_engines_by_host") @mock.patch("ema_workbench.em_framework.ema_ipyparallel.os") @mock.patch("ema_workbench.em_framework.ema_ipyparallel.socket") - def test_update_cwd_on_all_engines( - self, mock_socket, mock_os, mock_engines_by_host - ): + def test_update_cwd_on_all_engines(self, mock_socket, mock_os, mock_engines_by_host): mock_socket.gethostname.return_value = "test host" mock_client = mock.create_autospec(ipyparallel.Client) mock_client.ids = [0, 1] # pretend we have two engines - mock_view = mock.create_autospec( - ipyparallel.client.view.View - ) # @ @UndefinedVariable + mock_view = mock.create_autospec(ipyparallel.client.view.View) # @ @UndefinedVariable mock_client.__getitem__.return_value = mock_view mock_engines_by_host.return_value = {"test host": [0, 1]} @@ -350,9 +338,7 @@ def test_update_cwd_on_all_engines( # engines on another host mock_engines_by_host.return_value = {"other host": [0, 1]} - self.assertRaises( - NotImplementedError, ema.update_cwd_on_all_engines, mock_client - ) + self.assertRaises(NotImplementedError, ema.update_cwd_on_all_engines, mock_client) def test_get_engines_by_host(self): engines_by_host = ema.get_engines_by_host(self.client) @@ -399,9 +385,7 @@ def test_initialize_engines(self): mock_client = mock.create_autospec(ipyparallel.Client) mock_client.ids = [0, 1] # pretend we have two engines - mock_view = mock.create_autospec( - ipyparallel.client.view.View - ) # @ @UndefinedVariable + mock_view = mock.create_autospec(ipyparallel.client.view.View) # @ @UndefinedVariable mock_client.__getitem__.return_value = mock_view cwd = "." 
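Note on the reflow pattern visible throughout the hunks above: multi-line calls whose arguments fit on one line are being joined, and the joined lines top out near 100 characters. That is consistent with black run with a raised line-length limit; the snippet below is a minimal sketch of reproducing the effect through black's Python API. The 100-character figure is inferred from the wrapped output, not stated anywhere in this diff, so treat it as an assumption.

import black

# Input shaped like the pre-change code in the vadere_demo hunk above: a call
# exploded across several lines, with no magic trailing comma.
src = (
    "results_sequential = perform_experiments(\n"
    "    model,\n"
    "    scenarios=2,\n"
    "    uncertainty_sampling=Samplers.LHS\n"
    ")\n"
)

# format_str parses and reprints the code; with line_length=100 the call
# collapses onto a single line (97 chars), matching the "+" line in the hunk.
# With black's default limit of 88 it would stay split, which is what
# suggests the raised limit in the first place.
print(black.format_str(src, mode=black.FileMode(line_length=100)))
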
diff --git a/test/test_em_framework/test_evaluators.py b/test/test_em_framework/test_evaluators.py index acf0d85de..206ee93e6 100644 --- a/test/test_em_framework/test_evaluators.py +++ b/test/test_em_framework/test_evaluators.py @@ -18,15 +18,11 @@ class TestEvaluators(unittest.TestCase): @mock.patch("ema_workbench.em_framework.evaluators.DefaultCallback") @mock.patch("ema_workbench.em_framework.evaluators.experiment_generator") @mock.patch("ema_workbench.em_framework.evaluators.ExperimentRunner") - def test_sequential_evalutor( - self, mocked_runner, mocked_generator, mocked_callback - ): + def test_sequential_evalutor(self, mocked_runner, mocked_generator, mocked_callback): model = mock.Mock(spec=ema_workbench.Model) model.name = "test" mocked_generator.return_value = [1] - mocked_runner.return_value = ( - mocked_runner # return the mock upon initialization - ) + mocked_runner.return_value = mocked_runner # return the mock upon initialization mocked_runner.run_experiment.return_value = {}, {} evaluator = evaluators.SequentialEvaluator(model) @@ -61,12 +57,7 @@ def test_multiprocessing_evaluator( @mock.patch("ema_workbench.em_framework.evaluators.DefaultCallback") @mock.patch("ema_workbench.em_framework.evaluators.experiment_generator") def test_ipyparallel_evaluator( - self, - mocked_generator, - mocked_callback, - mocked_start, - mocked_initialize, - mocked_set, + self, mocked_generator, mocked_callback, mocked_start, mocked_initialize, mocked_set ): model = mock.Mock(spec=ema_workbench.Model) model.name = "test" @@ -88,4 +79,4 @@ def test_perform_experiments(self): if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/test/test_em_framework/test_experiment_runner.py b/test/test_em_framework/test_experiment_runner.py index 88f6329f8..b669dbe28 100644 --- a/test/test_em_framework/test_experiment_runner.py +++ b/test/test_em_framework/test_experiment_runner.py @@ -36,9 +36,7 @@ def test_run_experiment(self): runner = ExperimentRunner(msis) - experiment = Experiment( - "test", mockMSI.name, Policy("none"), Scenario(a=1, b=2), 0 - ) + experiment = Experiment("test", mockMSI.name, Policy("none"), Scenario(a=1, b=2), 0) runner.run_experiment(experiment) @@ -57,9 +55,7 @@ def test_run_experiment(self): runner = ExperimentRunner(msis) - experiment = Experiment( - "test", mockMSI.name, Policy("none"), Scenario(a=1, b=2), 0 - ) + experiment = Experiment("test", mockMSI.name, Policy("none"), Scenario(a=1, b=2), 0) with self.assertRaises(EMAError): runner.run_experiment(experiment) @@ -72,9 +68,7 @@ def test_run_experiment(self): msis["test"] = mockMSI runner = ExperimentRunner(msis) - experiment = Experiment( - "test", mockMSI.name, Policy("none"), Scenario(a=1, b=2), 0 - ) + experiment = Experiment("test", mockMSI.name, Policy("none"), Scenario(a=1, b=2), 0) runner.run_experiment(experiment) diff --git a/test/test_em_framework/test_model.py b/test/test_em_framework/test_model.py index 65c84bef3..0f596c366 100644 --- a/test/test_em_framework/test_model.py +++ b/test/test_em_framework/test_model.py @@ -9,11 +9,7 @@ from ema_workbench.em_framework.model import Model, FileModel, ReplicatorModel -from ema_workbench.em_framework.parameters import ( - RealParameter, - Category, - CategoricalParameter, -) +from ema_workbench.em_framework.parameters import RealParameter, Category, CategoricalParameter from ema_workbench.em_framework.points import Scenario, Policy from ema_workbench.util import EMAError from ema_workbench.em_framework.outcomes import 
ScalarOutcome, ArrayOutcome @@ -185,14 +181,7 @@ def test_as_dict(self): model = Model(model_name, lambda x: x) model.uncertainties = [RealParameter("a", 0, 1)] - expected_keys = [ - "class", - "name", - "uncertainties", - "outcomes", - "outcomes", - "constants", - ] + expected_keys = ["class", "name", "uncertainties", "outcomes", "outcomes", "constants"] dict_ = model.as_dict() diff --git a/test/test_em_framework/test_optimization.py b/test/test_em_framework/test_optimization.py index 9545b525c..65714f06d 100644 --- a/test/test_em_framework/test_optimization.py +++ b/test/test_em_framework/test_optimization.py @@ -111,9 +111,7 @@ def test_to_dataframe(self, mocked_platypus): for i, entry in enumerate(data): self.assertListEqual(list(df.loc[i, dvnames].values), entry.variables) - self.assertListEqual( - list(df.loc[i, outcome_names].values), entry.objectives - ) + self.assertListEqual(list(df.loc[i, outcome_names].values), entry.objectives) @mock.patch("ema_workbench.em_framework.optimization.platypus") def test_to_platypus_types(self, mocked_platypus): @@ -132,10 +130,7 @@ def test_to_platypus_types(self, mocked_platypus): def test_to_problem(self, mocked_platypus): mocked_model = Model("test", function=mock.Mock()) mocked_model.levers = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)] - mocked_model.uncertainties = [ - RealParameter("c", 0, 1), - RealParameter("d", 0, 1), - ] + mocked_model.uncertainties = [RealParameter("c", 0, 1), RealParameter("d", 0, 1)] mocked_model.outcomes = [ScalarOutcome("x", kind=1), ScalarOutcome("y", kind=1)] searchover = "levers" @@ -170,20 +165,13 @@ class TestRobustOptimization(unittest.TestCase): def test_to_robust_problem(self, mocked_platypus): mocked_model = Model("test", function=mock.Mock()) mocked_model.levers = [RealParameter("a", 0, 1), RealParameter("b", 0, 1)] - mocked_model.uncertainties = [ - RealParameter("c", 0, 1), - RealParameter("d", 0, 1), - ] + mocked_model.uncertainties = [RealParameter("c", 0, 1), RealParameter("d", 0, 1)] mocked_model.outcomes = [ScalarOutcome("x"), ScalarOutcome("y")] scenarios = 5 robustness_functions = [ - ScalarOutcome( - "mean x", variable_name="x", function=mock.Mock(), kind="maximize" - ), - ScalarOutcome( - "mean y", variable_name="y", function=mock.Mock(), kind="maximize" - ), + ScalarOutcome("mean x", variable_name="x", function=mock.Mock(), kind="maximize"), + ScalarOutcome("mean y", variable_name="y", function=mock.Mock(), kind="maximize"), ] problem = to_robust_problem(mocked_model, scenarios, robustness_functions) diff --git a/test/test_em_framework/test_outcomes.py b/test/test_em_framework/test_outcomes.py index 4f2a674cb..dc32bafff 100644 --- a/test/test_em_framework/test_outcomes.py +++ b/test/test_em_framework/test_outcomes.py @@ -49,23 +49,17 @@ def test_outcome(self): name = "d" var_name = "something else" function = "not a function" - outcome = self.outcome_class( - name, variable_name=var_name, function=function - ) + outcome = self.outcome_class(name, variable_name=var_name, function=function) with self.assertRaises(ValueError): name = "e" var_name = 1 - outcome = self.outcome_class( - name, variable_name=var_name, function=function - ) + outcome = self.outcome_class(name, variable_name=var_name, function=function) with self.assertRaises(ValueError): name = "f" var_name = ["a variable", 1] - outcome = self.outcome_class( - name, variable_name=var_name, function=function - ) + outcome = self.outcome_class(name, variable_name=var_name, function=function) name = "g" var_name = "something else" 
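The outcome-construction hunks on either side of this point all exercise the same signature: an outcome whose value is computed by a function that is called with one positional argument per entry in variable_name. For orientation, a short usage sketch; the outcome and variable names here are invented for illustration and are not taken from the tests.

import numpy as np
from ema_workbench import ScalarOutcome

# Hypothetical names: "total_cost" is the outcome to record; "capex" and
# "opex" are assumed to be two quantities reported by the model function.
total_cost = ScalarOutcome(
    "total_cost",
    variable_name=["capex", "opex"],
    # invoked as function(capex, opex), mirroring the mocked calls in the
    # test_process hunks around this point
    function=lambda capex, opex: float(np.sum(capex) + np.sum(opex)),
)
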
@@ -98,9 +92,7 @@ def test_process(self): function.return_value = 2 variable_name = ["a", "b"] - outcome = self.outcome_class( - name, function=function, variable_name=variable_name - ) + outcome = self.outcome_class(name, function=function, variable_name=variable_name) outputs = [1, 2] self.assertEqual(outcome.process(outputs), 2) @@ -113,9 +105,7 @@ def test_process(self): function.return_value = 2 variable_name = ["a", "b"] - outcome = self.outcome_class( - name, function=function, variable_name=variable_name - ) + outcome = self.outcome_class(name, function=function, variable_name=variable_name) outcome.process([1]) @@ -146,9 +136,7 @@ def test_process(self): function.return_value = [2] variable_name = ["a", "b"] - outcome = self.outcome_class( - name, function=function, variable_name=variable_name - ) + outcome = self.outcome_class(name, function=function, variable_name=variable_name) outputs = [1, 2] self.assertEqual(outcome.process(outputs), [2]) @@ -161,9 +149,7 @@ def test_process(self): function.return_value = [2] variable_name = ["a", "b"] - outcome = self.outcome_class( - name, function=function, variable_name=variable_name - ) + outcome = self.outcome_class(name, function=function, variable_name=variable_name) outcome.process([1]) diff --git a/test/test_em_framework/test_parameter.py b/test/test_em_framework/test_parameter.py index 5b6b4555b..fe8dac540 100644 --- a/test/test_em_framework/test_parameter.py +++ b/test/test_em_framework/test_parameter.py @@ -82,9 +82,7 @@ def test_from_dist(self): self.assertEqual(par.resolution, [0, 1]) with self.assertRaises(ValueError): - parameters.RealParameter.from_dist( - "test", sp.stats.randint(0, 1) - ) # @UndefinedVariable + parameters.RealParameter.from_dist("test", sp.stats.randint(0, 1)) # @UndefinedVariable parameters.RealParameter.from_dist( "test", sp.stats.uniform(0, 1), blaat=[0, 1] # @UndefinedVariable ) @@ -109,20 +107,14 @@ def test_instantiation(self): upper_bound = 0 with self.assertRaises(ValueError): - par = parameters.IntegerParameter( - name, lower_bound, upper_bound, resolution - ) + par = parameters.IntegerParameter(name, lower_bound, upper_bound, resolution) with self.assertRaises(ValueError): resolution = [-1, 0] - par = parameters.IntegerParameter( - name, lower_bound, upper_bound, resolution - ) + par = parameters.IntegerParameter(name, lower_bound, upper_bound, resolution) resolution = [0, 1, 3] - par = parameters.IntegerParameter( - name, lower_bound, upper_bound, resolution - ) + par = parameters.IntegerParameter(name, lower_bound, upper_bound, resolution) with self.assertRaises(ValueError): par = parameters.IntegerParameter(name, lower_bound, 2.1, resolution) @@ -130,9 +122,7 @@ def test_instantiation(self): par = parameters.IntegerParameter(name, 0.0, 2, resolution) with self.assertRaises(ValueError): - par = parameters.IntegerParameter( - name, lower_bound, upper_bound, [0, 1.5, 2] - ) + par = parameters.IntegerParameter(name, lower_bound, upper_bound, [0, 1.5, 2]) def test_dist(self): name = "test" diff --git a/test/test_em_framework/test_points.py b/test/test_em_framework/test_points.py index ecd29fd78..21485b3ab 100644 --- a/test/test_em_framework/test_points.py +++ b/test/test_em_framework/test_points.py @@ -14,17 +14,13 @@ def test_experiment_gemerator(self): scenarios, model_structures, policies, combine="factorial" ) experiments = list(experiments) - self.assertEqual( - len(experiments), 6, ("wrong number of experiments " "for factorial") - ) + self.assertEqual(len(experiments), 6, ("wrong number of 
experiments " "for factorial")) experiments = points.experiment_generator( scenarios, model_structures, policies, combine="sample" ) experiments = list(experiments) - self.assertEqual( - len(experiments), 3, ("wrong number of experiments " "for zipover") - ) + self.assertEqual(len(experiments), 3, ("wrong number of experiments " "for zipover")) with self.assertRaises(ValueError): experiments = points.experiment_generator( diff --git a/test/test_em_framework/test_util.py b/test/test_em_framework/test_util.py index be0132dc1..7da8834e6 100644 --- a/test/test_em_framework/test_util.py +++ b/test/test_em_framework/test_util.py @@ -26,9 +26,7 @@ def test_namedict(self): self.assertEqual(nd.name, name, "name not equal") for key, value in nd.items(): - self.assertEqual( - kwargs[key], value, "kwargs not set on inner dict correctly" - ) + self.assertEqual(kwargs[key], value, "kwargs not set on inner dict correctly") kwargs = {"a": 1, "b": 2} @@ -36,9 +34,7 @@ def test_namedict(self): self.assertEqual(nd.name, repr(kwargs), "name not equal") for key, value in nd.items(): - self.assertEqual( - kwargs[key], value, "kwargs not set on inner dict correctly" - ) + self.assertEqual(kwargs[key], value, "kwargs not set on inner dict correctly") # test len self.assertEqual(2, len(nd), "length not correct")