diff --git a/docs/conf.py b/docs/conf.py index 33c15124aa..2e462b353d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -34,6 +34,7 @@ "sphinx.ext.autosectionlabel", "sphinxarg.ext", "sphinx.ext.doctest", + "enum_tools.autoenum", "sphinx_copybutton", ] diff --git a/docs/explanations/modeling_extensions/flexibility_analysis/index.rst b/docs/explanations/modeling_extensions/flexibility_analysis/index.rst new file mode 100644 index 0000000000..0bd0d40675 --- /dev/null +++ b/docs/explanations/modeling_extensions/flexibility_analysis/index.rst @@ -0,0 +1,12 @@ + +Flexibility Analysis +==================== + +A module for performing flexibility analysis. + +.. toctree:: + :maxdepth: 1 + + overview + reference + diff --git a/docs/explanations/modeling_extensions/flexibility_analysis/overview.rst b/docs/explanations/modeling_extensions/flexibility_analysis/overview.rst new file mode 100644 index 0000000000..d6be478912 --- /dev/null +++ b/docs/explanations/modeling_extensions/flexibility_analysis/overview.rst @@ -0,0 +1,147 @@ + +Flexibility Analysis Overview +============================= + +.. contents:: + :depth: 3 + :local: + +Introduction +------------ + +The flexibility analysis module within IDAES provides a framework for +evaluating how well a given system performs with respect to a set of +uncertain parameters. Two methods are provided. The flexibility (or feasibility) test +(FT) can be used to determine if a set of performance constraints can +be satisfied for any realization of uncertain parameters. The +flexibility index (FI) can be used to quantify the size of the +uncertainty region for which the performance constraints can be +satisfied [Grossmann1987]_. + +The FT is given by + +.. _FT: + +.. math:: + + \phi(\underline{\theta}, \overline{\theta}) = &\max_{\theta} \min_{z} u \\ + & s.t. \\ + & g_{j}(x,z,\theta) \leq u \\ + & h(x,z,\theta) = 0 \\ + & \underline{\theta} \leq \theta \leq \overline{\theta} + +where the uncertain parameters are given by :math:`\theta`, :math:`z` +are the controls, :math:`u` is the maximum constraint violation, +:math:`g_j` are the performance constraints, and :math:`h` are the +constraints which represent physics (e.g., mass balances). Note that +the dimension of :math:`x` must match the dimension of :math:`h`. In +other words, if :math:`\theta` and :math:`z` are fixed, then :math:`h` +should completely determine :math:`x`. If +:math:`\phi(\underline{\theta}, \overline{\theta})` is less than or +equal to zero, then the FT passes indicating that, for any +:math:`\theta` between :math:`\underline{\theta}` and +:math:`\overline{\theta}`, there exists a :math:`z` such that +:math:`g_j(x, z, \theta) \leq 0` and :math:`h(x, z, \theta) = 0`. If +:math:`\phi(\underline{\theta}, \overline{\theta})` is greater than +zero, then the FT fails indicating that the performance constraints +cannot be satisfied for at least one value of :math:`\theta` between +:math:`\underline{\theta}` and :math:`\overline{\theta}`. Also note +that this formulation assumes that :math:`h(x,z,\theta) = 0` can be +satisfied for any :math:`\theta` between :math:`\underline{\theta}` +and :math:`\overline{\theta}`. + +The FI is given by + +.. math:: + + \psi(\theta^{N}, \Delta \theta) = &\max \delta \\ + & s.t. \\ + & \phi(\underline{\theta}, \overline{\theta}) \leq 0 \\ + & \underline{\theta} = \theta^{N} - \delta \Delta \theta \\ + & \overline{\theta} = \theta^{N} + \delta \Delta \theta + +where :math:`\theta^{N}` is a "nominal" point. 
The goal of the FI is +to find the largest region around this nominal point for which the +performance constraints can be satisfied. As written, the FI searches +for the largest hyperrectangle, but other shapes (e.g., ellipses) can +be used. Currently, only the hyperrectangle is supported in the +flexibility analysis module in IDAES. Typically, :math:`\delta` is +bounded between 0 and 1. +
+Flexibility Test Solution Methods +--------------------------------- +
+Vertex Enumeration +^^^^^^^^^^^^^^^^^^ +
+Vertex enumeration solves the inner minimization problem +
+.. _innerProblem: +
+.. math:: +
+ & \min_{z} u \\ + & s.t. \\ + & g_{j}(x,z,\theta) \leq u \\ + & h(x,z,\theta) = 0 +
+at each vertex of the hyperrectangle :math:`[\underline{\theta}, \overline{\theta}]`. For certain problem types (e.g., linear), the solution to :ref:`FT` is guaranteed to be at one of the vertices of this hyperrectangle [Swaney1985]_. For other problem types, vertex enumeration is only a heuristic that may not find the value of :math:`\theta` that maximizes the violation of the performance constraints. +
+Active Constraint Method +^^^^^^^^^^^^^^^^^^^^^^^^ +
+The active constraint method converts the bilevel problem given by :ref:`FT` to a single-level problem by formulating the KKT conditions of the :ref:`inner minimization problem` and embedding them as constraints in the outer problem [Grossmann1987]_. Note that this method assumes the Haar conditions hold, and a constraint is added enforcing that the number of active inequalities (i.e., :math:`g_{j}(x,z,\theta) = u`) is equal to the number of controls plus one. Even if the resulting single-level problem is solved to global optimality, this method may be conservative because the KKT conditions can be satisfied at the "worst" local minimum of the inner minimization problem rather than at its global minimum. +
+Sampling +^^^^^^^^ +
+Sampling is similar to `Vertex Enumeration`_ except that the :ref:`inner minimization problem` is solved at sampled values of :math:`\theta` rather than only at the vertices of :math:`[\underline{\theta}, \overline{\theta}]`. This can be useful for nonlinear problems but still may miss the worst-case :math:`\theta`. +
+Decision Rules +^^^^^^^^^^^^^^ +
+Decision rules can be used to convert the bilevel problem given by :ref:`FT` to a single-level problem by removing all degrees of freedom of the inner problem with a control policy. Suppose we have a decision rule given by :math:`z = d(\theta)`. Because the only degrees of freedom in the inner problem are :math:`z`, the :ref:`FT` may be reformulated as +
+.. math:: +
+ & \max_{\theta} \overline{u} \\ + & s.t. \\ + & g_{j}(x,z,\theta) = u_{j} \\ + & h(x,z,\theta) = 0 \\ + & \overline{u} = \sum u_{j} y_{j} \\ + & \sum y_{j} = 1 \\ + & z = d(\theta) \\ + & \underline{\theta} \leq \theta \leq \overline{\theta} +
+where the :math:`y_{j}` are binary variables that select the performance constraint with the largest violation. Currently, the two types of decision rules supported are linear decision rules and neural network decision rules with ReLU activation functions. Because the decision rules result in suboptimal values of :math:`z`, this method is conservative. +
+Flexibility Index Solution Methods +---------------------------------- +
+Bisection Method +^^^^^^^^^^^^^^^^ +
+The bisection method uses bisection on :math:`\delta` to find the largest :math:`\delta` for which :math:`\phi(\underline{\theta}, \overline{\theta}) \leq 0`. Bisection works because :math:`\phi(\underline{\theta}, \overline{\theta})` is monotonically increasing with :math:`\delta`. Each subproblem solves the :ref:`FT` using one of the methods described above. 
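+
+The loop below is a minimal sketch of this idea, assuming a hypothetical helper
+``evaluate_flex_test(delta)`` that builds the hyperrectangle
+:math:`\theta^{N} \pm \delta \Delta \theta` and returns
+:math:`\phi(\underline{\theta}, \overline{\theta})`. In practice, this search is
+performed automatically by :meth:`solve_flex_index`.
+
+.. code-block:: python
+
+    def bisect_flex_index(evaluate_flex_test, delta_min=0.0, delta_max=1.0, tol=1e-4):
+        # evaluate_flex_test(delta) is assumed to return phi for the region
+        # theta_nominal +/- delta * delta_theta; phi <= 0 means the FT passes
+        while delta_max - delta_min > tol:
+            delta = 0.5 * (delta_min + delta_max)
+            if evaluate_flex_test(delta) <= 0:
+                delta_min = delta  # constraints can be satisfied; grow the region
+            else:
+                delta_max = delta  # flexibility test fails; shrink the region
+        return delta_min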
+ +Usage +----- + +The flexibility analysis module within IDAES provides two primary +functions. The first is +:meth:`solve_flextest`. The +:class:`FlexTestConfig` +specifies how the flexibility test should be solved. The second is +:meth:`solve_flex_index`. Examples +can be found `here +`_. + +.. [Grossmann1987] Grossmann, Ignacio E., and + Christodoulos A. Floudas. "Active constraint + strategy for flexibility analysis in chemical + processes." Computers & Chemical Engineering 11.6 + (1987): 675-693. + +.. [Swaney1985] Swaney, Ross Edward, and Ignacio E. Grossmann. + "An index for operational flexibility in chemical + process design. Part I: Formulation and theory." + AIChE Journal 31.4 (1985): 621-630. diff --git a/docs/explanations/modeling_extensions/flexibility_analysis/reference.rst b/docs/explanations/modeling_extensions/flexibility_analysis/reference.rst new file mode 100644 index 0000000000..fedd09bdd4 --- /dev/null +++ b/docs/explanations/modeling_extensions/flexibility_analysis/reference.rst @@ -0,0 +1,67 @@ +Flexibility Analysis Reference +============================== + +Table of Contents +----------------- +Enumerations + +* :py:enum:`FlexTestMethod` +* :py:enum:`FlexTestTermination` +* :py:enum:`SamplingStrategy` +* :py:enum:`SamplingIniStrategy` + +Configuration + +* :class:`FlexTestConfig` +* :class:`ActiveConstraintConfig` +* :class:`SamplingConfig` +* :class:`LinearDRConfig` +* :class:`ReluDRConfig` + +Results + +* :class:`FlexTestResults` + +Functions + +* :meth:`solve_flextest` +* :meth:`solve_flex_index` + +Enumerations +------------ + +.. autoenum:: idaes.apps.flexibility_analysis.FlexTestMethod + +.. autoenum:: idaes.apps.flexibility_analysis.FlexTestTermination + +.. autoenum:: idaes.apps.flexibility_analysis.SamplingStrategy + +.. autoenum:: idaes.apps.flexibility_analysis.SamplingInitStrategy + +Configuration +------------- + +.. autoclass:: idaes.apps.flexibility_analysis.FlexTestConfig + +.. autoclass:: idaes.apps.flexibility_analysis.ActiveConstraintConfig + +.. autoclass:: idaes.apps.flexibility_analysis.SamplingConfig + +.. autoclass:: idaes.apps.flexibility_analysis.decision_rules.dr_config.DRConfig + +.. autoclass:: idaes.apps.flexibility_analysis.LinearDRConfig + +.. autoclass:: idaes.apps.flexibility_analysis.ReluDRConfig + + +Results +------- + +.. autoclass:: idaes.apps.flexibility_analysis.FlexTestResults + +Functions +--------- + +.. autofunction:: idaes.apps.flexibility_analysis.solve_flextest + +.. autofunction:: idaes.apps.flexibility_analysis.solve_flex_index diff --git a/docs/explanations/modeling_extensions/index.rst b/docs/explanations/modeling_extensions/index.rst index e603091d71..57f48d5840 100644 --- a/docs/explanations/modeling_extensions/index.rst +++ b/docs/explanations/modeling_extensions/index.rst @@ -16,6 +16,7 @@ provided below. caprese/index uncertainty_propagation/index diagnostics/index + flexibility_analysis/index .. rubric:: PySMO: Python-based Surrogate Modeling Objects diff --git a/idaes/apps/flexibility_analysis/__init__.py b/idaes/apps/flexibility_analysis/__init__.py new file mode 100644 index 0000000000..1e74568112 --- /dev/null +++ b/idaes/apps/flexibility_analysis/__init__.py @@ -0,0 +1,34 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). 
+# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +A module for formulating flexibility analysis problems (feasibility test and +flexibility index). +""" +from .flextest import ( + solve_flextest, + SamplingStrategy, + FlexTestConfig, + FlexTestMethod, + FlexTestTermination, + FlexTestResults, + SamplingConfig, + FlexTest, + ActiveConstraintConfig, + build_active_constraint_flextest, + build_flextest_with_dr, +) +from .decision_rules.dr_enum import DecisionRuleTypes +from .decision_rules.linear_dr import LinearDRConfig +from .decision_rules.relu_dr_config import ReluDRConfig +from .flex_index import solve_flex_index +from .sampling import perform_sampling, SamplingInitStrategy diff --git a/idaes/apps/flexibility_analysis/_check_dependencies.py b/idaes/apps/flexibility_analysis/_check_dependencies.py new file mode 100644 index 0000000000..974ef1bf2d --- /dev/null +++ b/idaes/apps/flexibility_analysis/_check_dependencies.py @@ -0,0 +1,21 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module is only used to check dependencies for unit tests. +""" +import unittest +from pyomo.common.dependencies import attempt_import + +np, nump_available = attempt_import("numpy") +if not nump_available: + raise unittest.SkipTest("flexibility_analysis tests require numpy") diff --git a/idaes/apps/flexibility_analysis/_check_relu_dependencies.py b/idaes/apps/flexibility_analysis/_check_relu_dependencies.py new file mode 100644 index 0000000000..4f07719817 --- /dev/null +++ b/idaes/apps/flexibility_analysis/_check_relu_dependencies.py @@ -0,0 +1,22 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +This module is only used to check dependencies for unit tests. +""" +import unittest +from pyomo.common.dependencies import attempt_import + +tensorflow, tensorflow_available = attempt_import("tensorflow") +omlt, nump_available = attempt_import("omlt") +if not tensorflow_available or not nump_available: + raise unittest.SkipTest("flexibility_analysis tests require tensorflow and omlt") diff --git a/idaes/apps/flexibility_analysis/check_optimal.py b/idaes/apps/flexibility_analysis/check_optimal.py new file mode 100644 index 0000000000..f12c3812a1 --- /dev/null +++ b/idaes/apps/flexibility_analysis/check_optimal.py @@ -0,0 +1,32 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module provides a function like Pyomo's assert_optimal_termination but that +works for both APPSI solver interfaces and non-appsi solver interfaces. +""" +import pyomo.environ as pe +from pyomo.contrib import appsi + + +def assert_optimal_termination(results): + """ + Raise an exception if the termination condition was not optimal. + + Parameters + ---------- + results: pyomo results object from calling solve() + """ + if hasattr(results, "termination_condition"): + assert results.termination_condition == appsi.base.TerminationCondition.optimal + else: + pe.assert_optimal_termination(results) diff --git a/idaes/apps/flexibility_analysis/decision_rules/__init__.py b/idaes/apps/flexibility_analysis/decision_rules/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/apps/flexibility_analysis/decision_rules/dr_config.py b/idaes/apps/flexibility_analysis/decision_rules/dr_config.py new file mode 100644 index 0000000000..dbd49586e5 --- /dev/null +++ b/idaes/apps/flexibility_analysis/decision_rules/dr_config.py @@ -0,0 +1,39 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +This module defines a config for specifying options related to decision rules +""" +from pyomo.common.config import ConfigDict + + +class DRConfig(ConfigDict): + r""" + A base class for specifying options for building + decision rules. + """ + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super().__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) diff --git a/idaes/apps/flexibility_analysis/decision_rules/dr_enum.py b/idaes/apps/flexibility_analysis/decision_rules/dr_enum.py new file mode 100644 index 0000000000..1c1a4c6d7e --- /dev/null +++ b/idaes/apps/flexibility_analysis/decision_rules/dr_enum.py @@ -0,0 +1,26 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module defines an enum for specifying the type of decision rule when a +decision rule is used. +""" +import enum + + +class DecisionRuleTypes(enum.Enum): + """ + An enum to specify the type of decision rule to use. + """ + + linear = enum.auto() + relu_nn = enum.auto() diff --git a/idaes/apps/flexibility_analysis/decision_rules/linear_dr.py b/idaes/apps/flexibility_analysis/decision_rules/linear_dr.py new file mode 100644 index 0000000000..519bf336e1 --- /dev/null +++ b/idaes/apps/flexibility_analysis/decision_rules/linear_dr.py @@ -0,0 +1,127 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module contains a function for constructing a linear decision rule for +the inner problem of the flexibility test. 
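+
+The decision rule approximates each control (output) variable as an affine
+function of the uncertain parameters (inputs), with coefficients fit to the
+provided samples by least squares.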
+""" +from typing import MutableMapping, Sequence +import pyomo.environ as pe +from pyomo.core.base.var import _GeneralVarData, ScalarVar, IndexedVar +from pyomo.core.base.block import _BlockData +from pyomo.core.expr.numeric_expr import LinearExpression +from pyomo.common.config import ConfigValue +from .dr_config import DRConfig +from ..check_optimal import assert_optimal_termination + + +class LinearDRConfig(DRConfig): + r""" + A class for specifying options for constructing linear decision rules + for use in the flexibility test problem. + + Attributes + ---------- + solver: Union[Solver, OptSolver] + The solver to use for building the linear decision rule (an LP solver). + """ + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super().__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + self.solver = self.declare( + "solver", ConfigValue(default=pe.SolverFactory("ipopt")) + ) + + +def construct_linear_decision_rule( + input_vals: MutableMapping[_GeneralVarData, Sequence[float]], + output_vals: MutableMapping[_GeneralVarData, Sequence[float]], + config: LinearDRConfig, +) -> _BlockData: + """ + Construct a linear decision rule from the data provided for the inputs + and outputs. + + Parameters + ---------- + input_vals: input_vals: MutableMapping[_GeneralVarData, Sequence[float]] + Data for the variables that are inputs to the decision rule + output_vals: input_vals: MutableMapping[_GeneralVarData, Sequence[float]] + Data for the variables that are outputs to the decision rule + config: LinearDRConfig + A config object to specify options for the decision rule + + Returns + ------- + res: _BlockData + A pyomo model containing the linear decision rule + """ + n_inputs = len(input_vals) + n_outputs = len(output_vals) + + res = pe.Block(concrete=True) + res.output_set = pe.Set(initialize=list(range(n_outputs))) + res.decision_rule = pe.Constraint(res.output_set) + + for out_ndx, (output_var, out_samples) in enumerate(output_vals.items()): + trainer = pe.ConcreteModel() + + n_samples = len(out_samples) + trainer.input_set = pe.Set(initialize=list(range(n_inputs))) + trainer.sample_set = pe.Set(initialize=list(range(n_samples))) + + trainer.const = ScalarVar() + trainer.coefs = IndexedVar(trainer.input_set) + trainer.out_est = IndexedVar(trainer.sample_set) + + obj_expr = sum( + (trainer.out_est[i] - out_samples[i]) ** 2 for i in trainer.sample_set + ) + trainer.objective = pe.Objective(expr=obj_expr) + + trainer.est_cons = pe.Constraint(trainer.sample_set) + for ndx in trainer.sample_set: + lin_coefs = [v[ndx] for k, v in input_vals.items()] + lin_vars = list(trainer.coefs.values()) + lin_coefs.append(1) + lin_vars.append(trainer.const) + lin_coefs.append(-1) + lin_vars.append(trainer.out_est[ndx]) + expr = LinearExpression(linear_coefs=lin_coefs, linear_vars=lin_vars) + trainer.est_cons[ndx] = (expr, 0) + + results = config.solver.solve(trainer) + assert_optimal_termination(results) + + lin_coefs = [v.value for v in trainer.coefs.values()] + lin_vars = [v for v in input_vals.keys()] + lin_coefs.append(-1) + lin_vars.append(output_var) + dr_expr = LinearExpression( + constant=trainer.const.value, linear_coefs=lin_coefs, linear_vars=lin_vars + ) + res.decision_rule[out_ndx] = (dr_expr, 0) + + return res diff --git a/idaes/apps/flexibility_analysis/decision_rules/relu_dr.py b/idaes/apps/flexibility_analysis/decision_rules/relu_dr.py new file 
mode 100644 index 0000000000..c27d988815 --- /dev/null +++ b/idaes/apps/flexibility_analysis/decision_rules/relu_dr.py @@ -0,0 +1,160 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module contains a function for constructing a neural network-based decision +rule for the inner problem of the flexibility test. +""" +from typing import MutableMapping, Sequence +import numpy as np +from pyomo.common.dependencies import attempt_import +import pyomo.environ as pe +from pyomo.core.base.var import _GeneralVarData +from pyomo.core.base.block import _BlockData +from idaes.apps.flexibility_analysis.indices import _VarIndex +from .relu_dr_config import ReluDRConfig + +tf, tensorflow_available = attempt_import("tensorflow") +keras, keras_available = attempt_import("tensorflow.keras") +omlt, omlt_available = attempt_import("omlt") +omlt_nn, _ = attempt_import("omlt.neuralnet") +omlt_io, _ = attempt_import("omlt.io") +plt, _ = attempt_import("matplotlib.pyplot") + + +def construct_relu_decision_rule( + input_vals: MutableMapping[_GeneralVarData, Sequence[float]], + output_vals: MutableMapping[_GeneralVarData, Sequence[float]], + config: ReluDRConfig, +) -> _BlockData: + """ + Construct a neural network-based decision rule with ReLU activation functions + from the data provided for the inputs and outputs. 
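+
+    The network is trained with Keras and then embedded in a Pyomo block using
+    OMLT's ReLU big-M formulation, so the resulting decision rule can be used
+    within mixed-integer formulations of the flexibility test.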
+ + Parameters + ---------- + input_vals: input_vals: MutableMapping[_GeneralVarData, Sequence[float]] + Data for the variables that are inputs to the decision rule + output_vals: input_vals: MutableMapping[_GeneralVarData, Sequence[float]] + Data for the variables that are outputs to the decision rule + config: ReluDRConfig + A config object to specify options for the decision rule + + Returns + ------- + res: _BlockData + A pyomo model containing the linear decision rule + """ + tf.random.set_seed(config.tensorflow_seed) + inputs = list(input_vals.keys()) + outputs = list(output_vals.keys()) + n_samples = len(input_vals[inputs[0]]) + + config: ReluDRConfig = config() + if config.batch_size > n_samples: + config.batch_size = n_samples + + training_input = np.empty((n_samples, len(inputs))) + for ndx, inp in enumerate(inputs): + training_input[:, ndx] = np.array(input_vals[inp], dtype=np.float64) + + training_output = np.empty((n_samples, len(outputs))) + for ndx, outp in enumerate(outputs): + training_output[:, ndx] = np.array(output_vals[outp], dtype=np.float64) + + if config.scale_inputs: + input_mean = training_input.mean(axis=0) + input_std = training_input.std(axis=0) + for ndx in range(len(inputs)): + training_input[:, ndx] = ( + training_input[:, ndx] - input_mean[ndx] + ) / input_std[ndx] + else: + input_mean = [0] * len(inputs) + input_std = [1] * len(inputs) + + if config.scale_outputs: + output_mean = training_output.mean(axis=0) + output_std = training_output.std(axis=0) + for ndx in range(len(outputs)): + training_output[:, ndx] = ( + training_output[:, ndx] - output_mean[ndx] + ) / output_std[ndx] + else: + output_mean = [0] * len(outputs) + output_std = [1] * len(outputs) + + nn = keras.Sequential() + nn.add( + keras.layers.Dense( + units=config.n_nodes_per_layer, input_dim=len(inputs), activation="relu" + ) + ) + for _ in range(config.n_layers - 1): + nn.add(keras.layers.Dense(config.n_nodes_per_layer, activation="relu")) + nn.add(keras.layers.Dense(len(outputs))) + if config.learning_rate is None: + opt = keras.optimizers.Adam() + else: + opt = keras.optimizers.Adam(learning_rate=config.learning_rate) + nn.compile(optimizer=opt, loss="mse") + history = nn.fit( + training_input, + training_output, + batch_size=config.batch_size, + epochs=config.epochs, + # verbose=0, + ) + + if config.plot_history: + plt.scatter(history.epoch, history.history["loss"]) + plt.xlabel("Epoch") + plt.ylabel("Loss") + plt.yscale("log") + plt.show() + plt.close() + + res = pe.Block(concrete=True) + res.nn = omlt.OmltBlock() + + scaler = omlt.OffsetScaling( + offset_inputs=[float(i) for i in input_mean], + factor_inputs=[float(i) for i in input_std], + offset_outputs=[float(i) for i in output_mean], + factor_outputs=[float(i) for i in output_std], + ) + input_bounds = { + ndx: ( + (v.lb - input_mean[ndx]) / input_std[ndx], + (v.ub - input_mean[ndx]) / input_std[ndx], + ) + for ndx, v in enumerate(inputs) + } + net = omlt_io.load_keras_sequential(nn, scaler, input_bounds) + formulation = omlt_nn.ReluBigMFormulation(net) + res.nn.build_formulation(formulation) + + res.input_set = pe.Set() + res.input_links = pe.Constraint(res.input_set) + for ndx, v in enumerate(inputs): + key = _VarIndex(v, None) + res.input_set.add(key) # pylint: disable=no-member + res.input_links[key] = v == res.nn.inputs[ndx] + + res.output_set = pe.Set() + res.output_links = pe.Constraint(res.output_set) + for ndx, v in enumerate(outputs): + key = _VarIndex(v, None) + res.output_set.add(key) # pylint: disable=no-member + 
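+        # link each original control variable to the corresponding output of the embedded network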
res.output_links[key] = v == res.nn.outputs[ndx] + + return res diff --git a/idaes/apps/flexibility_analysis/decision_rules/relu_dr_config.py b/idaes/apps/flexibility_analysis/decision_rules/relu_dr_config.py new file mode 100644 index 0000000000..546e689969 --- /dev/null +++ b/idaes/apps/flexibility_analysis/decision_rules/relu_dr_config.py @@ -0,0 +1,87 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module defines a config for specifying options related to neural network-based +decision rules +""" +from pyomo.common.config import ConfigValue +from .dr_config import DRConfig + + +class ReluDRConfig(DRConfig): + r""" + A class for specifying options for constructing neural network based decision rules + for use in the flexibility test problem. + + Attributes + ---------- + n_layers: int + The number of layers in the neural network (default=: 4) + n_nodes_per_layer: int + The number of nodes in each layer of the neural network (default: 4) + tensorflow_seed: int + The seed to pass to tensorflow during training + scale_inputs: bool + If False, the inputs to the neural network (uncertain parameter values) + will not be scaled for training (default: True) + scale_outputs: bool + If False, the outputs to the neural network (controls) + will not be scaled for training (default: True) + epochs: int + The number of epochs to use in training the neural network (default: 2000) + batch_size: int + The batch size to use in training the neural network (default: 20) + learning_rate: float + The learning rate for training the neural network (default: None) + plot_history: bool + If True, the training history will be plotted (default: False) + """ + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super().__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + self.n_layers: int = self.declare( + "n_layers", ConfigValue(domain=int, default=4) + ) + self.n_nodes_per_layer: int = self.declare( + "n_nodes_per_layer", ConfigValue(domain=int, default=4) + ) + self.tensorflow_seed: int = self.declare( + "tensorflow_seed", ConfigValue(domain=int, default=0) + ) + self.scale_inputs: bool = self.declare( + "scale_inputs", ConfigValue(domain=bool, default=True) + ) + self.scale_outputs: bool = self.declare( + "scale_outputs", ConfigValue(domain=bool, default=True) + ) + self.epochs: int = self.declare("epochs", ConfigValue(domain=int, default=2000)) + self.batch_size: int = self.declare( + "batch_size", ConfigValue(domain=int, default=20) + ) + self.learning_rate = self.declare("learning_rate", ConfigValue(default=None)) + self.plot_history: bool = self.declare( + "plot_history", ConfigValue(domain=bool, default=False) + ) diff --git 
a/idaes/apps/flexibility_analysis/decision_rules/tests/__init__.py b/idaes/apps/flexibility_analysis/decision_rules/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/apps/flexibility_analysis/decision_rules/tests/test_linear_dr.py b/idaes/apps/flexibility_analysis/decision_rules/tests/test_linear_dr.py new file mode 100644 index 0000000000..2728c3dd3a --- /dev/null +++ b/idaes/apps/flexibility_analysis/decision_rules/tests/test_linear_dr.py @@ -0,0 +1,78 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +from idaes.apps.flexibility_analysis import _check_dependencies +import unittest +import pyomo.environ as pe +from idaes.apps.flexibility_analysis.decision_rules.linear_dr import ( + construct_linear_decision_rule, + LinearDRConfig, +) +import numpy as np +import pytest + + +def y1_func(x1, x2): + return 3 * x1 - 2 * x2 + 5 + + +def y2_func(x1, x2): + return -x1 + 0.5 * x2 + + +class TestLinearDecisionRule(unittest.TestCase): + @pytest.mark.component + def test_construct_linear_dr(self): + x1_samples = [float(i) for i in np.linspace(-5, 5, 100)] + x2_samples = [float(i) for i in np.linspace(-5, 5, 100)] + x1_samples.extend(float(i) for i in np.linspace(-5, 5, 100)) + x2_samples.extend(float(i) for i in np.linspace(5, -5, 100)) + + x1_samples = np.array(x1_samples) + x2_samples = np.array(x2_samples) + y1_samples = y1_func(x1_samples, x2_samples) + y2_samples = y2_func(x1_samples, x2_samples) + + m = pe.ConcreteModel() + m.x1 = pe.Var(initialize=1.7) + m.x2 = pe.Var(initialize=-3.1) + m.y1 = pe.Var(initialize=0.2) + m.y2 = pe.Var(initialize=2.5) + + input_vals = pe.ComponentMap() + input_vals[m.x1] = [float(i) for i in x1_samples] + input_vals[m.x2] = [float(i) for i in x2_samples] + + output_vals = pe.ComponentMap() + output_vals[m.y1] = [float(i) for i in y1_samples] + output_vals[m.y2] = [float(i) for i in y2_samples] + + opt = pe.SolverFactory("ipopt") + config = LinearDRConfig() + config.solver = opt + m.dr = construct_linear_decision_rule( + input_vals=input_vals, output_vals=output_vals, config=config + ) + + self.assertEqual(pe.value(m.dr.decision_rule[0].lower), 0) + self.assertEqual(pe.value(m.dr.decision_rule[0].upper), 0) + self.assertAlmostEqual( + pe.value(m.dr.decision_rule[0].body), + y1_func(m.x1.value, m.x2.value) - m.y1.value, + ) + + self.assertEqual(pe.value(m.dr.decision_rule[1].lower), 0) + self.assertEqual(pe.value(m.dr.decision_rule[1].upper), 0) + self.assertAlmostEqual( + pe.value(m.dr.decision_rule[1].body), + y2_func(m.x1.value, m.x2.value) - m.y2.value, + ) diff --git a/idaes/apps/flexibility_analysis/examples/__init__.py b/idaes/apps/flexibility_analysis/examples/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/apps/flexibility_analysis/examples/idaes_hx_network.py 
b/idaes/apps/flexibility_analysis/examples/idaes_hx_network.py new file mode 100644 index 0000000000..f998aeace3 --- /dev/null +++ b/idaes/apps/flexibility_analysis/examples/idaes_hx_network.py @@ -0,0 +1,381 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This is a flexibility analysis example using an IDAES model of a +heat exchanger network. +""" +import logging +import pyomo.environ as pe +from pyomo.contrib.fbbt.fbbt import fbbt +from pyomo.network import Arc +from pyomo.core.base.block import _BlockData +from pyomo.contrib.solver.util import get_objective +from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import ( + BTXParameterBlock, +) +from idaes.core import FlowsheetBlock +from idaes.models.unit_models.heater import Heater + +from idaes.core.util.initialization import propagate_state +from idaes.core.base.control_volume_base import ControlVolumeBlockData +import idaes.apps.flexibility_analysis as flexibility +from idaes.apps.flexibility_analysis.var_utils import BoundsManager +from idaes.apps.flexibility_analysis.simplify import simplify_expr + + +logging.basicConfig(level=logging.INFO) + + +def create_model(): + """ + This function creates an IDAES model of a + heat exchanger network for flexibility analysis. 
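+
+    Returns the flowsheet model along with ``ComponentMap`` objects giving the
+    nominal values and bounds of the uncertain parameters (selected inlet
+    temperatures and molar flow rates).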
+ """ + m = pe.ConcreteModel() + m.fs = FlowsheetBlock(dynamic=False) + m.fs.properties = BTXParameterBlock( + valid_phase="Vap", + activity_coeff_model="Ideal", + state_vars="FTPz", + ) + + m.fs.heater1 = Heater(property_package=m.fs.properties) + m.fs.cooler1 = Heater(property_package=m.fs.properties) + m.fs.heater2 = Heater(property_package=m.fs.properties) + m.fs.cooler2 = Heater(property_package=m.fs.properties) + m.fs.heater3 = Heater(property_package=m.fs.properties) + m.fs.cooler3 = Heater(property_package=m.fs.properties) + m.fs.cooler4 = Heater(property_package=m.fs.properties) + + m.fs.s1 = Arc(source=m.fs.heater1.outlet, destination=m.fs.heater2.inlet) + m.fs.s2 = Arc(source=m.fs.cooler1.outlet, destination=m.fs.cooler4.inlet) + m.fs.s3 = Arc(source=m.fs.cooler2.outlet, destination=m.fs.cooler3.inlet) + + m.fs.duty_cons = pe.ConstraintList() + m.fs.duty_cons.add(m.fs.heater1.heat_duty[0] == -m.fs.cooler1.heat_duty[0]) + m.fs.duty_cons.add(m.fs.heater2.heat_duty[0] == -m.fs.cooler2.heat_duty[0]) + m.fs.duty_cons.add(m.fs.heater3.heat_duty[0] == -m.fs.cooler3.heat_duty[0]) + m.fs.duty_cons.add(m.fs.cooler1.heat_duty[0] <= 0) + m.fs.duty_cons.add(m.fs.cooler2.heat_duty[0] <= 0) + m.fs.duty_cons.add(m.fs.cooler3.heat_duty[0] <= 0) + m.fs.duty_cons.add(m.fs.cooler4.heat_duty[0] <= 0) + + approach_limit = 5 + m.fs.temp_approach = pe.ConstraintList() + m.fs.temp_approach.add( + m.fs.cooler1.inlet.temperature[0] + >= m.fs.heater1.outlet.temperature[0] + approach_limit + ) + m.fs.temp_approach.add( + m.fs.cooler1.outlet.temperature[0] + >= m.fs.heater1.inlet.temperature[0] + approach_limit + ) + m.fs.temp_approach.add( + m.fs.cooler2.inlet.temperature[0] + >= m.fs.heater2.outlet.temperature[0] + approach_limit + ) + m.fs.temp_approach.add( + m.fs.cooler2.outlet.temperature[0] + >= m.fs.heater2.inlet.temperature[0] + approach_limit + ) + m.fs.temp_approach.add( + m.fs.cooler3.inlet.temperature[0] + >= m.fs.heater3.outlet.temperature[0] + approach_limit + ) + m.fs.temp_approach.add( + m.fs.cooler3.outlet.temperature[0] + >= m.fs.heater3.inlet.temperature[0] + approach_limit + ) + + # specs + m.fs.heater1.inlet.temperature[0].fix(488) + m.fs.cooler1.inlet.temperature[0].fix(720) + m.fs.cooler2.inlet.temperature[0].fix(683) + m.fs.heater2.outlet.temperature[0].fix(663) + m.fs.heater3.inlet.temperature[0].fix(400) + m.fs.heater3.outlet.temperature[0].fix(550) + m.fs.cooler4.outlet.temperature[0].fix(450) + m.fs.cooler3_out_temp_spec = pe.Constraint( + expr=m.fs.cooler3.outlet.temperature[0] <= 423 + ) + + m.fs.heater1.inlet.flow_mol[0].fix(1) + m.fs.cooler1.inlet.flow_mol[0].fix(1) + m.fs.cooler2.inlet.flow_mol[0].fix(1) + # m.fs.heater3.inlet.flow_mol[0].fix(1) + m.fs.heater3.inlet.flow_mol[0].setlb(0.8) + m.fs.heater3.inlet.flow_mol[0].setub(1.2) + + m.fs.heater1.inlet.pressure[0].fix(101325) + m.fs.cooler1.inlet.pressure[0].fix(101325) + m.fs.cooler2.inlet.pressure[0].fix(101325) + m.fs.heater3.inlet.pressure[0].fix(101325) + + m.fs.heater1.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + m.fs.cooler1.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + m.fs.cooler2.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + m.fs.heater3.inlet.mole_frac_comp[0, "benzene"].fix(0.5) + + m.fs.heater1.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + m.fs.cooler1.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + m.fs.cooler2.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + m.fs.heater3.inlet.mole_frac_comp[0, "toluene"].fix(0.5) + + m.obj = pe.Objective(expr=-m.fs.cooler4.heat_duty[0]) + + 
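+    # expand the Arcs into explicit port equality constraints, then scale the model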
pe.TransformationFactory("network.expand_arcs").apply_to(m) + scale_model(m) + + nominal_values = pe.ComponentMap() + # nominal_values[m.fs.cooler1.inlet.temperature[0]] = 7.20 + nominal_values[m.fs.heater1.inlet.temperature[0]] = 4.88 + nominal_values[m.fs.cooler2.inlet.temperature[0]] = 6.83 + # nominal_values[m.fs.heater3.inlet.temperature[0]] = 4.00 + nominal_values[m.fs.heater1.inlet.flow_mol[0]] = 1 + # nominal_values[m.fs.cooler1.inlet.flow_mol[0]] = 1 + nominal_values[m.fs.cooler2.inlet.flow_mol[0]] = 1 + # nominal_values[m.fs.heater3.inlet.flow_mol[0]] = 1 + + param_bounds = pe.ComponentMap() + # p = m.fs.cooler1.inlet.temperature[0] + # param_bounds[p] = (nominal_values[p] - 0.10, nominal_values[p] + 0.10) + p = m.fs.heater1.inlet.temperature[0] + param_bounds[p] = (nominal_values[p] - 0.10, nominal_values[p] + 0.10) + p = m.fs.cooler2.inlet.temperature[0] + param_bounds[p] = (nominal_values[p] - 0.10, nominal_values[p] + 0.10) + # p = m.fs.heater3.inlet.temperature[0] + # param_bounds[p] = (nominal_values[p] - 0.10, nominal_values[p] + 0.10) + p = m.fs.heater1.inlet.flow_mol[0] + param_bounds[p] = (nominal_values[p] - 0.2, nominal_values[p] + 0.2) + # p = m.fs.cooler1.inlet.flow_mol[0] + # param_bounds[p] = (nominal_values[p] - 0.2, nominal_values[p] + 0.2) + p = m.fs.cooler2.inlet.flow_mol[0] + param_bounds[p] = (nominal_values[p] - 0.2, nominal_values[p] + 0.2) + # p = m.fs.heater3.inlet.flow_mol[0] + # param_bounds[p] = (nominal_values[p] - 0.2, nominal_values[p] + 0.2) + + for p in nominal_values.keys(): + assert p.is_fixed() + p.unfix() + + for c in m.component_data_objects(pe.Constraint, active=True, descend_into=True): + body = simplify_expr(c.body) + c.set_value((c.lower, body, c.upper)) + + for p in nominal_values.keys(): + assert not p.is_fixed() + p.fix() + + return m, nominal_values, param_bounds + + +def initialize(m): + """ + Initialize the model + """ + m.fs.heater1.heat_duty[0].fix(0.1000) + m.fs.heater1.initialize() + m.fs.heater1.heat_duty[0].unfix() + propagate_state(m.fs.s1) + m.fs.cooler1.heat_duty[0].fix(-m.fs.heater1.heat_duty[0]) + m.fs.cooler1.initialize() + m.fs.cooler1.heat_duty[0].unfix() + m.fs.heater2.inlet.temperature[0].fix() + m.fs.heater2.initialize() + m.fs.heater2.inlet.temperature[0].unfix() + m.fs.cooler2.heat_duty[0].fix(-m.fs.heater2.heat_duty[0].value) + m.fs.cooler2.initialize() + m.fs.cooler2.heat_duty[0].unfix() + m.fs.heater3.initialize() + propagate_state(m.fs.s3) + m.fs.cooler3.inlet.temperature[0].fix() + m.fs.cooler3.heat_duty[0].fix(-m.fs.heater3.heat_duty[0].value) + m.fs.cooler3.initialize() + m.fs.cooler3.heat_duty[0].unfix() + m.fs.cooler3.inlet.temperature[0].unfix() + propagate_state(m.fs.s2) + m.fs.cooler4.inlet.temperature[0].fix() + m.fs.cooler4.initialize() + m.fs.cooler4.inlet.temperature[0].unfix() + + +def get_var_bounds(m: _BlockData, param_bounds): + """ + Generate a map with valid variable bounds for + any possible realization of the uncertain parameters + """ + for p, (p_lb, p_ub) in param_bounds.items(): + p.unfix() + p.setlb(p_lb) + p.setub(p_ub) + bounds_manager = BoundsManager(m) + bounds_manager.save_bounds() + for b in m.block_data_objects(active=True, descend_into=True): + if isinstance(b, ControlVolumeBlockData): + b.properties_in[0].flow_mol.setlb(0) + b.properties_in[0].flow_mol.setub(5) + b.properties_out[0].flow_mol.setlb(0) + b.properties_out[0].flow_mol.setub(5) + b.properties_in[0].flow_mol_phase["Vap"].setlb(0) + b.properties_in[0].flow_mol_phase["Vap"].setub(5) + 
b.properties_out[0].flow_mol_phase["Vap"].setlb(0) + b.properties_out[0].flow_mol_phase["Vap"].setub(5) + b.properties_in[0].mole_frac_comp["benzene"].setlb(0) + b.properties_in[0].mole_frac_comp["benzene"].setub(1) + b.properties_in[0].mole_frac_comp["toluene"].setlb(0) + b.properties_in[0].mole_frac_comp["toluene"].setub(1) + b.properties_out[0].mole_frac_comp["benzene"].setlb(0) + b.properties_out[0].mole_frac_comp["benzene"].setub(1) + b.properties_out[0].mole_frac_comp["toluene"].setlb(0) + b.properties_out[0].mole_frac_comp["toluene"].setub(1) + b.properties_in[0].mole_frac_phase_comp["Vap", "benzene"].setlb(0) + b.properties_in[0].mole_frac_phase_comp["Vap", "benzene"].setub(1) + b.properties_in[0].mole_frac_phase_comp["Vap", "toluene"].setlb(0) + b.properties_in[0].mole_frac_phase_comp["Vap", "toluene"].setub(1) + b.properties_out[0].mole_frac_phase_comp["Vap", "benzene"].setlb(0) + b.properties_out[0].mole_frac_phase_comp["Vap", "benzene"].setub(1) + b.properties_out[0].mole_frac_phase_comp["Vap", "toluene"].setlb(0) + b.properties_out[0].mole_frac_phase_comp["Vap", "toluene"].setub(1) + b.properties_in[0].pressure.setlb(1.01325) + b.properties_in[0].pressure.setub(1.01325) + b.properties_out[0].pressure.setlb(1.01325) + b.properties_out[0].pressure.setub(1.01325) + b.properties_in[0].temperature.setlb(3.00) + b.properties_in[0].temperature.setub(10.00) + b.properties_out[0].temperature.setlb(3.00) + b.properties_out[0].temperature.setub(10.00) + cons_to_fbbt = list() + cons_to_fbbt.extend(b.properties_in[0].eq_enth_mol_phase_comp.values()) + cons_to_fbbt.extend(b.properties_out[0].eq_enth_mol_phase_comp.values()) + cons_to_fbbt.extend(b.properties_in[0].eq_enth_mol_phase.values()) + cons_to_fbbt.extend(b.properties_out[0].eq_enth_mol_phase.values()) + cons_to_fbbt.extend(b.enthalpy_balances.values()) + for c in cons_to_fbbt: + assert c.active + if c.active: + fbbt(c) + else: + c.activate() + fbbt(c) + c.deactivate() + res = pe.ComponentMap() + for v in m.component_data_objects(pe.Var, descend_into=True): + res[v] = (v.lb, v.ub) + bounds_manager.pop_bounds() + return res + + +def scale_model(m): + """ + This function scales the heat exchanger network model + prior to performing flexibility analysis + + Parameters + ---------- + m: _BlockData + The IDAES model of the heat exchanger network + """ + m.scaling_factor = pe.Suffix(direction=pe.Suffix.EXPORT) + for b in m.block_data_objects(active=True, descend_into=True): + if isinstance(b, ControlVolumeBlockData): + m.scaling_factor[b.heat[0]] = 1e-4 + m.scaling_factor[b.properties_in[0].pressure] = 1e-5 + m.scaling_factor[b.properties_out[0].pressure] = 1e-5 + m.scaling_factor[b.properties_in[0].temperature] = 1e-2 + m.scaling_factor[b.properties_out[0].temperature] = 1e-2 + m.scaling_factor[b.properties_in[0].enth_mol_phase["Vap"]] = 1e-4 + m.scaling_factor[b.properties_out[0].enth_mol_phase["Vap"]] = 1e-4 + m.scaling_factor[ + b.properties_in[0].enth_mol_phase_comp["Vap", "benzene"] + ] = 1e-4 + m.scaling_factor[ + b.properties_out[0].enth_mol_phase_comp["Vap", "benzene"] + ] = 1e-4 + m.scaling_factor[ + b.properties_in[0].enth_mol_phase_comp["Vap", "toluene"] + ] = 1e-4 + m.scaling_factor[ + b.properties_out[0].enth_mol_phase_comp["Vap", "toluene"] + ] = 1e-4 + for c in b.enthalpy_balances.values(): + m.scaling_factor[c] = 1e-4 + for c in b.pressure_balance.values(): + m.scaling_factor[c] = 1e-4 + for c in b.properties_in[0].eq_enth_mol_phase.values(): + m.scaling_factor[c] = 1e-4 + for c in 
b.properties_out[0].eq_enth_mol_phase.values(): + m.scaling_factor[c] = 1e-4 + for c in b.properties_in[0].eq_enth_mol_phase_comp.values(): + m.scaling_factor[c] = 1e-4 + for c in b.properties_out[0].eq_enth_mol_phase_comp.values(): + m.scaling_factor[c] = 1e-4 + for c in m.fs.duty_cons.values(): + m.scaling_factor[c] = 1e-4 + m.scaling_factor[m.fs.s1_expanded.pressure_equality[0]] = 1e-4 + m.scaling_factor[m.fs.s2_expanded.pressure_equality[0]] = 1e-4 + m.scaling_factor[m.fs.s3_expanded.pressure_equality[0]] = 1e-4 + m.scaling_factor[get_objective(m)] = 1e-4 + + pe.TransformationFactory("core.scale_model").apply_to(m, rename=False) + + +def main(method: flexibility.FlexTestMethod): + """ + Run the example + + Parameters + ---------- + method: flexibility.FlexTestMethod + The method to use for the flexibility test + """ + m, nominal_values, param_bounds = create_model() + initialize(m) + var_bounds = get_var_bounds(m, param_bounds) + config = flexibility.FlexTestConfig() + config.feasibility_tol = 1e-6 + config.terminate_early = False + config.method = method + config.minlp_solver = pe.SolverFactory("scip") + config.minlp_solver.options["limits/time"] = 300 + config.sampling_config.solver = pe.SolverFactory("ipopt") + config.sampling_config.strategy = flexibility.SamplingStrategy.grid + config.sampling_config.num_points = 3 + if method == flexibility.FlexTestMethod.linear_decision_rule: + config.decision_rule_config = flexibility.LinearDRConfig() + config.decision_rule_config.solver = pe.SolverFactory("scip") + elif method == flexibility.FlexTestMethod.relu_decision_rule: + config.decision_rule_config = flexibility.ReluDRConfig() + config.decision_rule_config.n_layers = 1 + config.decision_rule_config.n_nodes_per_layer = 10 + config.decision_rule_config.epochs = 10000 + config.decision_rule_config.batch_size = 50 + config.decision_rule_config.scale_inputs = True + config.decision_rule_config.scale_outputs = True + results = flexibility.solve_flextest( + m=m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + controls=[ + m.fs.cooler4.control_volume.heat[0], + m.fs.heater3.control_volume.properties_in[0].flow_mol, + ], + valid_var_bounds=var_bounds, + config=config, + ) + print(results) + return results + + +if __name__ == "__main__": + main(flexibility.FlexTestMethod.sampling) diff --git a/idaes/apps/flexibility_analysis/examples/linear_hx_network.py b/idaes/apps/flexibility_analysis/examples/linear_hx_network.py new file mode 100644 index 0000000000..e0263ebcfc --- /dev/null +++ b/idaes/apps/flexibility_analysis/examples/linear_hx_network.py @@ -0,0 +1,178 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +A flexibility analysis example from + + Grossmann, I. E., & Floudas, C. A. (1987). 
Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693. +""" +from typing import Tuple, Mapping +import random +import numpy as np +import pyomo.environ as pe +from pyomo.core.base.block import _BlockData +from pyomo.contrib.fbbt import interval +import idaes.apps.flexibility_analysis as flexibility + + +def create_model() -> Tuple[_BlockData, Mapping, Mapping]: + """ + This example is from + + Grossmann, I. E., & Floudas, C. A. (1987). Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693. + """ + + print( + """This example is based off of \n\n + Grossmann, I. E., & Floudas, C. A. (1987). Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693.\n\n""" + ) + + m = pe.ConcreteModel() + + m.uncertain_temps_set = pe.Set(initialize=[1, 3, 5, 8]) + m.uncertain_temps = pe.Param( + m.uncertain_temps_set, mutable=True, initialize={1: 620, 3: 388, 5: 583, 8: 313} + ) + nominal_values = pe.ComponentMap() + for p in m.uncertain_temps.values(): + nominal_values[p] = p.value + + param_bounds = pe.ComponentMap() + for p in m.uncertain_temps.values(): + param_bounds[p] = (p.value - 10.0, p.value + 10.0) + + m.variable_temps_set = pe.Set(initialize=[2, 4, 6, 7]) + m.variable_temps = pe.Var(m.variable_temps_set, bounds=(0, 1000)) + m.qc = pe.Var(initialize=0) + + m.balances = pe.Constraint([1, 2, 3, 4]) + m.balances[1] = 1.5 * (m.uncertain_temps[1] - m.variable_temps[2]) == 2 * ( + m.variable_temps[4] - m.uncertain_temps[3] + ) + m.balances[2] = m.uncertain_temps[5] - m.variable_temps[6] == 2 * ( + 563 - m.variable_temps[4] + ) + m.balances[3] = m.variable_temps[6] - m.variable_temps[7] == 3 * ( + 393 - m.uncertain_temps[8] + ) + m.balances[4] = m.qc == 1.5 * (m.variable_temps[2] - 350) + + m.temp_approaches = pe.Constraint([1, 2, 3, 4]) + m.temp_approaches[1] = m.variable_temps[2] >= m.uncertain_temps[3] + m.temp_approaches[2] = m.variable_temps[6] >= m.variable_temps[4] + m.temp_approaches[3] = m.variable_temps[7] >= m.uncertain_temps[8] + m.temp_approaches[4] = m.variable_temps[6] >= 393 + + m.performance = pe.Constraint(expr=m.variable_temps[7] <= 323) + + return m, nominal_values, param_bounds + + +def get_var_bounds(m): + """ + Generate a map with valid variable bounds for + any possible realization of the uncertain parameters + """ + res = pe.ComponentMap() + for v in m.variable_temps.values(): + res[v] = (100, 1000) + res[m.qc] = interval.mul(1.5, 1.5, *interval.sub(100, 1000, 350, 350)) + return res + + +def main( + flex_index: bool = False, + method: flexibility.FlexTestMethod = flexibility.FlexTestMethod.active_constraint, + plot_history=True, + relu_epochs=3000, +): + """ + Run the example + + Parameters + ---------- + flex_index: bool + If True, the flexibility index will be solved. Otherwise, the flexibility + test will be solved. 
+ method: flexibility.FlexTestMethod + The method to use for the flexibility test + plot_history: bool + Only used if method is flexibility.FlexTestMethod.relu_decision_rule; + Plots the training history for the neural network + """ + np.random.seed(0) + random.seed(1) + m, nominal_values, param_bounds = create_model() + var_bounds = get_var_bounds(m) + config = flexibility.FlexTestConfig() + config.feasibility_tol = 1e-6 + config.terminate_early = False # TODO: this does not do anything yet + config.method = method + config.minlp_solver = pe.SolverFactory("scip") + config.sampling_config.solver = pe.SolverFactory("appsi_highs") + config.sampling_config.strategy = "lhs" + config.sampling_config.num_points = 600 + config.sampling_config.initialization_strategy = "square" + if method == flexibility.FlexTestMethod.linear_decision_rule: + config.decision_rule_config = flexibility.LinearDRConfig() + config.decision_rule_config.solver = pe.SolverFactory("ipopt") + elif method == flexibility.FlexTestMethod.relu_decision_rule: + config.decision_rule_config = flexibility.ReluDRConfig() + config.decision_rule_config.n_layers = 1 + config.decision_rule_config.n_nodes_per_layer = 15 + config.decision_rule_config.epochs = relu_epochs + config.decision_rule_config.batch_size = 150 + config.decision_rule_config.scale_inputs = True + config.decision_rule_config.scale_outputs = True + config.decision_rule_config.plot_history = plot_history + config.decision_rule_config.tensorflow_seed = 2 + if not flex_index: + results = flexibility.solve_flextest( + m=m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + controls=[m.qc], + valid_var_bounds=var_bounds, + config=config, + ) + print(results) + else: + results = flexibility.solve_flex_index( + m=m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + controls=[m.qc], + valid_var_bounds=var_bounds, + config=config, + reconstruct_decision_rule=False, + ) + print(results) + return results + + +if __name__ == "__main__": + print("\n\n********************Active Constraint**************************") + main(flex_index=True, method=flexibility.FlexTestMethod.active_constraint) + print("\n\n********************Linear Decision Rule**************************") + main(flex_index=True, method=flexibility.FlexTestMethod.linear_decision_rule) + print("\n\n********************Vertex Enumeration**************************") + main(flex_index=True, method=flexibility.FlexTestMethod.vertex_enumeration) + print("\n\n********************ReLU Decision rule**************************") + main(flex_index=True, method=flexibility.FlexTestMethod.relu_decision_rule) diff --git a/idaes/apps/flexibility_analysis/examples/nonlin_hx_network.py b/idaes/apps/flexibility_analysis/examples/nonlin_hx_network.py new file mode 100644 index 0000000000..40a044d23e --- /dev/null +++ b/idaes/apps/flexibility_analysis/examples/nonlin_hx_network.py @@ -0,0 +1,131 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). 
+# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +A flexibility analysis example from + + Grossmann, I. E., & Floudas, C. A. (1987). Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693. +""" +from typing import Tuple, MutableMapping +import pyomo.environ as pe +from pyomo.core.base.block import _BlockData +from pyomo.core.base.param import _ParamData +import idaes.apps.flexibility_analysis as flexibility + + +def create_model() -> Tuple[ + _BlockData, + MutableMapping[_ParamData, float], + MutableMapping[_ParamData, Tuple[float, float]], +]: + """ + This example is from + + Grossmann, I. E., & Floudas, C. A. (1987). Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693. + """ + + print( + """This example is based off of \n\n + Grossmann, I. E., & Floudas, C. A. (1987). Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693.\n\n""" + ) + + m = pe.ConcreteModel() + + m.qc = pe.Var() + m.fh1 = pe.Param(mutable=True, initialize=1.4) + + m.f1 = pe.Constraint(expr=-25 + m.qc * ((1 / m.fh1) - 0.5) + 10 / m.fh1 <= 0) + m.f2 = pe.Constraint(expr=-190 + 10 / m.fh1 + m.qc / m.fh1 <= 0) + m.f3 = pe.Constraint(expr=-270 + 250 / m.fh1 + m.qc / m.fh1 <= 0) + m.f4 = pe.Constraint(expr=260 - 250 / m.fh1 - m.qc / m.fh1 <= 0) + + nominal_values = pe.ComponentMap() + nominal_values[m.fh1] = 1 + + param_bounds = pe.ComponentMap() + param_bounds[m.fh1] = (1, 1.8) + + return m, nominal_values, param_bounds + + +def get_var_bounds(m): + """ + Generate a map with valid variable bounds for + any possible realization of the uncertain parameters + """ + res = pe.ComponentMap() + res[m.qc] = (-1000, 1000) + return res + + +def main(method): + """ + Run the example + + Parameters + ---------- + method: flexibility.FlexTestMethod + The method to use for the flexibility test + """ + m, nominal_values, param_bounds = create_model() + var_bounds = get_var_bounds(m) + config = flexibility.FlexTestConfig() + config.feasibility_tol = 1e-6 + config.terminate_early = False + config.method = method + config.minlp_solver = pe.SolverFactory("scip") + config.sampling_config.solver = pe.SolverFactory("scip") + config.sampling_config.strategy = flexibility.SamplingStrategy.lhs + config.sampling_config.num_points = 100 + if method == flexibility.FlexTestMethod.linear_decision_rule: + config.decision_rule_config = flexibility.LinearDRConfig() + config.decision_rule_config.solver = pe.SolverFactory("ipopt") + elif method == flexibility.FlexTestMethod.relu_decision_rule: + config.decision_rule_config = flexibility.ReluDRConfig() + config.decision_rule_config.n_layers = 1 + config.decision_rule_config.n_nodes_per_layer = 10 + config.decision_rule_config.epochs = 3000 + config.decision_rule_config.batch_size = 50 + config.decision_rule_config.scale_inputs = True + config.decision_rule_config.scale_outputs = True + # results = flexibility.solve_flextest(m=m, 
uncertain_params=list(nominal_values.keys()), + # param_nominal_values=nominal_values, param_bounds=param_bounds, + # controls=[m.qc], valid_var_bounds=var_bounds, config=config) + # print(results) + results = flexibility.solve_flex_index( + m=m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + controls=[m.qc], + valid_var_bounds=var_bounds, + config=config, + ) + print(results) + return results + + +if __name__ == "__main__": + print("\n\n********************Active Constraint**************************") + main(flexibility.FlexTestMethod.active_constraint) + print("\n\n********************Linear Decision Rule**************************") + main(flexibility.FlexTestMethod.linear_decision_rule) + print("\n\n********************Vertex Enumeration**************************") + main(flexibility.FlexTestMethod.vertex_enumeration) + print("\n\n********************ReLU Decision rule**************************") + main(flexibility.FlexTestMethod.relu_decision_rule) diff --git a/idaes/apps/flexibility_analysis/examples/tests/__init__.py b/idaes/apps/flexibility_analysis/examples/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/apps/flexibility_analysis/examples/tests/test_examples.py b/idaes/apps/flexibility_analysis/examples/tests/test_examples.py new file mode 100644 index 0000000000..53d7355ebf --- /dev/null +++ b/idaes/apps/flexibility_analysis/examples/tests/test_examples.py @@ -0,0 +1,122 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +from contextlib import contextmanager +import unittest +import pytest +import pyomo.environ as pe +from idaes.apps.flexibility_analysis import ( + _check_dependencies, + _check_relu_dependencies, +) +import idaes.apps.flexibility_analysis as flex +from idaes.apps.flexibility_analysis.examples import ( + linear_hx_network, + idaes_hx_network, + nonlin_hx_network, +) +from idaes.core.util.testing import _enable_scip_solver_for_testing + + +@contextmanager +def scip_solver(): + solver = pe.SolverFactory("scip") + undo_changes = None + + if not solver.available(): + undo_changes = _enable_scip_solver_for_testing() + if not solver.available(): + pytest.skip(reason="SCIP solver not available") + yield solver + if undo_changes is not None: + undo_changes() + + +@pytest.mark.component +class TestExamples(unittest.TestCase): + def test_nonlin_hx_network(self): + with scip_solver(): + res = nonlin_hx_network.main( + method=flex.FlexTestMethod.active_constraint, + ) + self.assertAlmostEqual(res, 0.1474609375, 3) + + def test_linear_hx_network(self): + with scip_solver(): + res = linear_hx_network.main( + flex_index=False, + method=flex.FlexTestMethod.active_constraint, + plot_history=False, + ) + self.assertEqual( + res.termination, flex.FlexTestTermination.found_infeasible_point + ) + self.assertAlmostEqual(res.max_constraint_violation, 8.8, 5) + + res = linear_hx_network.main( + flex_index=False, + method=flex.FlexTestMethod.vertex_enumeration, + plot_history=False, + ) + self.assertEqual( + res.termination, flex.FlexTestTermination.found_infeasible_point + ) + self.assertAlmostEqual(res.max_constraint_violation, 8.8, 5) + + res = linear_hx_network.main( + flex_index=False, + method=flex.FlexTestMethod.linear_decision_rule, + plot_history=False, + ) + self.assertEqual( + res.termination, flex.FlexTestTermination.found_infeasible_point + ) + + res = linear_hx_network.main( + flex_index=True, + method=flex.FlexTestMethod.active_constraint, + plot_history=False, + ) + self.assertAlmostEqual(res, 0.5, 5) + + res = linear_hx_network.main( + flex_index=True, + method=flex.FlexTestMethod.vertex_enumeration, + plot_history=False, + ) + self.assertAlmostEqual(res, 0.5, 5) + + res = linear_hx_network.main( + flex_index=True, + method=flex.FlexTestMethod.linear_decision_rule, + plot_history=False, + ) + self.assertLessEqual(res, 0.5) + + res = linear_hx_network.main( + flex_index=True, + method=flex.FlexTestMethod.relu_decision_rule, + plot_history=False, + relu_epochs=100, + ) + self.assertGreaterEqual(res, 0.1) + self.assertLessEqual(res, 0.5) + + def test_idaes_hx_network(self): + with scip_solver(): + res = idaes_hx_network.main(flex.FlexTestMethod.sampling) + self.assertEqual( + res.termination, flex.FlexTestTermination.found_infeasible_point + ) + self.assertAlmostEqual( + res.max_constraint_violation, 0.375170890924453 + ) # regression diff --git a/idaes/apps/flexibility_analysis/flex_index.py b/idaes/apps/flexibility_analysis/flex_index.py new file mode 100644 index 0000000000..5dd896b373 --- /dev/null +++ b/idaes/apps/flexibility_analysis/flex_index.py @@ -0,0 +1,357 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). 
+# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module contains a function for solving the flexibility index problem using +the bisection method. A flexibility test problem is solved at each iteration. +""" +import math +import logging +from typing import Sequence, Union, Mapping, MutableMapping, Optional +from pyomo.core.base.block import _BlockData +import pyomo.environ as pe +from pyomo.core.base.var import _GeneralVarData +from pyomo.core.base.param import _ParamData +from .sampling import SamplingStrategy +from .flextest import ( + FlexTestConfig, + solve_flextest, + FlexTestMethod, + FlexTestTermination, + FlexTest, +) + + +logger = logging.getLogger(__name__) + + +def _get_param_bounds(orig_param_bounds, nominal_values, flex_index): + tmp_param_bounds = pe.ComponentMap() + for p, (p_lb, p_ub) in orig_param_bounds.items(): + p_nom = nominal_values[p] + tmp_param_bounds[p] = ( + p_nom - (p_nom - p_lb) * flex_index, + p_nom + (p_ub - p_nom) * flex_index, + ) + return tmp_param_bounds + + +def _add_table_row( + log_level, + outer_iter, + flex_index_lower, + flex_index_upper, + fi_lb_max_viol, + fi_ub_max_viol, +): + if flex_index_lower is None: + lb_str = str(flex_index_lower) + else: + lb_str = f"{flex_index_lower:<12.3e}" + if flex_index_upper is None: + ub_str = str(flex_index_upper) + else: + ub_str = f"{flex_index_upper:<12.3e}" + if fi_lb_max_viol is None: + lb_mv_str = str(fi_lb_max_viol) + else: + lb_mv_str = f"{fi_lb_max_viol:<12.3e}" + if fi_ub_max_viol is None: + ub_mv_str = str(fi_ub_max_viol) + else: + ub_mv_str = f"{fi_ub_max_viol:<12.3e}" + logger.log( + log_level, + f"{outer_iter:<12}{lb_str:<12}{ub_str:<12}{lb_mv_str:<15}{ub_mv_str:<15}", + ) + + +def solve_flex_index( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Sequence[float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Sequence[float]], + in_place: bool = False, + cap_index_at_1: bool = True, + reconstruct_decision_rule: Optional[bool] = None, + config: Optional[FlexTestConfig] = None, + log_level: int = logging.INFO, +) -> float: + r""" + Use bisection to solve the flexibility index problem. + + Parameters + ---------- + m: _BlockData + The pyomo model to be used for the feasibility/flexibility test. + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]] + A sequence (e.g., list) defining the set of uncertain parameters (:math:`\theta`). + These can be pyomo variables (Var) or parameters (param). However, if parameters are used, + they must be mutable. + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + nominal values (:math:`\theta^{N}`). 
+ param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + bounds (:math:`\underline{\theta}`, :math:`\overline{\theta}`). + controls: Sequence[_GeneralVarData] + A sequence (e.g., list) defining the set of control variables (:math:`z`). + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]] + A mapping (e.g., ComponentMap) defining bounds for all variables (:math:`x` and :math:`z`) that + should be valid for any :math:`\theta` between :math:`\underline{\theta}` and + :math:`\overline{\theta}`. These are only used to make the resulting flexibility test problem + more computationally tractable. All variable bounds in the model `m` are treated as performance + constraints and relaxed (:math:`g_{j}(x, z, \theta) \leq u`). The bounds in `valid_var_bounds` + are applied to the single-level problem generated from the active constraint method or one of + the decision rules. This argument is not necessary for vertex enumeration or sampling. + in_place: bool + If True, m is modified in place to generate the model for solving the flexibility test. If False, + the model is cloned first. + cap_index_at_1: bool + If False, the flexibility index (:math:`\delta`) will be allowed to be larger than 1. Otherwise, + it will be between 0 and 1. (default: True) + reconstruct_decision_rule: Optional[bool] + If True, the decision rule will be re-trained for every flexibility test subproblem solved in the + bisection method. + config: Optional[FlexTestConfig] + An object defining options for how the flexibility test should be solved for each subproblem + in the bisection method. + log_level: int + The level at which to log progress (default: logging.INFO) + + Returns + ------- + index: float + The flexibility index, :math:`\delta` + """ + + original_uncertain_params = uncertain_params + original_param_nominal_values = param_nominal_values + original_param_bounds = param_bounds + original_controls = controls + original_valid_var_bounds = valid_var_bounds + if not in_place: + m = m.clone() + uncertain_params = [m.find_component(i) for i in original_uncertain_params] + param_nominal_values = pe.ComponentMap( + (p, original_param_nominal_values[orig_p]) + for orig_p, p in zip(original_uncertain_params, uncertain_params) + ) + param_bounds = pe.ComponentMap( + (p, original_param_bounds[orig_p]) + for orig_p, p in zip(original_uncertain_params, uncertain_params) + ) + controls = [m.find_component(i) for i in original_controls] + valid_var_bounds = pe.ComponentMap( + (m.find_component(v), bnds) for v, bnds in original_valid_var_bounds.items() + ) + + if ( + config.method == FlexTestMethod.linear_decision_rule + and reconstruct_decision_rule is None + ): + reconstruct_decision_rule = True + elif ( + config.method == FlexTestMethod.relu_decision_rule + and reconstruct_decision_rule is None + ): + reconstruct_decision_rule = False + elif reconstruct_decision_rule is None: + reconstruct_decision_rule = False + + logger.log( + log_level, + f"{'Iter':<12}{'FI LB':<12}{'FI UB':<12}{'FI LB Max Viol':<15}{'FI UB Max Viol':<15}", + ) + + fi_lb_max_viol = -math.inf + fi_ub_max_viol = math.inf + + outer_iter = 0 + _add_table_row(log_level, outer_iter, None, None, None, None) + + flex_index_lower = 0 + # make sure the nominal point is feasible + nominal_bounds = pe.ComponentMap() + for p, val in param_nominal_values.items(): + nominal_bounds[p] = (val, val) + nominal_config: FlexTestConfig = config() + 
nominal_config.method = FlexTestMethod.sampling + nominal_config.terminate_early = True + nominal_config.sampling_config.strategy = SamplingStrategy.grid + nominal_config.sampling_config.num_points = 1 + nominal_config.sampling_config.enable_progress_bar = False + nominal_res = solve_flextest( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=nominal_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + in_place=False, + config=nominal_config, + ) + if nominal_res.termination != FlexTestTermination.proven_feasible: + raise RuntimeError("Nominal point is infeasible") + + outer_iter += 1 + fi_lb_max_viol = nominal_res.max_constraint_violation + _add_table_row( + log_level, outer_iter, flex_index_lower, None, fi_lb_max_viol, fi_ub_max_viol + ) + + flextest_config: FlexTestConfig = config() + flextest_config.terminate_early = True + flextest_config.sampling_config.enable_progress_bar = False + flex_index_upper = 1 + + if not cap_index_at_1: + # Find an upper bound on the flexibility index (i.e., a point where the flextest fails) + found_infeasible_point = False + for _iter in range(10): + tmp_param_bounds = _get_param_bounds( + param_bounds, param_nominal_values, flex_index_upper + ) + upper_res = solve_flextest( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=tmp_param_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + in_place=False, + config=flextest_config, + ) + outer_iter += 1 + if upper_res.termination == FlexTestTermination.found_infeasible_point: + fi_ub_max_viol = upper_res.max_constraint_violation + _add_table_row( + log_level, + outer_iter, + flex_index_lower, + flex_index_upper, + fi_lb_max_viol, + fi_ub_max_viol, + ) + found_infeasible_point = True + break + elif upper_res.termination == FlexTestTermination.proven_feasible: + flex_index_lower = flex_index_upper + flex_index_upper *= 2 + fi_lb_max_viol = upper_res.max_constraint_violation + _add_table_row( + log_level, + outer_iter, + flex_index_lower, + None, + fi_lb_max_viol, + fi_ub_max_viol, + ) + else: + raise RuntimeError("Unexpected termination from flexibility test") + + if not found_infeasible_point: + raise RuntimeError("Could not find an upper bound on the flexibility index") + + max_param_bounds = _get_param_bounds( + param_bounds, param_nominal_values, flex_index_upper + ) + if cap_index_at_1: + if reconstruct_decision_rule: + res = solve_flextest( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=max_param_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + in_place=False, + config=flextest_config, + ) + else: + ft = FlexTest( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + max_param_bounds=max_param_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + config=flextest_config, + ) + res = ft.solve(max_param_bounds) + outer_iter += 1 + if res.termination == FlexTestTermination.proven_feasible: + flex_index_lower = 1 + fi_lb_max_viol = res.max_constraint_violation + fi_ub_max_viol = res.max_constraint_violation + elif res.termination == FlexTestTermination.found_infeasible_point: + fi_ub_max_viol = res.max_constraint_violation + else: + raise RuntimeError("Unexpected termination from flexibility test") + _add_table_row( + log_level, + outer_iter, + flex_index_lower, + flex_index_upper, + fi_lb_max_viol, + fi_ub_max_viol, + ) + elif not 
reconstruct_decision_rule: + ft = FlexTest( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + max_param_bounds=max_param_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + config=flextest_config, + ) + + while (flex_index_upper - flex_index_lower) > 1e-3: + midpoint = 0.5 * (flex_index_lower + flex_index_upper) + tmp_param_bounds = _get_param_bounds( + param_bounds, param_nominal_values, midpoint + ) + if reconstruct_decision_rule: + res = solve_flextest( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=tmp_param_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + in_place=False, + config=flextest_config, + ) + else: + res = ft.solve(param_bounds=tmp_param_bounds) + outer_iter += 1 + if res.termination == FlexTestTermination.proven_feasible: + flex_index_lower = midpoint + fi_lb_max_viol = res.max_constraint_violation + elif res.termination == FlexTestTermination.found_infeasible_point: + flex_index_upper = midpoint + fi_ub_max_viol = res.max_constraint_violation + else: + raise RuntimeError("Unexpected termination from flexibility test") + _add_table_row( + log_level, + outer_iter, + flex_index_lower, + flex_index_upper, + fi_lb_max_viol, + fi_ub_max_viol, + ) + + return flex_index_lower diff --git a/idaes/apps/flexibility_analysis/flextest.py b/idaes/apps/flexibility_analysis/flextest.py new file mode 100644 index 0000000000..d599ee7e44 --- /dev/null +++ b/idaes/apps/flexibility_analysis/flextest.py @@ -0,0 +1,958 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +A module for formulating different versions of the flexibility/feasibility +test problem. 
+"""
+import enum
+import logging
+from typing import Sequence, Union, Mapping, MutableMapping, Optional, Tuple
+import numpy as np
+from pyomo.core.base.block import _BlockData
+from pyomo.common.dependencies import attempt_import
+from pyomo.util.report_scaling import report_scaling
+from pyomo.core.base.var import _GeneralVarData
+from pyomo.core.base.param import _ParamData
+from pyomo.contrib.solver.util import get_objective
+import pyomo.environ as pe
+from pyomo.common.config import (
+    ConfigDict,
+    ConfigValue,
+    PositiveFloat,
+    InEnum,
+    MarkImmutable,
+    NonNegativeFloat,
+)
+from .var_utils import (
+    get_used_unfixed_variables,
+    BoundsManager,
+    _remove_var_bounds,
+    _apply_var_bounds,
+)
+from .indices import _VarIndex, _ConIndex
+from .kkt import add_kkt_with_milp_complementarity_conditions
+from .uncertain_params import _replace_uncertain_params
+from .inner_problem import _build_inner_problem
+from .decision_rules.linear_dr import construct_linear_decision_rule
+from .sampling import (
+    SamplingStrategy,
+    perform_sampling,
+    SamplingConfig,
+    _perform_sampling,
+)
+
+relu_dr, relu_dr_available = attempt_import(
+    "idaes.apps.flexibility_analysis.decision_rules.relu_dr",
+    "The ReLU decision rule requires Tensorflow and OMLT",
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+def _get_longest_name(comps):
+    longest_name = 0
+
+    for i in comps:
+        i_len = len(str(i))
+        if i_len > longest_name:
+            longest_name = i_len
+
+    if longest_name > 195:
+        longest_name = 195
+    if longest_name < 12:
+        longest_name = 12
+
+    return longest_name
+
+
+class FlexTestMethod(enum.Enum):
+    """
+    Enum for specifying how to formulate the
+    flexibility test problem.
+    """
+
+    active_constraint = enum.auto()
+    linear_decision_rule = enum.auto()
+    relu_decision_rule = enum.auto()
+    vertex_enumeration = enum.auto()
+    sampling = enum.auto()
+
+
+FlexTestMethod.active_constraint.__doc__ = r"Solve the flexibility test using the active constraint method described in [Grossmann1987]_."
+FlexTestMethod.linear_decision_rule.__doc__ = r"Solve the flexibility test by converting the inner minimization problem to a square problem, removing all degrees of freedom with a linear decision rule of the form :math:`z = A \theta + b`."
+FlexTestMethod.relu_decision_rule.__doc__ = r"Solve the flexibility test by converting the inner minimization problem to a square problem, removing all degrees of freedom with a decision rule of the form :math:`z = f(\theta)`, where :math:`f(\theta)` is a neural network with ReLU activation functions."
+FlexTestMethod.vertex_enumeration.__doc__ = r"Solve the flexibility test by solving the inner minimization problem at every vertex of the hyperrectangle defined by :math:`(\underline{\theta}, \overline{\theta})`."
+FlexTestMethod.sampling.__doc__ = r"Solve the flexibility test by solving the inner minimization problem at random samples of :math:`\theta \in [\underline{\theta}, \overline{\theta}]`."
+
+
+class ActiveConstraintConfig(ConfigDict):
+    r"""
+    A class for specifying options for the active constraint method for the
+    flexibility test problem.
+
+    Attributes
+    ----------
+    use_haar_conditions: bool
+        If False, no constraint will be added to constrain the number of
+        active inequalities. (default: True)
+    default_BigM: float
+        Default value for the big-M parameter used to reformulate the
+        complementarity conditions in the KKT system.
(default: None) + enforce_equalities: bool + If False, :math:`h(x, z, \theta) = 0` is treated as two inequalities + (performance constraints) that can be violated (:math:`h_{i}(x, z, \theta) \leq u` + and :math:`-h_{i}(x, z, \theta) \leq u`) (default: True) + skip_scaling_check: bool + If True, the model scaling will not be checked. (default: False) + total_violation: bool + If True, the objective of the flexibility test will be the sum of + the constraint violations instead of the maximum violation. (default: False) + """ + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super().__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + + self.use_haar_conditions: bool = self.declare( + "use_haar_conditions", ConfigValue(domain=bool, default=True) + ) + self.default_BigM: Optional[float] = self.declare( + "default_BigM", ConfigValue(domain=NonNegativeFloat, default=None) + ) + self.enforce_equalities: bool = self.declare( + "enforce_equalities", ConfigValue(domain=bool, default=True) + ) + self.skip_scaling_check: bool = self.declare( + "skip_scaling_check", ConfigValue(domain=bool, default=False) + ) + self.total_violation: bool = self.declare( + "total_violation", ConfigValue(domain=bool, default=False) + ) + + +class FlexTestConfig(ConfigDict): + r""" + A class for specifying options for solving the flexibility test. + + Attributes + ---------- + feasibility_tol: float + Tolerance for considering constraints to be satisfied. In particular, if the + maximum constraint violation is less than or equal to :py:attr:`feasibility_tol`, then + the flexibility test passes. (default: 1e-6) + terminate_early: bool + If True, the specified algorithm should terminate as soon as a point + (:math:`\theta`) is found that confirms the flexibility test fails. If + False, the specified algorithm will continue until the :math:`\theta` + that maximizes the constraint violation is found. (default: False) + method: FlexTestMethod + The method that should be used to solve the flexibility test. (default: :py:attr:`active_constraint`) + minlp_solver: Union[Solver, OptSolver] + A Pyomo solver interface appropriate for solving MINLPs + sampling_config: SamplingConfig + A config object for specifying how sampling should be performed when either + generating data to create a decision rule or using sampling to solve the + flexibility test. + decision_rule_config: DRConfig + Only used if method is one of the decision rules. Should be either a LinearDRConfig + or a ReluDRConfig. + active_constraint_config: ActiveConstraintConfig + Only used if :py:attr:`method` is :py:attr:`active_constraint` + total_violation: bool + If False, the maximum constraint violation is considered. If True, the sum + of the violations of all constraints is considered. Should normally be False. 
(default: False) + """ + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super().__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + + self.feasibility_tol: float = self.declare( + "feasibility_tol", ConfigValue(domain=PositiveFloat, default=1e-6) + ) + self.terminate_early: bool = self.declare( + "terminate_early", ConfigValue(domain=bool, default=False) + ) + self.method: FlexTestMethod = self.declare( + "method", + ConfigValue( + domain=InEnum(FlexTestMethod), default=FlexTestMethod.active_constraint + ), + ) + self.minlp_solver = self.declare("minlp_solver", ConfigValue()) + self.sampling_config: SamplingConfig = self.declare( + "sampling_config", SamplingConfig() + ) + self.decision_rule_config = self.declare( + "decision_rule_config", ConfigValue(default=None) + ) + self.active_constraint_config: ActiveConstraintConfig = self.declare( + "active_constraint_config", ActiveConstraintConfig() + ) + self.total_violation: bool = self.declare( + "total_violation", ConfigValue(domain=bool, default=False) + ) + + +class FlexTestTermination(enum.Enum): + """ + An enum for communicating the results of a + flexibility test problem. + """ + + found_infeasible_point = enum.auto() + proven_feasible = enum.auto() + uncertain = enum.auto() + + +FlexTestTermination.found_infeasible_point.__doc__ = r"The meaning of this member depends on the method used to solve the flexibility/feasibility test, but it generally means that the flexibility test failed. If the solution method is not conservative (:py:attr:`FlexTestMethod.vertex_enumeration`, :py:attr:`FlexTestMethod.sampling`), then :py:attr:`FlexTestTermination.found_infeasible_point` indicates that a value of :math:`\theta` was found where at least one performance constraint (:math:`g_{j}(x, z, \theta) \leq 0`) is violated. Otherwise, :py:attr:`FlexTestTermination.found_infeasible_point` indicates that a point was found where the performance constraints might be violated." +FlexTestTermination.proven_feasible.__doc__ = r"The meaning of this member depends on the method used to solve the flexibility/feasibility test, but it generally means that the flexibility test passed. If the solution method is conservative (:py:attr:`FlexTestMethod.active_constraint`, :py:attr:`FlexTestMethod.linear_decision_rule`, :py:attr:`FlexTestMethod.relu_decision_rule`), then :py:attr:`FlexTestTermination.proven_feasible` indicates that, for any :math:`\theta \in [\underline{\theta}, \overline{\theta}]`, there exists a :math:`z` such that all of the performance constraints (:math:`g_{j}(x, z, \theta) \leq 0`) are satisfied. Otherwise, :py:attr:`FlexTestTermination.proven_feasible` just indicates that no :math:`\theta \in [\underline{\theta}, \overline{\theta}]` was found that violates the performance constraints for all :math:`z`." +FlexTestTermination.uncertain.__doc__ = r"Cannot definitively say whether the flexibility test passes or fails. This usually indicates an error was encountered." + + +class FlexTestResults(object): + r""" + Results for the flexibility test problem. 
+ + Attributes + ---------- + termination: FlexTestTermination + max_constraint_violation: float + The largest constraint violation found (:math:`u`) + unc_param_values_at_max_violation: Optional[MutableMapping[Union[_GeneralVarData, _ParamData], float]] + The values of the uncertain parameters that generated the maximum constraint violation + """ + + def __init__(self): + self.termination = FlexTestTermination.uncertain + self.max_constraint_violation: Optional[float] = None + self.unc_param_values_at_max_violation: Optional[ + MutableMapping[Union[_GeneralVarData, _ParamData], float] + ] = None + + def __str__(self): + s = f"Termination: {self.termination}\n" + s += f"Maximum constraint violation: {self.max_constraint_violation}\n" + if self.unc_param_values_at_max_violation is not None: + s += "Uncertain parameter values at maximum constraint violation: \n" + longest_param_name = _get_longest_name( + self.unc_param_values_at_max_violation.keys() + ) + s += f'{"Param":<{longest_param_name + 5}}{"Value":>12}\n' + for k, v in self.unc_param_values_at_max_violation.items(): + s += f"{str(k):<{longest_param_name + 5}}{v:>12.2e}\n" + return s + + +def _get_dof(m: _BlockData): + n_cons = len( + set( + i + for i in m.component_data_objects( + pe.Constraint, active=True, descend_into=True + ) + if i.equality + ) + ) + n_vars = len(get_used_unfixed_variables(m)) + return n_vars - n_cons + + +dr_construction_map = dict() +dr_construction_map[FlexTestMethod.linear_decision_rule] = ( + construct_linear_decision_rule +) + + +def build_flextest_with_dr( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + config: FlexTestConfig, +): + """ + Build the flexibility test problem using a decision rule. 
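+
+    The controls are sampled over the uncertain parameter ranges, a decision
+    rule :math:`z = d(\theta)` is fit to the sampled control values, and the
+    rule is added to the inner problem so that all degrees of freedom are
+    removed and the maximum constraint violation can be maximized directly
+    over the uncertain parameters.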
+ """ + config.sampling_config.total_violation = config.total_violation + + # this has to be here in case tensorflow or omlt are not installed + dr_construction_map[FlexTestMethod.relu_decision_rule] = ( + relu_dr.construct_relu_decision_rule + ) + + # enforce_equalities must be true for this method, or the resulting + # problem will be unbounded; the key is degrees of freedom + + # perform sampling + tmp = perform_sampling( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=param_bounds, + controls=controls, + in_place=False, + config=config.sampling_config, + ) + + _sample_points, _, control_values = tmp + + # replace uncertain parameters with variables + _replace_uncertain_params(m, uncertain_params, param_nominal_values, param_bounds) + for v in m.unc_param_vars.values(): + valid_var_bounds[v] = (v.lb, v.ub) + v.fix() # these should be fixed before we check the degrees of freedom + for p, p_bnds in param_bounds.items(): + if p.is_variable_type(): + valid_var_bounds[p] = p_bnds + + if _get_dof(m) != len(controls): + raise ValueError( + "The number of controls must match the number of degrees of freedom" + ) + + # check the scaling of the model + # this has to be done with valid_var_bounds (original bounds removed) to ensure we have + # an entry in valid_var_bounds for every variable + for v in m.unc_param_vars.values(): + v.unfix() + bounds_manager = BoundsManager(m) + bounds_manager.save_bounds() + _remove_var_bounds(m) + _apply_var_bounds(valid_var_bounds) + passed = report_scaling(m) + if not passed: + raise ValueError( + "Please scale the model. If a scaling report was not shown, " + "set the logging level to INFO." + ) + bounds_manager.pop_bounds() + + # construct the decision rule + # the keys of sample_points need to be the new variables + # that replaced the uncertain parameters + sample_points: MutableMapping[_GeneralVarData, Sequence[float]] = pe.ComponentMap() + for p in uncertain_params: + ndx = _VarIndex(p, None) + p_var = m.unc_param_vars[ndx] + sample_points[p_var] = _sample_points[p] + + dr = dr_construction_map[config.method]( + input_vals=sample_points, + output_vals=control_values, + config=config.decision_rule_config, + ) + + if config.total_violation: + total_violation_disjunctions = True + else: + total_violation_disjunctions = False + _build_inner_problem( + m=m, + enforce_equalities=True, + unique_constraint_violations=True, + valid_var_bounds=valid_var_bounds, + total_violation=config.total_violation, + total_violation_disjunctions=total_violation_disjunctions, + ) + _apply_var_bounds(valid_var_bounds) + m.decision_rule = dr + + obj = get_objective(m) + obj.deactivate() + + if config.total_violation: + m.max_total_violation_obj = pe.Objective( + expr=sum(m.constraint_violation.values()), sense=pe.maximize + ) + else: + m.max_constraint_violation_obj = pe.Objective( + expr=m.max_constraint_violation, sense=pe.maximize + ) + + +def build_active_constraint_flextest( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + config: Optional[ActiveConstraintConfig] = None, +): + """ + Build the flexibility test problem using the active constraint method. 
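+
+    The KKT conditions of the inner minimization problem are embedded using
+    big-M (MILP) complementarity constraints and, unless
+    config.use_haar_conditions is False, the number of active inequalities
+    is constrained to equal the number of degrees of freedom of the inner
+    problem.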
+ """ + if config is None: + config = ActiveConstraintConfig() + + _replace_uncertain_params(m, uncertain_params, param_nominal_values, param_bounds) + for v in m.unc_param_vars.values(): + valid_var_bounds[v] = v.bounds + for p, p_bnds in param_bounds.items(): + if p.is_variable_type(): + valid_var_bounds[p] = p_bnds + + # TODO: make this a context manager or try-finally + if not config.skip_scaling_check: + bounds_manager = BoundsManager(m) + bounds_manager.save_bounds() + _remove_var_bounds(m) + _apply_var_bounds(valid_var_bounds) + passed = report_scaling(m) + if not passed: + raise ValueError( + "Please scale the model. If a scaling report was not " + "shown, set the logging level to INFO." + ) + bounds_manager.pop_bounds() + + # TODO: constraint.equality does not check for range constraints with equal bounds + orig_equality_cons = [ + c + for c in m.component_data_objects(pe.Constraint, descend_into=True, active=True) + if c.equality + ] + + _build_inner_problem( + m=m, + enforce_equalities=config.enforce_equalities, + unique_constraint_violations=False, + valid_var_bounds=valid_var_bounds, + total_violation=config.total_violation, + total_violation_disjunctions=False, + ) + + for v in m.unc_param_vars.values(): + v.fix() + n_dof = _get_dof(m) + for v in m.unc_param_vars.values(): + v.unfix() + + add_kkt_with_milp_complementarity_conditions( + m=m, + uncertain_params=list(m.unc_param_vars.values()), + valid_var_bounds=valid_var_bounds, + default_M=config.default_BigM, + ) + + # TODO: to control the namespace and reduce cloning: + # take the users model and stick it on a new block as a sub-block + + if not config.enforce_equalities and not config.total_violation: + m.equality_cuts = pe.ConstraintList() + max_viol_lb, max_viol_ub = valid_var_bounds[m.max_constraint_violation] + for c in orig_equality_cons: + key1 = _ConIndex(c, "lb") + key2 = _ConIndex(m.ineq_violation_cons[key1], "ub") + y1 = m.active_indicator[key2] + key1 = _ConIndex(c, "ub") + key2 = _ConIndex(m.ineq_violation_cons[key1], "ub") + y2 = m.active_indicator[key2] + m.equality_cuts.add( + m.max_constraint_violation <= (1 - y1 * y2) * max_viol_ub + ) + m.equality_cuts.add( + m.max_constraint_violation >= (1 - y1 * y2) * max_viol_lb + ) + + if config.use_haar_conditions and not config.total_violation: + m.n_active_ineqs = pe.Constraint(expr=sum(m.active_indicator.values()) == n_dof) + + if config.total_violation: + m.max_total_violation_obj = pe.Objective( + expr=sum(m.constraint_violation.values()), sense=pe.maximize + ) + else: + m.max_constraint_violation_obj = pe.Objective( + expr=m.max_constraint_violation, sense=pe.maximize + ) + + +def _solve_flextest_active_constraint( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + config: Optional[FlexTestConfig] = None, +) -> FlexTestResults: + build_active_constraint_flextest( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=param_bounds, + valid_var_bounds=valid_var_bounds, + config=config.active_constraint_config, + ) + opt = config.minlp_solver + res = opt.solve(m) + pe.assert_optimal_termination(res) + + results = FlexTestResults() + results.max_constraint_violation = m.max_constraint_violation.value + if 
results.max_constraint_violation > config.feasibility_tol: + results.termination = FlexTestTermination.found_infeasible_point + else: + results.termination = FlexTestTermination.proven_feasible + results.unc_param_values_at_max_violation = pe.ComponentMap() + for key, v in m.unc_param_vars.items(): + results.unc_param_values_at_max_violation[key.var] = v.value + return results + + +def _solve_flextest_decision_rule( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + config: Optional[FlexTestConfig] = None, +) -> FlexTestResults: + build_flextest_with_dr( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=param_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + config=config, + ) + opt = config.minlp_solver + res = opt.solve(m) + pe.assert_optimal_termination(res) + + results = FlexTestResults() + results.max_constraint_violation = m.max_constraint_violation.value + if results.max_constraint_violation > config.feasibility_tol: + results.termination = FlexTestTermination.found_infeasible_point + else: + results.termination = FlexTestTermination.proven_feasible + results.unc_param_values_at_max_violation = pe.ComponentMap() + for key, v in m.unc_param_vars.items(): + results.unc_param_values_at_max_violation[key.var] = v.value + return results + + +def _solve_flextest_sampling( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + config: Optional[FlexTestConfig] = None, +) -> FlexTestResults: + config.sampling_config.total_violation = config.total_violation + tmp = perform_sampling( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=param_bounds, + controls=controls, + in_place=True, + config=config.sampling_config, + ) + sample_points, max_violation_values, _ = tmp + max_viol_ndx = int(np.argmax(max_violation_values)) + + results = FlexTestResults() + results.max_constraint_violation = max_violation_values[max_viol_ndx] + if results.max_constraint_violation > config.feasibility_tol: + results.termination = FlexTestTermination.found_infeasible_point + else: + results.termination = FlexTestTermination.proven_feasible + results.unc_param_values_at_max_violation = pe.ComponentMap() + for key, vals in sample_points.items(): + results.unc_param_values_at_max_violation[key] = vals[max_viol_ndx] + return results + + +def _solve_flextest_vertex_enumeration( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + config: FlexTestConfig, +) -> FlexTestResults: + config: FlexTestConfig = config() + config.sampling_config.num_points = 2 + config.sampling_config.strategy = SamplingStrategy.grid 
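+    # With a grid strategy and 2 points per uncertain parameter, the sample
+    # points are exactly the vertices of the hyperrectangle
+    # [underline{theta}, overline{theta}], so vertex enumeration is performed
+    # through the sampling machinery.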
+ config.sampling_config.total_violation = config.total_violation + tmp = perform_sampling( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=param_bounds, + controls=controls, + in_place=True, + config=config.sampling_config, + ) + sample_points, max_violation_values, _ = tmp + max_viol_ndx = int(np.argmax(max_violation_values)) + + results = FlexTestResults() + results.max_constraint_violation = max_violation_values[max_viol_ndx] + if results.max_constraint_violation > config.feasibility_tol: + results.termination = FlexTestTermination.found_infeasible_point + else: + results.termination = FlexTestTermination.proven_feasible + results.unc_param_values_at_max_violation = pe.ComponentMap() + for key, vals in sample_points.items(): + results.unc_param_values_at_max_violation[key] = vals[max_viol_ndx] + return results + + +_flextest_map = dict() +_flextest_map[FlexTestMethod.active_constraint] = _solve_flextest_active_constraint +_flextest_map[FlexTestMethod.sampling] = _solve_flextest_sampling +_flextest_map[FlexTestMethod.vertex_enumeration] = _solve_flextest_vertex_enumeration +_flextest_map[FlexTestMethod.linear_decision_rule] = _solve_flextest_decision_rule +_flextest_map[FlexTestMethod.relu_decision_rule] = _solve_flextest_decision_rule + + +def solve_flextest( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + in_place: bool = False, + config: Optional[FlexTestConfig] = None, +) -> FlexTestResults: + r""" + Parameters + ---------- + m: _BlockData + The pyomo model to be used for the feasibility/flexibility test. + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]] + A sequence (e.g., list) defining the set of uncertain parameters (:math:`\theta`). + These can be pyomo variables (Var) or parameters (param). However, if parameters are used, + they must be mutable. + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + nominal values (:math:`\theta^{N}`). + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + bounds (:math:`\underline{\theta}`, :math:`\overline{\theta}`). + controls: Sequence[_GeneralVarData] + A sequence (e.g., list) defining the set of control variables (:math:`z`). + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]] + A mapping (e.g., ComponentMap) defining bounds for all variables (:math:`x` and :math:`z`) that + should be valid for any :math:`\theta` between :math:`\underline{\theta}` and + :math:`\overline{\theta}`. These are only used to make the resulting flexibility test problem + more computationally tractable. All variable bounds in the model `m` are treated as performance + constraints and relaxed (:math:`g_{j}(x, z, \theta) \leq u`). The bounds in `valid_var_bounds` + are applied to the single-level problem generated from the active constraint method or one of + the decision rules. This argument is not necessary for vertex enumeration or sampling. 
+ in_place: bool + If True, m is modified in place to generate the model for solving the flexibility test. If False, + the model is cloned first. + config: Optional[FlexTestConfig] + An object defining options for how the flexibility test should be solved. + """ + if config is None: + config = FlexTestConfig() + + original_model = m + original_uncertain_params = uncertain_params + original_param_nominal_values = param_nominal_values + original_param_bounds = param_bounds + original_controls = controls + original_valid_var_bounds = valid_var_bounds + if not in_place: + # TODO: + # tmp_name = pyomo.common.modeling.unique_component_name(m, 'tmp_data') + # setattr(m, tmp_name, uncertain_params) + # new_m = m.clone() + # old_to_new_params = ComponentMap(zip(getattr(m, tmp_name), + # getattr(new_m, tmp_name))) + # delattr(m, tmp_name) + # m = new_m + m = m.clone() + uncertain_params = [m.find_component(i) for i in original_uncertain_params] + param_nominal_values = pe.ComponentMap( + (p, original_param_nominal_values[orig_p]) + for orig_p, p in zip(original_uncertain_params, uncertain_params) + ) + param_bounds = pe.ComponentMap( + (p, original_param_bounds[orig_p]) + for orig_p, p in zip(original_uncertain_params, uncertain_params) + ) + controls = [m.find_component(i) for i in original_controls] + valid_var_bounds = pe.ComponentMap( + (m.find_component(v), bnds) for v, bnds in original_valid_var_bounds.items() + ) + results = _flextest_map[config.method]( + m=m, + uncertain_params=uncertain_params, + param_nominal_values=param_nominal_values, + param_bounds=param_bounds, + controls=controls, + valid_var_bounds=valid_var_bounds, + config=config, + ) + if not in_place: + unc_param_values = pe.ComponentMap() + for v, val in results.unc_param_values_at_max_violation.items(): + unc_param_values[original_model.find_component(v)] = val + results.unc_param_values_at_max_violation = unc_param_values + return results + + +class FlexTest(object): + """ + This class is mostly here for the flexibility index problem. + This class enables solving the same flexibility test problem + repeatedly with different uncertain parameter bounds. + """ + + def __init__( + self, + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + max_param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Sequence[float]], + controls: Sequence[_GeneralVarData], + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]], + config: Optional[FlexTestConfig] = None, + ): + r""" + Parameters + ---------- + m: _BlockData + The pyomo model to be used for the feasibility/flexibility test. + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]] + A sequence (e.g., list) defining the set of uncertain parameters (:math:`\theta`). + These can be pyomo variables (Var) or parameters (param). However, if parameters are used, + they must be mutable. + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + nominal values (:math:`\theta^{N}`). + max_param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to the + widest possible bounds (:math:`\underline{\theta}`, :math:`\overline{\theta}`) that will + be considered for any call to solve(). 
+ controls: Sequence[_GeneralVarData] + A sequence (e.g., list) defining the set of control variables (:math:`z`). + valid_var_bounds: MutableMapping[_GeneralVarData, Tuple[float, float]] + A mapping (e.g., ComponentMap) defining bounds for all variables (:math:`x` and :math:`z`) that + should be valid for any :math:`\theta` between :math:`\underline{\theta}` and + :math:`\overline{\theta}`. These are only used to make the resulting flexibility test problem + more computationally tractable. All variable bounds in the model `m` are treated as performance + constraints and relaxed (:math:`g_{j}(x, z, \theta) \leq u`). The bounds in `valid_var_bounds` + are applied to the single-level problem generated from the active constraint method or one of + the decision rules. This argument is not necessary for vertex enumeration or sampling. + config: Optional[FlexTestConfig] + An object defining options for how the flexibility test should be solved. + """ + if config is None: + self.config: FlexTestConfig = FlexTestConfig() + else: + self.config: FlexTestConfig = config() + MarkImmutable(self.config.get("method")) + if self.config.method == FlexTestMethod.vertex_enumeration: + self.config.sampling_config.strategy = SamplingStrategy.grid + self.config.sampling_config.num_points = 2 + MarkImmutable(self.config.sampling_config.get("strategy")) + MarkImmutable(self.config.sampling_config.get("num_points")) + + self._original_model = m + self._model = m.clone() + m = self._model + self._uncertain_params = [m.find_component(i) for i in uncertain_params] + self._param_nominal_values = pe.ComponentMap( + (p, param_nominal_values[orig_p]) + for orig_p, p in zip(uncertain_params, self._uncertain_params) + ) + self._max_param_bounds = pe.ComponentMap( + (p, max_param_bounds[orig_p]) + for orig_p, p in zip(uncertain_params, self._uncertain_params) + ) + self._controls = [m.find_component(i) for i in controls] + self._valid_var_bounds = pe.ComponentMap( + (m.find_component(v), bnds) for v, bnds in valid_var_bounds.items() + ) + + self._orig_param_clone_param_map = pe.ComponentMap( + (i, j) for i, j in zip(uncertain_params, self._uncertain_params) + ) + self._clone_param_orig_param_map = pe.ComponentMap( + (i, j) for i, j in zip(self._uncertain_params, uncertain_params) + ) + + assert self.config.method in FlexTestMethod + if self.config.method == FlexTestMethod.active_constraint: + self._build_active_constraint_model() + elif self.config.method == FlexTestMethod.linear_decision_rule: + self._build_flextest_with_dr() + elif self.config.method == FlexTestMethod.relu_decision_rule: + self._build_flextest_with_dr() + elif self.config.method in { + FlexTestMethod.sampling, + FlexTestMethod.vertex_enumeration, + }: + self._build_sampling() + else: + raise ValueError(f"Unrecognized method: {self.config.method}") + + def _set_param_bounds( + self, param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Sequence[float]] + ): + for orig_p, clone_p in self._orig_param_clone_param_map.items(): + p_lb, p_ub = param_bounds[orig_p] + ndx = _VarIndex(clone_p, None) + p_var = self._model.unc_param_vars[ndx] + p_var.setlb(p_lb) + p_var.setub(p_ub) + + def _build_active_constraint_model(self): + build_active_constraint_flextest( + m=self._model, + uncertain_params=self._uncertain_params, + param_nominal_values=self._param_nominal_values, + param_bounds=self._max_param_bounds, + valid_var_bounds=self._valid_var_bounds, + ) + + def _build_flextest_with_dr(self): + build_flextest_with_dr( + m=self._model, + 
uncertain_params=self._uncertain_params, + param_nominal_values=self._param_nominal_values, + param_bounds=self._max_param_bounds, + controls=self._controls, + valid_var_bounds=self._valid_var_bounds, + config=self.config, + ) + + def _build_sampling(self): + _replace_uncertain_params( + m=self._model, + uncertain_params=self._uncertain_params, + param_nominal_values=self._param_nominal_values, + param_bounds=self._max_param_bounds, + ) + _build_inner_problem( + m=self._model, + enforce_equalities=True, + unique_constraint_violations=False, + valid_var_bounds=None, + ) + + def _solve_maximization( + self, param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Sequence[float]] + ) -> FlexTestResults: + self._set_param_bounds(param_bounds=param_bounds) + + opt = self.config.minlp_solver + res = opt.solve(self._model) + pe.assert_optimal_termination(res) + + results = FlexTestResults() + results.max_constraint_violation = self._model.max_constraint_violation.value + if results.max_constraint_violation > self.config.feasibility_tol: + results.termination = FlexTestTermination.found_infeasible_point + else: + results.termination = FlexTestTermination.proven_feasible + results.unc_param_values_at_max_violation = pe.ComponentMap() + for key, v in self._model.unc_param_vars.items(): + results.unc_param_values_at_max_violation[ + self._clone_param_orig_param_map[key.var] + ] = v.value + return results + + def _solve_sampling( + self, param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Sequence[float]] + ) -> FlexTestResults: + self._set_param_bounds(param_bounds=param_bounds) + tmp = _perform_sampling( + m=self._model, + uncertain_params=self._uncertain_params, + controls=self._controls, + config=self.config.sampling_config, + ) + sample_points, max_violation_values, _ = tmp + sample_points = pe.ComponentMap( + (self._clone_param_orig_param_map[p], vals) + for p, vals in sample_points.items() + ) + + results = FlexTestResults() + max_viol_ndx = int(np.argmax(max_violation_values)) + results.max_constraint_violation = max_violation_values[max_viol_ndx] + if results.max_constraint_violation > self.config.feasibility_tol: + results.termination = FlexTestTermination.found_infeasible_point + else: + results.termination = FlexTestTermination.proven_feasible + results.unc_param_values_at_max_violation = pe.ComponentMap() + for key, vals in sample_points.items(): + results.unc_param_values_at_max_violation[key] = vals[max_viol_ndx] + return results + + def solve( + self, param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float]] + ) -> FlexTestResults: + r""" + Solve the flexibility test problem for the specified uncertain parameter bounds. + + Parameters + ---------- + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + bounds (:math:`\underline{\theta}`, :math:`\overline{\theta}`). 
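+
+        Returns
+        -------
+        results: FlexTestResults
+            The results of the flexibility test for the given parameter bounds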
+ """ + if self.config.method in { + FlexTestMethod.active_constraint, + FlexTestMethod.linear_decision_rule, + FlexTestMethod.relu_decision_rule, + }: + return self._solve_maximization(param_bounds) + elif self.config.method in { + FlexTestMethod.sampling, + FlexTestMethod.vertex_enumeration, + }: + return self._solve_sampling(param_bounds=param_bounds) + else: + raise ValueError(f"Unrecognized method: {self.config.method}") diff --git a/idaes/apps/flexibility_analysis/indices.py b/idaes/apps/flexibility_analysis/indices.py new file mode 100644 index 0000000000..0e057c0b4e --- /dev/null +++ b/idaes/apps/flexibility_analysis/indices.py @@ -0,0 +1,82 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module contains some utility classes for creating meaningful indices +when formulating the flexibility test problem. +""" + + +class _ConIndex(object): + def __init__(self, con, bound): + self._con = con + self._bound = bound + + @property + def con(self): + # pylint: disable=missing-function-docstring + return self._con + + @property + def bound(self): + # pylint: disable=missing-function-docstring + return self._bound + + def __repr__(self): + if self.bound is None: + return str(self.con) + else: + return str((str(self.con), str(self.bound))) + + def __str__(self): + return repr(self) + + def __eq__(self, other): + if isinstance(other, _ConIndex): + return self.con is other.con and self.bound is other.bound + return False + + def __hash__(self): + return hash((self.con, self.bound)) + + +class _VarIndex(object): + def __init__(self, var, bound): + self._var = var + self._bound = bound + + @property + def var(self): + # pylint: disable=missing-function-docstring + return self._var + + @property + def bound(self): + # pylint: disable=missing-function-docstring + return self._bound + + def __repr__(self): + if self.bound is None: + return str(self.var) + else: + return str((str(self.var), str(self.bound))) + + def __str__(self): + return repr(self) + + def __eq__(self, other): + if isinstance(other, _VarIndex): + return self.var is other.var and self.bound is other.bound + return False + + def __hash__(self): + return hash((id(self.var), self.bound)) diff --git a/idaes/apps/flexibility_analysis/inner_problem.py b/idaes/apps/flexibility_analysis/inner_problem.py new file mode 100644 index 0000000000..d64fb9468c --- /dev/null +++ b/idaes/apps/flexibility_analysis/inner_problem.py @@ -0,0 +1,389 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). 
+# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module contains functions for formulating the inner problem of the +flexibility test problem +""" +from typing import MutableMapping, Tuple, Optional, Mapping, Union +from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +import pyomo.environ as pe +from pyomo.core.base.block import _BlockData +from pyomo.core.base.var import _GeneralVarData, ScalarVar +from pyomo.core.expr.numeric_expr import ExpressionBase +from pyomo.contrib.solver.util import get_objective +from .var_utils import ( + BoundsManager, + _apply_var_bounds, + get_used_unfixed_variables, + _remove_var_bounds, +) +from .indices import _ConIndex, _VarIndex + + +def _get_g_bounds( + m: _BlockData, original_vars, valid_var_bounds: Mapping +) -> MutableMapping[Union[_ConIndex, _VarIndex], Tuple[float, float]]: + + key_list = list() + g_list = list() + + for c in list( + m.component_data_objects(pe.Constraint, descend_into=True, active=True) + ): + if c.lower is not None: + key = _ConIndex(c, "lb") + g = c.lower - c.body + key_list.append(key) + g_list.append(g) + if c.upper is not None: + key = _ConIndex(c, "ub") + g = c.body - c.upper + key_list.append(key) + g_list.append(g) + + for v in original_vars: + if v.is_integer(): + raise ValueError("Original problem must be continuous") + if v.lb is not None: + key = _VarIndex(v, "lb") + g = v.lb - v + key_list.append(key) + g_list.append(g) + if v.ub is not None: + key = _VarIndex(v, "ub") + g = v - v.ub + key_list.append(key) + g_list.append(g) + + bounds_manager = BoundsManager(m) + bounds_manager.save_bounds() + + _remove_var_bounds(m) + _apply_var_bounds(valid_var_bounds) + + g_bounds = dict() + for key, g in zip(key_list, g_list): + g_bounds[key] = compute_bounds_on_expr(g) + + bounds_manager.pop_bounds() + return g_bounds + + +def _add_total_violation_disjunctions( + m: _BlockData, g: ExpressionBase, key, g_bounds: MutableMapping +): + m.zero_violation_cons[key] = ( + None, + ( + m.constraint_violation[key] + - (1 - m.zero_violation[key]) * m.violation_disjunction_BigM[key] + ), + 0, + ) + m.nonzero_violation_cons[key] = ( + None, + ( + m.constraint_violation[key] + - g + - (1 - m.nonzero_violation[key]) * m.violation_disjunction_BigM[key] + ), + 0, + ) + m.violation_disjunction_cons[key] = ( + m.zero_violation[key] + m.nonzero_violation[key], + 1, + ) + lb, ub = g_bounds[key] + m.violation_disjunction_BigM[key].value = max(abs(lb), abs(ub)) + + +def _process_constraint( + m: _BlockData, + g: ExpressionBase, + key: Union[_ConIndex, _VarIndex], + unique_constraint_violations: bool, + total_violation: bool, + total_violation_disjunctions: bool, + g_bounds: MutableMapping, +): + m.ineq_violation_set.add(key) + if total_violation: + m.constraint_violation[key].setlb(0) + if total_violation_disjunctions: + _add_total_violation_disjunctions(m, g, key, g_bounds) + else: + m.ineq_violation_cons[key] = ( + None, + g - m.constraint_violation[key], + 0, + ) + elif unique_constraint_violations: + m.ineq_violation_cons[key] = ( + g - m.constraint_violation[key], + 0, + ) + else: + 
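+        # Default case: no per-constraint violation variables are used, so every
+        # performance constraint shares the single ``max_constraint_violation``
+        # variable as its upper bound, i.e. g_j(x, z, theta) <= u, which matches
+        # the inner problem of the flexibility test.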
m.ineq_violation_cons[key] = ( + None, + g - m.max_constraint_violation, + 0, + ) + + +def _build_inner_problem( + m: _BlockData, + enforce_equalities: bool, + unique_constraint_violations: bool, + valid_var_bounds: Optional[MutableMapping[_GeneralVarData, Tuple[float, float]]], + total_violation: bool = False, + total_violation_disjunctions: bool = False, +): + """ + If enfoce equalities is True and unique_constraint_violations is False, then this + function converts + + min f(x) + s.t. + c(x) = 0 + g(x) <= 0 + + to + + min u + s.t. + c(x) = 0 + g(x) <= u + + If enfoce equalities is False and unique_constraint_violations is False, then this + function converts + + min f(x) + s.t. + c(x) = 0 + g(x) <= 0 + + to + + min u + s.t. + c(x) <= u + -c(x) <= u + g(x) <= u + + If enfoce equalities is True and unique_constraint_violations is True, then this + function converts + + min f(x) + s.t. + c(x) = 0 + g(x) <= 0 + + to + + min u + s.t. + c(x) = 0 + g_i(x) == u_i + u = sum(u_i * y_i) + sum(y_i) = 1 + Of course, the nonlinear constraint u = sum(u_i * y_i) gets reformulated. + + If total_violation is True, then unique_constraint_violations is ignored and + this function converts + + min f(x) + s.t. + c(x) = 0 + g(x) <= 0 + + to (if enforce_equalities is True) + + min sum_{i} u_i + s.t. + c(x) = 0 + g_i(x) <= u_i + u_i >= 0 + + or to (if enforce_equalities is False) + + min sum_{i} u_i + sum_{j} u_j + sum_{k} u_k + s.t. + c_j(x) <= u_j + -c_k(x) <= u_k + g_i(x) <= u_i + u_i >= 0 + u_j >= 0 + u_k >= 0 + + or to (if total_violation_disjunctions is True) + + max sum_{i} u_i + s.t. + c(x) = 0 + u_i >= 0 + [u_i <= 0] v [u_i <= g_i(x)] + + This function will also modify valid_var_bounds to include any new variables + """ + if total_violation_disjunctions: + assert total_violation + + obj = get_objective(m) + if obj is not None: + obj.deactivate() + + for v in m.unc_param_vars.values(): + v.fix() + original_vars = list(get_used_unfixed_variables(m)) + for v in m.unc_param_vars.values(): + v.unfix() + + if valid_var_bounds is None: + g_bounds = dict() + else: + g_bounds = _get_g_bounds(m, original_vars, valid_var_bounds) + + m.ineq_violation_set = pe.Set() + if not total_violation_disjunctions: + m.ineq_violation_cons = pe.Constraint(m.ineq_violation_set) + + if not total_violation: + m.max_constraint_violation = ScalarVar() + + if unique_constraint_violations or total_violation: + m.constraint_violation = pe.Var(m.ineq_violation_set) + + if total_violation_disjunctions: + m.zero_violation = pe.Var(m.ineq_violation_set, domain=pe.Binary) + m.nonzero_violation = pe.Var(m.ineq_violation_set, domain=pe.Binary) + m.zero_violation_cons = pe.Constraint(m.ineq_violation_set) + m.nonzero_violation_cons = pe.Constraint(m.ineq_violation_set) + m.violation_disjunction_cons = pe.Constraint(m.ineq_violation_set) + m.violation_disjunction_BigM = pe.Param(m.ineq_violation_set, mutable=True) + + for c in list( + m.component_data_objects(pe.Constraint, descend_into=True, active=True) + ): + if c.equality and enforce_equalities: + continue + if c.lower is not None: + key = _ConIndex(c, "lb") + g = c.lower - c.body + _process_constraint( + m, + g, + key, + unique_constraint_violations, + total_violation, + total_violation_disjunctions, + g_bounds, + ) + if c.upper is not None: + key = _ConIndex(c, "ub") + g = c.body - c.upper + _process_constraint( + m, + g, + key, + unique_constraint_violations, + total_violation, + total_violation_disjunctions, + g_bounds, + ) + + for v in original_vars: + if v.is_integer(): + raise 
ValueError("Original problem must be continuous") + if v.lb is not None: + key = _VarIndex(v, "lb") + g = v.lb - v + _process_constraint( + m, + g, + key, + unique_constraint_violations, + total_violation, + total_violation_disjunctions, + g_bounds, + ) + if v.ub is not None: + key = _VarIndex(v, "ub") + g = v - v.ub + _process_constraint( + m, + g, + key, + unique_constraint_violations, + total_violation, + total_violation_disjunctions, + g_bounds, + ) + + if total_violation: + m.total_constraint_violation_obj = pe.Objective( + expr=sum(m.constraint_violation.values()) + ) + else: + m.min_constraint_violation_obj = pe.Objective(expr=m.max_constraint_violation) + + for key in m.ineq_violation_set: + if isinstance(key, _ConIndex): + key.con.deactivate() + else: + key.var.setlb(None) + key.var.setub(None) + key.var.domain = pe.Reals + + if ( + total_violation or unique_constraint_violations + ) and valid_var_bounds is not None: + for key in m.ineq_violation_set: + lb, ub = g_bounds[key] + v = m.constraint_violation[key] + valid_var_bounds[v] = (min(lb, 0), max(ub, 0)) + + if unique_constraint_violations and not total_violation: + # max_constraint_violation = sum(constraint_violation[i] * y[i]) + # sum(y[i]) == 1 + # reformulate as + # max_constraint_violation = sum(u_hat[i]) + # u_hat[i] = constraint_violation[i] * y[i] + # and use mccormick for the last constraint + + m.max_violation_selector = pe.Var( + m.ineq_violation_set, domain=pe.Binary + ) # y[i] + m.one_max_violation = pe.Constraint( + expr=sum(m.max_violation_selector.values()) == 1 + ) + m.u_hat = pe.Var(m.ineq_violation_set) + m.max_violation_sum = pe.Constraint( + expr=m.max_constraint_violation == sum(m.u_hat.values()) + ) + m.u_hat_cons = pe.ConstraintList() + for key in m.ineq_violation_set: + violation_var = m.constraint_violation[key] + viol_lb, viol_ub = g_bounds[key] + valid_var_bounds[m.u_hat[key]] = (min(viol_lb, 0), max(viol_ub, 0)) + valid_var_bounds[m.max_violation_selector[key]] = (0, 1) + y_i = m.max_violation_selector[key] + m.u_hat_cons.add(m.u_hat[key] <= viol_ub * y_i) + m.u_hat_cons.add(m.u_hat[key] >= viol_lb * y_i) + m.u_hat_cons.add(m.u_hat[key] <= violation_var + viol_lb * y_i - viol_lb) + m.u_hat_cons.add(m.u_hat[key] >= viol_ub * y_i + violation_var - viol_ub) + + if valid_var_bounds is not None and not total_violation: + valid_var_bounds[m.max_constraint_violation] = ( + min(i[0] for i in g_bounds.values()), + max(i[1] for i in g_bounds.values()), + ) diff --git a/idaes/apps/flexibility_analysis/kkt.py b/idaes/apps/flexibility_analysis/kkt.py new file mode 100644 index 0000000000..8c51a4da8f --- /dev/null +++ b/idaes/apps/flexibility_analysis/kkt.py @@ -0,0 +1,222 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +This module contains functions for generating the KKT system for the +inner problem of the flexibility/feasibility test problem. +""" +from typing import Sequence, Mapping +import pyomo.environ as pe +from pyomo.core.expr.calculus.diff_with_pyomo import reverse_sd +from pyomo.core.base.block import _BlockData +from pyomo.contrib.fbbt.fbbt import fbbt +from pyomo.core.base.var import _GeneralVarData +from pyomo.core.expr.sympy_tools import sympyify_expression, sympy2pyomo_expression +from pyomo.contrib.solver.util import get_objective +from .indices import _VarIndex, _ConIndex +from .var_utils import ( + get_used_unfixed_variables, + _apply_var_bounds, +) + + +def _simplify(expr): + cm, sympy_expr = sympyify_expression(expr) + sympy_expr = sympy_expr.simplify() + pyomo_expr = sympy2pyomo_expression(sympy_expr, cm) + return pyomo_expr + + +def _add_grad_lag_constraints(m: _BlockData) -> _BlockData: + primal_vars = get_used_unfixed_variables(m) + + m.duals_eq_set = pe.Set() + m.duals_eq = pe.Var(m.duals_eq_set) + + m.duals_ineq_set = pe.Set() + m.duals_ineq = pe.Var(m.duals_ineq_set, bounds=(0, None)) + + obj = get_objective(m) + assert obj.sense == pe.minimize + if obj is None: + lagrangian = 0 + else: + lagrangian = obj.expr + + for c in m.component_data_objects(pe.Constraint, active=True, descend_into=True): + if c.equality: + key = _ConIndex(c, "eq") + m.duals_eq_set.add(key) + lagrangian += m.duals_eq[key] * (c.body - c.upper) + else: + if c.upper is not None: + key = _ConIndex(c, "ub") + m.duals_ineq_set.add(key) + lagrangian += m.duals_ineq[key] * (c.body - c.upper) + if c.lower is not None: + key = _ConIndex(c, "lb") + m.duals_ineq_set.add(key) + lagrangian += m.duals_ineq[key] * (c.lower - c.body) + + for v in primal_vars: + assert v.is_continuous() + if v.ub is not None: + key = _VarIndex(v, "ub") + m.duals_ineq_set.add(key) + lagrangian += m.duals_ineq[key] * (v - v.ub) + if v.lb is not None: + key = _VarIndex(v, "lb") + m.duals_ineq_set.add(key) + lagrangian += m.duals_ineq[key] * (v.lb - v) + + grad_lag = reverse_sd(lagrangian) + + m.grad_lag_set = pe.Set() + m.grad_lag = pe.Constraint(m.grad_lag_set) + for v in primal_vars: + if v in grad_lag and ( + type(grad_lag[v]) not in {float, int} or grad_lag[v] != 0 + ): + key = _VarIndex(v, None) + m.grad_lag_set.add(key) + m.grad_lag[key] = _simplify(grad_lag[v]) == 0 + + return m + + +def _introduce_inequality_slacks(m) -> _BlockData: + m.slacks = pe.Var(m.duals_ineq_set, bounds=(0, None)) + m.ineq_cons_with_slacks = pe.Constraint(m.duals_ineq_set) + + for key in m.duals_ineq_set: + s = m.slacks[key] + bnd = key.bound + + if isinstance(key, _ConIndex): + e = key.con.body + lb = key.con.lower + ub = key.con.upper + else: + assert isinstance(key, _VarIndex) + e = key.var + lb = e.lb + ub = e.ub + + if bnd == "ub": + m.ineq_cons_with_slacks[key] = s + e - ub == 0 + else: + assert bnd == "lb" + m.ineq_cons_with_slacks[key] = s + lb - e == 0 + + for key in m.duals_ineq_set: + if isinstance(key, _ConIndex): + key.con.deactivate() + else: + key.var.setlb(None) + key.var.setub(None) + key.var.domain = pe.Reals + + return m + + +def _do_fbbt(m, uncertain_params): + p_bounds = pe.ComponentMap() + for p in uncertain_params: + p.unfix() + p_bounds[p] = (p.lb, p.ub) + fbbt(m) + for p in uncertain_params: + if p.lb > p_bounds[p][0] + 1e-6 or p.ub < p_bounds[p][1] - 1e-6: + raise RuntimeError( + "The bounds provided in valid_var_bounds were proven to " + "be 
invalid for some values of the uncertain parameters." + ) + p.fix() + + +def add_kkt_with_milp_complementarity_conditions( + m: _BlockData, + uncertain_params: Sequence[_GeneralVarData], + valid_var_bounds: Mapping[_GeneralVarData, Sequence[float]], + default_M=None, +) -> _BlockData: + """ + Generate the KKT conditions for the model m using a MIP + representation of the complementarity conditions. + + Parameters + ---------- + m: _BlockData + The model for which to build the KKT conditions + uncertain_params: Sequence[_GeneralVarData] + These variables are decisions for the outer problem + and should be considered parameters when generating + the KKT conditions + valid_var_bounds: Mapping[_GeneralVarData, Sequence[float]] + Variable bounds that are valid for any values of the + uncertain parameters. This is only used to make the + resulting problem more tractable. + default_M: Optional[float] + M value to use for the Big-M representation of the + complementarity conditions + """ + for v in uncertain_params: + v.fix() + + _add_grad_lag_constraints(m) + obj = get_objective(m) + obj.deactivate() + + _apply_var_bounds(valid_var_bounds) + _do_fbbt(m, uncertain_params) + + _introduce_inequality_slacks(m) + + _do_fbbt(m, uncertain_params) + + m.active_indicator = pe.Var(m.duals_ineq_set, domain=pe.Binary) + m.dual_ineq_0_if_not_active = pe.Constraint(m.duals_ineq_set) + m.slack_0_if_active = pe.Constraint(m.duals_ineq_set) + m.dual_M = pe.Param(m.duals_ineq_set, mutable=True) + m.slack_M = pe.Param(m.duals_ineq_set, mutable=True) + for key in m.duals_ineq_set: + if m.duals_ineq[key].ub is None: + if default_M is None: + raise RuntimeError( + f"could not compute upper bound on multiplier for inequality {key}." + ) + else: + dual_M = default_M + else: + dual_M = m.duals_ineq[key].ub + if m.slacks[key].ub is None: + if default_M is None: + raise RuntimeError( + f"could not compute upper bound on slack for inequality {key}" + ) + else: + slack_M = default_M + else: + slack_M = m.slacks[key].ub + m.dual_M[key].value = dual_M + m.slack_M[key].value = slack_M + m.dual_ineq_0_if_not_active[key] = ( + m.duals_ineq[key] <= m.active_indicator[key] * m.dual_M[key] + ) + m.slack_0_if_active[key] = ( + m.slacks[key] <= (1 - m.active_indicator[key]) * m.slack_M[key] + ) + + for v in uncertain_params: + v.unfix() + + return m diff --git a/idaes/apps/flexibility_analysis/sampling.py b/idaes/apps/flexibility_analysis/sampling.py new file mode 100644 index 0000000000..f3f82daae3 --- /dev/null +++ b/idaes/apps/flexibility_analysis/sampling.py @@ -0,0 +1,528 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +""" +This module provides functions for solving the inner problem of the flexibility +test at different samples of the uncertain parameters. 
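+
+A minimal usage sketch (the model ``m``, its uncertain parameter ``m.theta``, its
+control ``m.z``, and the two ComponentMaps holding nominal values and bounds are
+illustrative placeholders)::
+
+    config = SamplingConfig()
+    config.strategy = SamplingStrategy.grid
+    config.num_points = 5
+    config.solver = pe.SolverFactory("ipopt")
+    sample_points, obj_values, control_values = perform_sampling(
+        m,
+        uncertain_params=[m.theta],
+        param_nominal_values=nominal_values,
+        param_bounds=param_bounds,
+        controls=[m.z],
+        in_place=False,
+        config=config,
+    )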
+""" +from __future__ import annotations +import enum +from typing import Sequence, Union, Mapping, Optional, MutableMapping, Tuple, List +import numpy as np +from pyomo.core.base.block import _BlockData +import pyomo.environ as pe +from pyomo.core.base.var import _GeneralVarData +from pyomo.core.base.param import _ParamData +from pyomo.common.config import ConfigDict, ConfigValue, InEnum +from pyomo.contrib.solver.util import get_objective +from pyomo.common.errors import ApplicationError +from idaes.core.surrogate.pysmo.sampling import LatinHypercubeSampling +from .uncertain_params import _replace_uncertain_params +from .inner_problem import _build_inner_problem +from .indices import _VarIndex +from .check_optimal import assert_optimal_termination + +try: + from tqdm import tqdm +except ImportError: + + def tqdm(items, ncols, desc, disable): # pylint: disable=missing-function-docstring + return items + + +class SamplingStrategy(enum.Enum): + """ + An enum for specifying the sampling method to use. + """ + + grid = "grid" + lhs = "lhs" + + +SamplingStrategy.grid.__doc__ = r"Use evenly spaced samples" +SamplingStrategy.lhs.__doc__ = r"Use latin hypercube sampling" + + +class SamplingInitStrategy(enum.Enum): + """ + An enum for specifying how to initialize the inner problem + of the flexibility test for each sample of the uncertain + parameters + """ + + none = "none" + square = "square" + min_control_deviation = "min_control_deviation" + all = "all" + + +SamplingInitStrategy.none.__doc__ = ( + r"Use the solution from the previous sample to initialize the inner problem" +) +SamplingInitStrategy.square.__doc__ = ( + r"Fix the controls and solve a square problem to initialize the inner problem" +) +SamplingInitStrategy.min_control_deviation.__doc__ = r"Fix the maximum constraint violation to 0 and minimized the square of the differences between the controls and their current values" +SamplingInitStrategy.all.__doc__ = r"Try both square and min_control_deviation" + + +class _GridSamplingState(enum.Enum): + increment = "increment" + decrement = "decrement" + + +class _ParamIterator(object): + def __init__( + self, + param: _GeneralVarData, + num_points: int, + next_param: Optional[_ParamIterator], + ): + self.state = _GridSamplingState.increment + self.ndx = 0 + self.pts = list( + set([float(i) for i in np.linspace(param.lb, param.ub, num_points)]) + ) + self.pts.sort() + self.next_param = next_param + + def reset(self): + # pylint: disable=missing-function-docstring + self.state = _GridSamplingState.increment + self.ndx = 0 + + def get_value(self): + # pylint: disable=missing-function-docstring + res = self.pts[self.ndx] + return res + + def swap_state(self): + # pylint: disable=missing-function-docstring + if self.state == _GridSamplingState.increment: + self.state = _GridSamplingState.decrement + else: + assert self.state == _GridSamplingState.decrement + self.state = _GridSamplingState.increment + + def step(self) -> bool: + # pylint: disable=missing-function-docstring + if self.state == _GridSamplingState.increment: + if self.ndx == len(self.pts) - 1: + if self.next_param is None: + done = True + else: + done = self.next_param.step() + self.swap_state() + else: + self.ndx += 1 + done = False + else: + assert self.state == _GridSamplingState.decrement + if self.ndx == 0: + assert self.next_param is not None + done = self.next_param.step() + self.swap_state() + else: + self.ndx -= 1 + done = False + return done + + +class _GridSamplingIterator(object): + def __init__(self, uncertain_params: 
Sequence[_GeneralVarData], num_points: int): + self.params = list(uncertain_params) + self.param_iterators: List[Optional[_ParamIterator]] = [None] * len(self.params) + self.param_iterators[-1] = _ParamIterator( + param=self.params[-1], num_points=num_points, next_param=None + ) + for ndx in reversed(range(len(self.params) - 1)): + self.param_iterators[ndx] = _ParamIterator( + param=self.params[ndx], + num_points=num_points, + next_param=self.param_iterators[ndx + 1], + ) + self.done = False + + def __next__(self): + if self.done: + raise StopIteration + + res = [i.get_value() for i in self.param_iterators] + self.done = self.param_iterators[0].step() + + return res + + def __iter__(self): + for i in self.param_iterators: + i.reset() + self.done = False + return self + + +def _grid_sampling( + uncertain_params: Sequence[_GeneralVarData], num_points: int, seed: int +): + it = _GridSamplingIterator(uncertain_params=uncertain_params, num_points=num_points) + + sample_points = pe.ComponentMap() + for p in uncertain_params: + sample_points[p] = list() + + n_samples = 0 + for sample in it: + for p, v in zip(uncertain_params, sample): + sample_points[p].append(float(v)) + n_samples += 1 + + return n_samples, sample_points + + +def _lhs_sampling( + uncertain_params: Sequence[_GeneralVarData], num_points: int, seed: int +): + lb_list = list() + ub_list = list() + for p in uncertain_params: + lb_list.append(p.lb) + ub_list.append(p.ub) + sampler = LatinHypercubeSampling( + [lb_list, ub_list], number_of_samples=num_points, sampling_type="creation" + ) + + np.random.seed(seed) + sample_array = sampler.sample_points() + + sample_points = pe.ComponentMap() + for ndx, p in enumerate(uncertain_params): + sample_points[p] = [float(i) for i in sample_array[:, ndx]] + + return num_points, sample_points + + +_sample_strategy_map = dict() +_sample_strategy_map[SamplingStrategy.grid] = _grid_sampling +_sample_strategy_map[SamplingStrategy.lhs] = _lhs_sampling + + +class SamplingConfig(ConfigDict): + r""" + A class for specifying options for sampling the uncertain parameter values + and solving the inner problem of the flexibility test. + + Attributes + ---------- + strategy: SamplingStrategy + The method for sampling the uncertain parameters. (default: SamplingStrategy.lhs) + lhs_seed: int + The seed used for latin hypercube sampling (default: 0) + solver: Union[Solver, OptSolver] + The solver to use for the inner problem of the flexibility test problem + num_points: int + The number of samples of uncertain parameter values to use (default: 100) + enable_progress_bar: bool + If False, no progress bar will be shown (default: True) + initialization_strategy: SamplingInitStrategy + The initialization strategy to use for the inner problems of the + flexibility test at each sample of the uncertain parameter values. + (default: SamplingInitStrategy.none) + total_violation: bool + If True, the objective of the flexibility test will be the sum of + the constraint violations instead of the maximum violation. 
(default: False) + """ + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super().__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + + self.strategy: SamplingStrategy = self.declare( + "strategy", + ConfigValue(domain=InEnum(SamplingStrategy), default=SamplingStrategy.lhs), + ) + self.lhs_seed: int = self.declare( + "lhs_seed", ConfigValue(domain=int, default=0) + ) + self.solver = self.declare( + "solver", ConfigValue(default=pe.SolverFactory("appsi_ipopt")) + ) + self.num_points: int = self.declare( + "num_points", ConfigValue(domain=int, default=100) + ) + self.enable_progress_bar: bool = self.declare( + "enable_progress_bar", ConfigValue(domain=bool, default=True) + ) + self.initialization_strategy: SamplingInitStrategy = self.declare( + "initialization_strategy", + ConfigValue( + domain=InEnum(SamplingInitStrategy), default=SamplingInitStrategy.none + ), + ) + self.total_violation: bool = self.declare( + "total_violation", ConfigValue(domain=bool, default=False) + ) + + +def _deactivate_inequalities(m: _BlockData): + deactivated_cons = list() + for c in m.component_data_objects(pe.Constraint, descend_into=True, active=True): + if not c.equality and c.lb != c.ub: + deactivated_cons.append(c) + c.deactivate() + return deactivated_cons + + +def _init_with_square_problem(m: _BlockData, controls, solver): + for v in controls: + if v.value is None: + raise RuntimeError( + "Cannot initialize sampling problem with square problem because the " + "control values are not initialized." + ) + v.fix() + if m.max_constraint_violation.value is None: + m.max_constraint_violation.value = 0 + m.max_constraint_violation.fix() + deactivated_cons = _deactivate_inequalities(m) + try: + res = solver.solve(m, tee=False, load_solutions=False) + except ApplicationError: + res = None + for c in deactivated_cons: + c.activate() + for v in controls: + v.unfix() + if res is not None and pe.check_optimal_termination(res): + m.solutions.load_from(res) + max_viol = 0 + for c in deactivated_cons: + assert c.lb is None + assert c.ub == 0 + body_val = pe.value(c.body) + if body_val > max_viol: + max_viol = body_val + m.max_constraint_violation.value = max_viol + else: + max_viol = None + m.max_constraint_violation.unfix() + return max_viol + + +def _solve_with_max_viol_fixed(m: _BlockData, controls, solver): + orig_obj = get_objective(m) + orig_obj.deactivate() + orig_max_viol_value = m.max_constraint_violation.value + m.max_constraint_violation.fix(0) + + obj_expr = 0 + for v in controls: + if v.value is None: + obj_expr += v**2 + else: + obj_expr += (v - v.value) ** 2 + m.control_setpoints_obj = pe.Objective(expr=obj_expr) + + try: + res = solver.solve(m, tee=False, load_solutions=False) + if pe.check_optimal_termination(res): + m.solutions.load_from(res) + feasible = True + control_vals = [v.value for v in controls] + else: + feasible = False + control_vals = None + m.max_constraint_violation.value = orig_max_viol_value + except ApplicationError: + feasible = False + control_vals = None + m.max_constraint_violation.value = orig_max_viol_value + + del m.control_setpoints_obj + m.max_constraint_violation.unfix() + orig_obj.activate() + + return feasible, control_vals + + +def _perform_sampling( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + controls: Optional[Sequence[_GeneralVarData]], + config: SamplingConfig, +) -> Tuple[ + 
MutableMapping[Union[_GeneralVarData, _ParamData], Sequence[float]], + Sequence[float], + MutableMapping[_GeneralVarData, Sequence[float]], +]: + unc_param_vars = list() + for p in uncertain_params: + ndx = _VarIndex(p, None) + p_var = m.unc_param_vars[ndx] + unc_param_vars.append(p_var) + n_samples, sample_points = _sample_strategy_map[config.strategy]( + unc_param_vars, config.num_points, config.lhs_seed + ) + + obj_values = list() + obj = get_objective(m) + + control_values = pe.ComponentMap() + for v in controls: + control_values[v] = list() + + if not config.total_violation: + m.max_constraint_violation.value = 0 + orig_max_constraint_violation_ub = m.max_constraint_violation.ub + + for sample_ndx in tqdm( + list(range(n_samples)), + ncols=100, + desc="Sampling", + disable=not config.enable_progress_bar, + ): + for p, p_vals in sample_points.items(): + p.fix(p_vals[sample_ndx]) + + if not config.total_violation and config.initialization_strategy in { + SamplingInitStrategy.square, + SamplingInitStrategy.all, + }: + max_viol_ub = _init_with_square_problem(m, controls, config.solver) + if max_viol_ub is not None: + m.max_constraint_violation.setub(max_viol_ub) + if not config.total_violation and config.initialization_strategy in { + SamplingInitStrategy.min_control_deviation, + SamplingInitStrategy.all, + }: + feasible, control_vals = _solve_with_max_viol_fixed( + m, controls, config.solver + ) + else: + feasible = False + control_vals = None + if feasible: + obj_values.append(0) + for v, val in zip(controls, control_vals): + control_values[v].append(val) + else: + res = config.solver.solve(m) + assert_optimal_termination(res) + obj_values.append(pe.value(obj.expr)) + + for v in controls: + control_values[v].append(v.value) + + if not config.total_violation: + m.max_constraint_violation.setub(orig_max_constraint_violation_ub) + + unc_param_var_to_unc_param_map = pe.ComponentMap( + zip(unc_param_vars, uncertain_params) + ) + sample_points = pe.ComponentMap( + (unc_param_var_to_unc_param_map[p], vals) for p, vals in sample_points.items() + ) + + return sample_points, obj_values, control_values + + +def perform_sampling( + m: _BlockData, + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]], + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float], + param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]], + controls: Optional[Sequence[_GeneralVarData]], + in_place: bool, + config: SamplingConfig, +) -> Tuple[ + MutableMapping[Union[_GeneralVarData, _ParamData], List[float]], + List[float], + MutableMapping[_GeneralVarData, List[float]], +]: + r""" + Sample values of the uncertain parameters and solve the inner problem + of the flexibility test for each sample. + + Parameters + ---------- + m: _BlockData + The pyomo model to be used for the feasibility/flexibility test. + uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]] + A sequence (e.g., list) defining the set of uncertain parameters (:math:`\theta`). + These can be pyomo variables (Var) or parameters (param). However, if parameters are used, + they must be mutable. + param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + nominal values (:math:`\theta^{N}`). 
+ param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Tuple[float, float]] + A mapping (e.g., ComponentMap) from the uncertain parameters (:math:`\theta`) to their + bounds (:math:`\underline{\theta}`, :math:`\overline{\theta}`). + controls: Optional[Sequence[_GeneralVarData]] + A sequence (e.g., list) defining the set of control variables (:math:`z`). + in_place: bool + If True, m is modified in place to generate the model for solving the flexibility test. If False, + the model is cloned first. + config: SamplingConfig + An object defining options for how the flexibility test should be solved. + + Returns + ------- + sample_points: MutableMapping[Union[_GeneralVarData, _ParamData], List[float]] + The sampled values of the uncertain parameters + obj_values: List[float] + The value of the maximum (or total) constraint violation for each sample + of the uncertain parameters + control_values: MutableMapping[_GeneralVarData, List[float]] + The optimal values of the controls for each sample of the uncertain parameters + """ + original_model = m + if not in_place: + m = m.clone() + uncertain_params = [m.find_component(p) for p in uncertain_params] + param_nominal_values = pe.ComponentMap( + (m.find_component(p), val) for p, val in param_nominal_values.items() + ) + param_bounds = pe.ComponentMap( + (m.find_component(p), bnds) for p, bnds in param_bounds.items() + ) + controls = [m.find_component(v) for v in controls] + + _replace_uncertain_params(m, uncertain_params, param_nominal_values, param_bounds) + _build_inner_problem( + m=m, + enforce_equalities=True, + unique_constraint_violations=False, + valid_var_bounds=None, + total_violation=config.total_violation, + total_violation_disjunctions=False, + ) + sample_points, obj_values, control_values = _perform_sampling( + m=m, uncertain_params=uncertain_params, controls=controls, config=config + ) + + sample_points = pe.ComponentMap( + (original_model.find_component(p), vals) for p, vals in sample_points.items() + ) + control_values = pe.ComponentMap( + (original_model.find_component(v), vals) for v, vals in control_values.items() + ) + + return sample_points, obj_values, control_values diff --git a/idaes/apps/flexibility_analysis/simplify.py b/idaes/apps/flexibility_analysis/simplify.py new file mode 100644 index 0000000000..ad1629a16e --- /dev/null +++ b/idaes/apps/flexibility_analysis/simplify.py @@ -0,0 +1,41 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +This module provides a function to simplify pyomo expressions with sympy +""" +from typing import Union +from pyomo.core.expr.numeric_expr import NumericExpression +from pyomo.core.expr.sympy_tools import sympyify_expression, sympy2pyomo_expression +from pyomo.core.expr import is_fixed, value + + +def simplify_expr(expr: NumericExpression) -> Union[float, NumericExpression]: + """ + Simplify a pyomo expression using sympy + + Parameters + ---------- + expr: NumericExpression + The pyomo expression to be simplified + + Returns + ------- + new_expr: Union[float, NumericExpression] + The simplified expression + """ + om, se = sympyify_expression(expr) + se = se.simplify() + new_expr = sympy2pyomo_expression(se, om) + if is_fixed(new_expr): + new_expr = value(new_expr) + return new_expr diff --git a/idaes/apps/flexibility_analysis/tests/__init__.py b/idaes/apps/flexibility_analysis/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/idaes/apps/flexibility_analysis/tests/test_flextest.py b/idaes/apps/flexibility_analysis/tests/test_flextest.py new file mode 100644 index 0000000000..7db8bf2304 --- /dev/null +++ b/idaes/apps/flexibility_analysis/tests/test_flextest.py @@ -0,0 +1,170 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +from idaes.apps.flexibility_analysis import _check_dependencies +import pyomo.environ as pe +from idaes.apps.flexibility_analysis.flextest import ( + build_active_constraint_flextest, + ActiveConstraintConfig, +) +import unittest +from idaes.apps.flexibility_analysis.indices import _VarIndex +from pyomo.contrib.fbbt import interval +import pytest +from idaes.core.util.testing import _enable_scip_solver_for_testing +from contextlib import contextmanager + + +@contextmanager +def scip_solver(): + solver = pe.SolverFactory("scip") + undo_changes = None + + if not solver.available(): + undo_changes = _enable_scip_solver_for_testing() + if not solver.available(): + pytest.skip(reason="SCIP solver not available") + yield solver + if undo_changes is not None: + undo_changes() + + +def create_poly_model(): + m = pe.ConcreteModel() + m.z = pe.Var(initialize=8, bounds=(-10, 15)) + m.theta = pe.Param(mutable=True) + + offset = 1.5 + + m.obj = pe.Objective(expr=m.z**2) + m.c1 = pe.Constraint( + expr=0.01 * (m.z - offset) ** 4 + - 0.05 * (m.z - offset) ** 3 + - (m.z - offset) ** 2 + - (m.z - offset) + - 10 + + m.theta + <= 0 + ) + m.c2 = pe.Constraint( + expr=(-0.02 * m.theta - 14) * m.z + (1.66 * m.theta - 100) <= 0 + ) + m.c3 = pe.Constraint( + expr=30 * m.z - 50 - 4 * m.theta + pe.exp(-0.2 * m.theta + 1) <= 0 + ) + + nominal_values = pe.ComponentMap() + nominal_values[m.theta] = 22.5 + param_bounds = pe.ComponentMap() + param_bounds[m.theta] = (-20, 65) + + return m, nominal_values, param_bounds + + +def create_hx_network_model(): + """ + The model used for this test comes from + + Grossmann, I. E., & Floudas, C. A. (1987). Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693. 
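+
+    The model has four uncertain stream temperatures, energy balances relating
+    them to four intermediate temperatures and the cooling duty ``qc``, minimum
+    temperature-approach constraints, and a single performance constraint
+    requiring ``variable_temps[7]`` to be at most 323.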
+ """ + m = pe.ConcreteModel() + + m.uncertain_temps_set = pe.Set(initialize=[1, 3, 5, 8]) + m.uncertain_temps = pe.Param( + m.uncertain_temps_set, mutable=True, initialize={1: 620, 3: 388, 5: 583, 8: 313} + ) + nominal_values = pe.ComponentMap() + for p in m.uncertain_temps.values(): + nominal_values[p] = p.value + + param_bounds = pe.ComponentMap() + for p in m.uncertain_temps.values(): + param_bounds[p] = (p.value - 10.0, p.value + 10.0) + + m.variable_temps_set = pe.Set(initialize=[2, 4, 6, 7]) + m.variable_temps = pe.Var(m.variable_temps_set, bounds=(0, 1000)) + m.qc = pe.Var() + + m.balances = pe.Constraint([1, 2, 3, 4]) + m.balances[1] = 1.5 * (m.uncertain_temps[1] - m.variable_temps[2]) == 2 * ( + m.variable_temps[4] - m.uncertain_temps[3] + ) + m.balances[2] = m.uncertain_temps[5] - m.variable_temps[6] == 2 * ( + 563 - m.variable_temps[4] + ) + m.balances[3] = m.variable_temps[6] - m.variable_temps[7] == 3 * ( + 393 - m.uncertain_temps[8] + ) + m.balances[4] = m.qc == 1.5 * (m.variable_temps[2] - 350) + + m.temp_approaches = pe.Constraint([1, 2, 3, 4]) + m.temp_approaches[1] = m.variable_temps[2] >= m.uncertain_temps[3] + m.temp_approaches[2] = m.variable_temps[6] >= m.variable_temps[4] + m.temp_approaches[3] = m.variable_temps[7] >= m.uncertain_temps[8] + m.temp_approaches[4] = 393 <= m.variable_temps[6] + + m.performance = pe.Constraint(expr=m.variable_temps[7] <= 323) + + return m, nominal_values, param_bounds + + +@pytest.mark.component +class TestFlexTest(unittest.TestCase): + def test_poly(self): + m, nominal_values, param_bounds = create_poly_model() + var_bounds = pe.ComponentMap() + var_bounds[m.z] = ( + -10, + 10, + ) # originally used (-20, 20), but the scip version was sensitive to this + build_active_constraint_flextest( + m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + valid_var_bounds=var_bounds, + ) + with scip_solver() as opt: + res = opt.solve(m) + pe.assert_optimal_termination(res) + self.assertAlmostEqual(m.max_constraint_violation.value, 48.4649, 4) + self.assertAlmostEqual(m.z.value, -2.6513, 4) + ndx = _VarIndex(m.theta, None) + self.assertAlmostEqual(m.unc_param_vars[ndx].value, 65, 5) + + def test_hx_network(self): + expected = [4, 8.8] + enforce_eq = [False, True] + for exp, ee in zip(expected, enforce_eq): + m, nominal_values, param_bounds = create_hx_network_model() + var_bounds = pe.ComponentMap() + for v in m.variable_temps.values(): + var_bounds[v] = (100, 1000) + var_bounds[m.qc] = interval.mul( + 1.5, 1.5, *interval.sub(100, 1000, 350, 350) + ) + config = ActiveConstraintConfig() + config.enforce_equalities = ee + build_active_constraint_flextest( + m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + valid_var_bounds=var_bounds, + config=config, + ) + with scip_solver() as opt: + res = opt.solve(m, tee=False) + pe.assert_optimal_termination(res) + self.assertAlmostEqual(m.max_constraint_violation.value, exp, 4) diff --git a/idaes/apps/flexibility_analysis/tests/test_indices.py b/idaes/apps/flexibility_analysis/tests/test_indices.py new file mode 100644 index 0000000000..43deb5b3ae --- /dev/null +++ b/idaes/apps/flexibility_analysis/tests/test_indices.py @@ -0,0 +1,72 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of 
Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +from idaes.apps.flexibility_analysis import _check_dependencies +import unittest +import pyomo.environ as pe +from idaes.apps.flexibility_analysis.indices import _VarIndex, _ConIndex +import pytest + + +@pytest.mark.unit +class TestIndices(unittest.TestCase): + def test_var_index(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.y = pe.Var() + + vi1 = _VarIndex(m.x, "lb") + + self.assertIs(vi1.var, m.x) + self.assertEqual(vi1.bound, "lb") + + vi2 = _VarIndex(m.x, "lb") + self.assertEqual(vi1, vi2) + self.assertEqual(hash(vi1), hash(vi2)) + + vi2 = _VarIndex(m.x, "ub") + self.assertNotEqual(vi1, vi2) + + vi2 = _VarIndex(m.y, "lb") + self.assertNotEqual(vi1, vi2) + + self.assertEqual(str(vi1), "('x', 'lb')") + self.assertEqual(repr(vi1), "('x', 'lb')") + + def test_con_index(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.y = pe.Var() + m.c1 = pe.Constraint(expr=m.x == m.y) + m.c2 = pe.Constraint(expr=m.x == 2 * m.y) + + ci1 = _ConIndex(m.c1, "lb") + + self.assertIs(ci1.con, m.c1) + self.assertEqual(ci1.bound, "lb") + + ci2 = _ConIndex(m.c1, "lb") + self.assertEqual(ci1, ci2) + self.assertEqual(hash(ci1), hash(ci2)) + + ci2 = _ConIndex(m.c1, "ub") + self.assertNotEqual(ci1, ci2) + + ci2 = _ConIndex(m.c2, "lb") + self.assertNotEqual(ci1, ci2) + + self.assertEqual(str(ci1), "('c1', 'lb')") + self.assertEqual(repr(ci1), "('c1', 'lb')") + + vi1 = _VarIndex(m.x, "lb") + self.assertNotEqual(ci1, vi1) + self.assertNotEqual(vi1, ci1) diff --git a/idaes/apps/flexibility_analysis/tests/test_sampling.py b/idaes/apps/flexibility_analysis/tests/test_sampling.py new file mode 100644 index 0000000000..3dcefcb1b0 --- /dev/null +++ b/idaes/apps/flexibility_analysis/tests/test_sampling.py @@ -0,0 +1,185 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +from idaes.apps.flexibility_analysis import _check_dependencies +import pyomo.environ as pe +from idaes.apps.flexibility_analysis.sampling import ( + perform_sampling, + SamplingConfig, + SamplingStrategy, +) +import unittest +import numpy as np +import pytest +from pyomo.contrib import appsi +from idaes.core.util.testing import _enable_scip_solver_for_testing +from contextlib import contextmanager + + +@contextmanager +def scip_solver(): + solver = pe.SolverFactory("scip") + undo_changes = None + + if not solver.available(): + undo_changes = _enable_scip_solver_for_testing() + if not solver.available(): + pytest.skip(reason="SCIP solver not available") + yield solver + if undo_changes is not None: + undo_changes() + + +def create_poly_model(): + m = pe.ConcreteModel() + m.z = pe.Var(initialize=8, bounds=(-10, 15)) + m.theta = pe.Param(mutable=True) + + offset = 1.5 + + m.obj = pe.Objective(expr=m.z**2) + m.c1 = pe.Constraint( + expr=0.01 * (m.z - offset) ** 4 + - 0.05 * (m.z - offset) ** 3 + - (m.z - offset) ** 2 + - (m.z - offset) + - 10 + + m.theta + <= 0 + ) + m.c2 = pe.Constraint( + expr=(-0.02 * m.theta - 14) * m.z + (1.66 * m.theta - 100) <= 0 + ) + m.c3 = pe.Constraint( + expr=30 * m.z - 50 - 4 * m.theta + pe.exp(-0.2 * m.theta + 1) <= 0 + ) + + nominal_values = pe.ComponentMap() + nominal_values[m.theta] = 22.5 + param_bounds = pe.ComponentMap() + param_bounds[m.theta] = (-20, 65) + + return m, nominal_values, param_bounds + + +def create_hx_network_model(): + """ + The model used for this test comes from + + Grossmann, I. E., & Floudas, C. A. (1987). Active constraint strategy for + flexibility analysis in chemical processes. Computers & Chemical Engineering, + 11(6), 675-693. 
+ """ + m = pe.ConcreteModel() + + m.uncertain_temps_set = pe.Set(initialize=[1, 3, 5, 8]) + m.uncertain_temps = pe.Param( + m.uncertain_temps_set, mutable=True, initialize={1: 620, 3: 388, 5: 583, 8: 313} + ) + nominal_values = pe.ComponentMap() + for p in m.uncertain_temps.values(): + nominal_values[p] = p.value + + param_bounds = pe.ComponentMap() + for p in m.uncertain_temps.values(): + param_bounds[p] = (p.value - 10.0, p.value + 10.0) + + m.variable_temps_set = pe.Set(initialize=[2, 4, 6, 7]) + m.variable_temps = pe.Var(m.variable_temps_set, bounds=(0, 1000)) + m.qc = pe.Var() + + m.balances = pe.Constraint([1, 2, 3, 4]) + m.balances[1] = 1.5 * (m.uncertain_temps[1] - m.variable_temps[2]) == 2 * ( + m.variable_temps[4] - m.uncertain_temps[3] + ) + m.balances[2] = m.uncertain_temps[5] - m.variable_temps[6] == 2 * ( + 563 - m.variable_temps[4] + ) + m.balances[3] = m.variable_temps[6] - m.variable_temps[7] == 3 * ( + 393 - m.uncertain_temps[8] + ) + m.balances[4] = m.qc == 1.5 * (m.variable_temps[2] - 350) + + m.temp_approaches = pe.Constraint([1, 2, 3, 4]) + m.temp_approaches[1] = m.variable_temps[2] >= m.uncertain_temps[3] + m.temp_approaches[2] = m.variable_temps[6] >= m.variable_temps[4] + m.temp_approaches[3] = m.variable_temps[7] >= m.uncertain_temps[8] + m.temp_approaches[4] = 393 <= m.variable_temps[6] + + m.performance = pe.Constraint(expr=m.variable_temps[7] <= 323) + + return m, nominal_values, param_bounds + + +@pytest.mark.component +class TestSampling(unittest.TestCase): + def test_poly(self): + m, nominal_values, param_bounds = create_poly_model() + with scip_solver() as opt: + config = SamplingConfig() + config.solver = opt + config.num_points = 5 + config.strategy = SamplingStrategy.grid + tmp = perform_sampling( + m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + controls=[m.z], + in_place=True, + config=config, + ) + sample_points, max_violation_values, control_values = tmp + max_viol_ndx = np.argmax(max_violation_values) + self.assertAlmostEqual(max_violation_values[max_viol_ndx], -1.0142, 4) + self.assertAlmostEqual(control_values[m.z][max_viol_ndx], 4.6319, 4) + self.assertAlmostEqual(sample_points[m.theta][max_viol_ndx], 22.5) + + def test_hx_network(self): + m, nominal_values, param_bounds = create_hx_network_model() + with scip_solver() as opt: + config = SamplingConfig() + config.solver = opt + config.num_points = 2 + config.strategy = SamplingStrategy.grid + tmp = perform_sampling( + m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + controls=[m.qc], + in_place=True, + config=config, + ) + sample_points, max_violation_values, control_values = tmp + max_viol_ndx = np.argmax(max_violation_values) + self.assertAlmostEqual(max_violation_values[max_viol_ndx], 8.8) + + def test_hx_network3(self): + m, nominal_values, param_bounds = create_hx_network_model() + opt = appsi.solvers.Highs() + config = SamplingConfig() + config.solver = opt + config.num_points = 2 + config.strategy = SamplingStrategy.grid + tmp = perform_sampling( + m, + uncertain_params=list(nominal_values.keys()), + param_nominal_values=nominal_values, + param_bounds=param_bounds, + controls=[m.qc], + in_place=True, + config=config, + ) + sample_points, max_violation_values, control_values = tmp + max_viol_ndx = np.argmax(max_violation_values) + self.assertAlmostEqual(max_violation_values[max_viol_ndx], 8.8) diff --git 
a/idaes/apps/flexibility_analysis/tests/test_uncertain_params.py b/idaes/apps/flexibility_analysis/tests/test_uncertain_params.py new file mode 100644 index 0000000000..33b0edec21 --- /dev/null +++ b/idaes/apps/flexibility_analysis/tests/test_uncertain_params.py @@ -0,0 +1,152 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. +################################################################################# +from idaes.apps.flexibility_analysis import _check_dependencies +import pyomo.environ as pe +import unittest +from idaes.apps.flexibility_analysis.uncertain_params import _replace_uncertain_params +from idaes.apps.flexibility_analysis.indices import _ConIndex, _VarIndex +from pyomo.core.expr.compare import compare_expressions +import pytest + + +@pytest.mark.unit +class TestReplaceUncertainParams(unittest.TestCase): + def test_replace_mutable_parameter(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.y = pe.Var() + m.p = pe.Param(mutable=True) + m.c1 = pe.Constraint(expr=(m.x + m.p, 0)) + m.c2 = pe.Constraint(expr=(None, m.y - m.x, 0)) + m.c3 = pe.Constraint(expr=(-1, m.y + m.p, 1)) + + nominal_values = pe.ComponentMap() + nominal_values[m.p] = 2.3 + + param_bounds = pe.ComponentMap() + param_bounds[m.p] = (1, 4) + + _replace_uncertain_params( + m=m, + uncertain_params=[m.p], + param_nominal_values=nominal_values, + param_bounds=param_bounds, + ) + + self.assertEqual(len(m.unc_cons), 4) + + self.assertEqual(len(m.unc_param_vars), 1) + v_ndx = _VarIndex(m.p, None) + self.assertEqual(m.unc_param_vars[v_ndx].lb, 1) + self.assertEqual(m.unc_param_vars[v_ndx].ub, 4) + self.assertEqual(m.unc_param_vars[v_ndx].value, 2.3) + + self.assertFalse(m.c1.active) + c_ndx = _ConIndex(m.c1, None) + self.assertEqual(m.unc_cons[c_ndx].lower, 0) + self.assertEqual(m.unc_cons[c_ndx].upper, 0) + self.assertTrue( + compare_expressions(m.unc_cons[c_ndx].body, m.x + m.unc_param_vars[v_ndx]) + ) + + self.assertFalse(m.c2.active) + c_ndx = _ConIndex(m.c2, "ub") + self.assertEqual(m.unc_cons[c_ndx].lower, None) + self.assertEqual(m.unc_cons[c_ndx].upper, 0) + self.assertTrue(compare_expressions(m.unc_cons[c_ndx].body, m.y - m.x)) + + self.assertFalse(m.c3.active) + c_ndx = _ConIndex(m.c3, "lb") + self.assertEqual(m.unc_cons[c_ndx].lower, -1) + self.assertEqual(m.unc_cons[c_ndx].upper, None) + self.assertTrue( + compare_expressions(m.unc_cons[c_ndx].body, m.y + m.unc_param_vars[v_ndx]) + ) + + c_ndx = _ConIndex(m.c3, "ub") + self.assertEqual(m.unc_cons[c_ndx].lower, None) + self.assertEqual(m.unc_cons[c_ndx].upper, 1) + self.assertTrue( + compare_expressions(m.unc_cons[c_ndx].body, m.y + m.unc_param_vars[v_ndx]) + ) + + def test_replace_var(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.p = pe.Var() + m.c1 = pe.Constraint(expr=(m.x + m.p, 0)) + + nominal_values = pe.ComponentMap() + nominal_values[m.p] = 2.3 + + param_bounds = pe.ComponentMap() + param_bounds[m.p] = (1, 
4) + + _replace_uncertain_params( + m=m, + uncertain_params=[m.p], + param_nominal_values=nominal_values, + param_bounds=param_bounds, + ) + + self.assertFalse(m.c1.active) + c_ndx = _ConIndex(m.c1, None) + v_ndx = _VarIndex(m.p, None) + self.assertEqual(m.unc_cons[c_ndx].lower, 0) + self.assertEqual(m.unc_cons[c_ndx].upper, 0) + self.assertTrue( + compare_expressions(m.unc_cons[c_ndx].body, m.x + m.unc_param_vars[v_ndx]) + ) + self.assertEqual(m.unc_param_vars[v_ndx].lb, 1) + self.assertEqual(m.unc_param_vars[v_ndx].ub, 4) + self.assertEqual(m.unc_param_vars[v_ndx].value, 2.3) + + def test_non_constant_var(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.p = pe.Param(initialize=1, mutable=True) + m.x.setlb(m.p) + + nominal_values = pe.ComponentMap() + nominal_values[m.p] = 2.3 + + param_bounds = pe.ComponentMap() + param_bounds[m.p] = (1, 4) + + with self.assertRaises(ValueError): + _replace_uncertain_params( + m=m, + uncertain_params=[m.p], + param_nominal_values=nominal_values, + param_bounds=param_bounds, + ) + + def test_non_constant_var2(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.p = pe.Param(initialize=1, mutable=True) + m.x.setub(m.p) + + nominal_values = pe.ComponentMap() + nominal_values[m.p] = 2.3 + + param_bounds = pe.ComponentMap() + param_bounds[m.p] = (1, 4) + + with self.assertRaises(ValueError): + _replace_uncertain_params( + m=m, + uncertain_params=[m.p], + param_nominal_values=nominal_values, + param_bounds=param_bounds, + ) diff --git a/idaes/apps/flexibility_analysis/tests/test_var_utils.py b/idaes/apps/flexibility_analysis/tests/test_var_utils.py new file mode 100644 index 0000000000..421cab3fb9 --- /dev/null +++ b/idaes/apps/flexibility_analysis/tests/test_var_utils.py @@ -0,0 +1,106 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+#################################################################################
+from idaes.apps.flexibility_analysis import _check_dependencies
+import pyomo.environ as pe
+import unittest
+from idaes.apps.flexibility_analysis.var_utils import (
+    get_all_unfixed_variables,
+    get_used_unfixed_variables,
+    BoundsManager,
+    _remove_var_bounds,
+    _apply_var_bounds,
+)
+import pytest
+
+
+@pytest.mark.unit
+class TestGetVariables(unittest.TestCase):
+    def test_get_all_unfixed_variables(self):
+        m = pe.ConcreteModel()
+        m.x = pe.Var()
+        m.y = pe.Var()
+        m.x_ref = pe.Reference(m.x)
+        m.y.fix(3)
+
+        unfixed_vars = get_all_unfixed_variables(m)
+        self.assertEqual(len(unfixed_vars), 1)
+        self.assertIn(m.x, unfixed_vars)
+        self.assertNotIn(m.y, unfixed_vars)
+
+    def test_get_used_unfixed_variables(self):
+        m = pe.ConcreteModel()
+        m.x = pe.Var([1, 2, 3, 4, 5, 6])
+        m.x[3].fix(1)
+        m.x[4].fix(1)
+        m.x[6].fix(1)
+        m.c1 = pe.Constraint(expr=m.x[1] == m.x[3])
+        m.obj = pe.Objective(expr=m.x[5] + m.x[6])
+        uuf_vars = get_used_unfixed_variables(m)
+        self.assertEqual(len(uuf_vars), 2)
+        self.assertIn(m.x[1], uuf_vars)
+        self.assertIn(m.x[5], uuf_vars)
+
+
+@pytest.mark.unit
+class TestBounds(unittest.TestCase):
+    def test_bounds_manager1(self):
+        m = pe.ConcreteModel()
+        m.x = pe.Var(bounds=(-1, 1))
+
+        bm = BoundsManager(m)
+        bm.save_bounds()
+        m.x.setlb(-2)
+        self.assertEqual(m.x.lb, -2)
+        bm.pop_bounds()
+        self.assertEqual(m.x.lb, -1)
+
+    def test_remove_var_bounds(self):
+        m = pe.ConcreteModel()
+        m.x = pe.Var(bounds=(-1, 1))
+        m.y = pe.Var(domain=pe.NonNegativeReals)
+
+        bm = BoundsManager(m)
+        bm.save_bounds()
+
+        _remove_var_bounds(m)
+        self.assertIsNone(m.x.lb)
+        self.assertIsNone(m.x.ub)
+        self.assertIsNone(m.y.lb)
+        self.assertIsNone(m.y.ub)
+
+        bm.pop_bounds()
+        self.assertEqual(m.x.lb, -1)
+        self.assertEqual(m.x.ub, 1)
+        self.assertEqual(m.y.lb, 0)
+        self.assertIsNone(m.y.ub)
+
+    def test_remove_var_bounds_exception(self):
+        m = pe.ConcreteModel()
+        m.x = pe.Var(domain=pe.Binary)
+        with self.assertRaises(ValueError):
+            _remove_var_bounds(m)
+
+    def test_apply_var_bounds(self):
+        m = pe.ConcreteModel()
+        m.x = pe.Var()
+        m.y = pe.Var(bounds=(-1, 1))
+
+        new_bounds = pe.ComponentMap()
+        new_bounds[m.x] = (-5, 5)
+        new_bounds[m.y] = (0, 2)
+
+        _apply_var_bounds(new_bounds)
+        self.assertEqual(m.x.lb, -5)
+        self.assertEqual(m.x.ub, 5)
+        self.assertEqual(m.y.lb, 0)
+        self.assertEqual(m.y.ub, 1)
diff --git a/idaes/apps/flexibility_analysis/uncertain_params.py b/idaes/apps/flexibility_analysis/uncertain_params.py
new file mode 100644
index 0000000000..fb4e44cc1e
--- /dev/null
+++ b/idaes/apps/flexibility_analysis/uncertain_params.py
@@ -0,0 +1,76 @@
+#################################################################################
+# The Institute for the Design of Advanced Energy Systems Integrated Platform
+# Framework (IDAES IP) was produced under the DOE Institute for the
+# Design of Advanced Energy Systems (IDAES).
+#
+# Copyright (c) 2018-2023 by the software owners: The Regents of the
+# University of California, through Lawrence Berkeley National Laboratory,
+# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
+# University, West Virginia University Research Corporation, et al.
+# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
+# for full copyright and license information.
+#################################################################################
+"""
+This module defines a function for replacing uncertain parameters with variables
+"""
+from typing import Sequence, Union, Mapping
+from pyomo.core.base.block import _BlockData
+import pyomo.environ as pe
+from pyomo.core.expr.visitor import replace_expressions
+from pyomo.core.base.var import _GeneralVarData
+from pyomo.core.base.param import _ParamData
+from .var_utils import get_all_unfixed_variables
+from .indices import _VarIndex, _ConIndex
+
+
+def _replace_uncertain_params(
+    m: _BlockData,
+    uncertain_params: Sequence[Union[_GeneralVarData, _ParamData]],
+    param_nominal_values: Mapping[Union[_GeneralVarData, _ParamData], float],
+    param_bounds: Mapping[Union[_GeneralVarData, _ParamData], Sequence[float]],
+) -> _BlockData:
+    for v in get_all_unfixed_variables(m):
+        if not pe.is_constant(v._lb):  # pylint: disable=protected-access
+            raise ValueError(
+                f"The lower bound on {str(v)} is not constant. All variable bounds must be constant."
+            )
+        if not pe.is_constant(v._ub):  # pylint: disable=protected-access
+            raise ValueError(
+                f"The upper bound on {str(v)} is not constant. All variable bounds must be constant."
+            )
+
+    m.unc_param_vars_set = pe.Set()
+    m.unc_param_vars = pe.Var(m.unc_param_vars_set)
+    sub_map = dict()
+    for p in uncertain_params:
+        key = _VarIndex(p, None)
+        m.unc_param_vars_set.add(key)
+        sub_map[id(p)] = m.unc_param_vars[key]
+        m.unc_param_vars[key].setlb(param_bounds[p][0])
+        m.unc_param_vars[key].setub(param_bounds[p][1])
+        m.unc_param_vars[key].value = param_nominal_values[p]
+
+    m.unc_cons_set = pe.Set()
+    m.unc_cons = pe.Constraint(m.unc_cons_set)
+    for c in list(
+        m.component_data_objects(pe.Constraint, descend_into=True, active=True)
+    ):
+        new_body = replace_expressions(c.body, substitution_map=sub_map)
+        new_lower = replace_expressions(c.lower, substitution_map=sub_map)
+        new_upper = replace_expressions(c.upper, substitution_map=sub_map)
+        if c.equality:
+            key = _ConIndex(c, None)
+            m.unc_cons_set.add(key)
+            m.unc_cons[key] = new_body == new_lower
+        else:
+            if c.lower is not None:
+                key = _ConIndex(c, "lb")
+                m.unc_cons_set.add(key)
+                m.unc_cons[key] = new_lower <= new_body
+            if c.upper is not None:
+                key = _ConIndex(c, "ub")
+                m.unc_cons_set.add(key)
+                m.unc_cons[key] = new_body <= new_upper
+        c.deactivate()
+
+    return m
diff --git a/idaes/apps/flexibility_analysis/var_utils.py b/idaes/apps/flexibility_analysis/var_utils.py
new file mode 100644
index 0000000000..ba2c67c4d9
--- /dev/null
+++ b/idaes/apps/flexibility_analysis/var_utils.py
@@ -0,0 +1,122 @@
+#################################################################################
+# The Institute for the Design of Advanced Energy Systems Integrated Platform
+# Framework (IDAES IP) was produced under the DOE Institute for the
+# Design of Advanced Energy Systems (IDAES).
+#
+# Copyright (c) 2018-2023 by the software owners: The Regents of the
+# University of California, through Lawrence Berkeley National Laboratory,
+# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
+# University, West Virginia University Research Corporation, et al.
+# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
+# for full copyright and license information.
+#################################################################################
+"""
+Some utility functions for working with pyomo variables.
+""" +from typing import Mapping, Sequence, MutableSet +import pyomo.environ as pe +from pyomo.core.base.block import _BlockData +from pyomo.common.collections import ComponentSet +from pyomo.core.expr.visitor import identify_variables +from pyomo.contrib.solver.util import get_objective +from pyomo.core.base.var import _GeneralVarData + + +def get_all_unfixed_variables(m: _BlockData) -> MutableSet[_GeneralVarData]: + """ + Returns a set containing all unfixed variables on the model m. + + Parameters + ---------- + m: _BlockData + The model for which to get the variables. + + Returns + ------- + vset: MutableSet[_GeneralVarData] + The set of all variables on m. + """ + return ComponentSet( + v + for v in m.component_data_objects(pe.Var, descend_into=True, active=True) + if not v.is_fixed() + ) + + +def get_used_unfixed_variables(m: _BlockData) -> MutableSet[_GeneralVarData]: + """ + Returns a set containing all unfixed variables in any active constraint + or objective on the model m. + + Parameters + ---------- + m: _BlockData + The model for which to get the variables. + + Returns + ------- + vset: MutableSet[_GeneralVarData] + The set of all variables in active constraints or objectives. + """ + res = ComponentSet() + for c in m.component_data_objects(pe.Constraint, active=True, descend_into=True): + res.update(v for v in identify_variables(c.body, include_fixed=False)) + obj = get_objective(m) + if obj is not None: + res.update(identify_variables(obj.expr, include_fixed=False)) + return res + + +class BoundsManager(object): + """ + A class for saving and restoring variable bounds. + """ + + def __init__(self, m: _BlockData): + # TODO: maybe use get_used_unfixed_variables here? + self._vars = ComponentSet(m.component_data_objects(pe.Var, descend_into=True)) + self._saved_bounds = list() + + def save_bounds(self): + """ + Save the variable bounds for later use. + """ + bnds = pe.ComponentMap() + for v in self._vars: + bnds[v] = (v.lb, v.ub) + self._saved_bounds.append(bnds) + + def pop_bounds(self, ndx=-1): + """ + Restore the variable bounds that were + previously saved. + + Parameters + ---------- + ndx: int + Indicates which set of bounds to restore. + By default, this will use the most recently + saved bounds. + """ + bnds = self._saved_bounds.pop(ndx) + for v, _bnds in bnds.items(): + lb, ub = _bnds + v.setlb(lb) + v.setub(ub) + + +def _remove_var_bounds(m: _BlockData): + for v in get_all_unfixed_variables(m): + v.setlb(None) + v.setub(None) + if v.is_integer(): + raise ValueError("Unwilling to remove domain from integer variable") + v.domain = pe.Reals + + +def _apply_var_bounds(bounds: Mapping[_GeneralVarData, Sequence[float]]): + for v, (lb, ub) in bounds.items(): + if v.lb is None or v.lb < lb: + v.setlb(lb) + if v.ub is None or v.ub > ub: + v.setub(ub) diff --git a/requirements-dev.txt b/requirements-dev.txt index 384dee177e..8ec2e1ca6c 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -10,6 +10,7 @@ sphinxcontrib-napoleon>=0.5.0 sphinx-argparse==0.4.0 sphinx-book-theme<=1.1.2,>=1.0.0 sphinx-copybutton==0.5.2 +enum-tools[sphinx] ### testing and linting # TODO/NOTE pytest is specified as a dependency in setup.py, but we might want to pin a specific version here @@ -29,6 +30,7 @@ jsonschema jupyter_contrib_nbextensions snowballstemmer==1.2.1 addheader>=0.2.2 +highspy # this will install IDAES in editable mode using the dependencies defined under the `extras_require` tags defined in `setup.py` --editable .[ui,dmf,grid,omlt,coolprop]