optimization_lake_model_dps.py

"""
This example replicates Quinn, J.D., Reed, P.M., Keller, K. (2017)
Direct policy search for robust multi-objective management of deeply
uncertain socio-ecological tipping points. Environmental Modelling &
Software 92, 125-141.

It also showcases how the workbench can be used to apply the MORDM extension
suggested by Watson, A.A., Kasprzyk, J.R. (2017) Incorporating deeply uncertain
factors into the many objective search process. Environmental Modelling &
Software 89, 159-171.

"""

import math

import numpy as np
from scipy.optimize import brentq

from ema_workbench import (
    Model,
    RealParameter,
    ScalarOutcome,
    Constant,
    ema_logging,
    MultiprocessingEvaluator,
    CategoricalParameter,
    Scenario,
)

from ema_workbench.em_framework.optimization import ArchiveLogger, EpsilonProgress


# Created on 1 Jun 2017
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>


def get_antropogenic_release(xt, c1, c2, r1, r2, w1):
    """

    Parameters
    ----------
    xt : float
         pollution in lake at time t
    c1 : float
         center rbf 1
    c2 : float
         center rbf 2
    r1 : float
         radius rbf 1
    r2 : float
         radius rbf 2
    w1 : float
         weight of rbf 1

    Returns
    -------
    float
         anthropogenic release at time t, bounded to the interval [0.01, 0.1]

    note:: w2 = 1 - w1

    """

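    # decision rule from Quinn et al. (2017): a weighted sum of two cubic
    # radial basis functions of the current pollution level, with the
    # resulting release clipped to the [0.01, 0.1] range used in this example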
    rule = w1 * (abs(xt - c1) / r1) ** 3 + (1 - w1) * (abs(xt - c2) / r2) ** 3
    at1 = max(rule, 0.01)
    at = min(at1, 0.1)

    return at


def lake_problem(
    b=0.42,  # decay rate for P in lake (0.42 = irreversible)
    q=2.0,  # recycling exponent
    mean=0.02,  # mean of natural inflows
    stdev=0.001,  # standard deviation of natural inflows
    delta=0.98,  # future utility discount rate
    alpha=0.4,  # utility from pollution
    nsamples=100,  # Monte Carlo sampling of natural inflows
    myears=1,  # number of years simulated
    c1=0.25,
    c2=0.25,
    r1=0.5,
    r2=0.5,
    w1=0.5,
):
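    # Pcrit is the critical pollution threshold (tipping point): the nonzero
    # root of x**q / (1 + x**q) = b * x, found numerically with Brent's method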
    Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)

    X = np.zeros(myears)
    average_daily_P = np.zeros(myears)
    reliability = 0.0
    inertia = 0
    utility = 0

    for _ in range(nsamples):
        X[0] = 0.0
        decision = 0.1

        decisions = np.zeros(myears)
        decisions[0] = decision

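        # convert the arithmetic mean and standard deviation of the natural
        # inflows into the mu and sigma parameters of the underlying normal
        # distribution expected by np.random.lognormal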
        natural_inflows = np.random.lognormal(
            math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),
            math.sqrt(math.log(1.0 + stdev**2 / mean**2)),
            size=myears,
        )

        for t in range(1, myears):
            # here we use the decision rule
            decision = get_antropogenic_release(X[t - 1], c1, c2, r1, r2, w1)
            decisions[t] = decision

            X[t] = (
                (1 - b) * X[t - 1]
                + X[t - 1] ** q / (1 + X[t - 1] ** q)
                + decision
                + natural_inflows[t - 1]
            )
            average_daily_P[t] += X[t] / nsamples

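        # aggregate the remaining objectives over samples and years:
        # - reliability: fraction of (sample, year) pairs with P below Pcrit
        # - inertia: fraction of year-to-year changes in the release smaller
        #   than 0.02
        # - utility: discounted benefit of the anthropogenic release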
        reliability += np.sum(X < Pcrit) / (nsamples * myears)
        inertia += np.sum(np.absolute(np.diff(decisions)) < 0.02) / (nsamples * myears)
        utility += np.sum(alpha * decisions * np.power(delta, np.arange(myears))) / nsamples
    max_P = np.max(average_daily_P)

    return max_P, utility, inertia, reliability


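# For a quick check outside the workbench, the model function can also be
# called directly, e.g. lake_problem(myears=100) returns
# (max_P, utility, inertia, reliability) for the default decision rule.

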
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)

    # instantiate the model
    lake_model = Model("lakeproblem", function=lake_problem)
    # specify uncertainties
    lake_model.uncertainties = [
        RealParameter("b", 0.1, 0.45),
        RealParameter("q", 2.0, 4.5),
        RealParameter("mean", 0.01, 0.05),
        RealParameter("stdev", 0.001, 0.005),
        RealParameter("delta", 0.93, 0.99),
    ]

    # set levers
    lake_model.levers = [
        RealParameter("c1", -2, 2),
        RealParameter("c2", -2, 2),
        RealParameter("r1", 0, 2),
        RealParameter("r2", 0, 2),
        CategoricalParameter("w1", np.linspace(0, 1, 10)),
    ]
    # specify outcomes
    lake_model.outcomes = [
        ScalarOutcome("max_P", kind=ScalarOutcome.MINIMIZE),
        ScalarOutcome("utility", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("inertia", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("reliability", kind=ScalarOutcome.MAXIMIZE),
    ]

    # override some of the defaults of the model
    lake_model.constants = [
        Constant("alpha", 0.41),
        Constant("nsamples", 100),
        Constant("myears", 100),
    ]

    # reference is optional, but can be used to implement search for
    # various user specified scenarios along the lines suggested by
    # Watson and Kasprzyk (2017)
    reference = Scenario("reference", b=0.4, q=2, mean=0.02, stdev=0.01)

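    # convergence tracking: ArchiveLogger stores snapshots of the archive in a
    # tar.gz file under ./data for later inspection, while EpsilonProgress
    # tracks epsilon progress over the number of function evaluations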
    convergence_metrics = [
        ArchiveLogger(
            "./data",
            [l.name for l in lake_model.levers],
            [o.name for o in lake_model.outcomes],
            base_filename="lake_model_dps_archive.tar.gz",
        ),
        EpsilonProgress(),
    ]

    with MultiprocessingEvaluator(lake_model) as evaluator:
        results, convergence = evaluator.optimize(
            searchover="levers",
            nfe=100000,
            epsilons=[0.1] * len(lake_model.outcomes),
            reference=reference,
            convergence=convergence_metrics,
        )
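
    # A minimal post-processing sketch (not part of the original example):
    # it assumes matplotlib is available and that the convergence DataFrame
    # returned above exposes "nfe" and "epsilon_progress" columns, as produced
    # when EpsilonProgress is used as a convergence metric.
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot(convergence.nfe, convergence.epsilon_progress)
    ax.set_xlabel("nfe")
    ax.set_ylabel("epsilon progress")
    plt.show()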