1"""
2An example of using output space exploration on the lake problem
3
4The lake problem itself is adapted from the Rhodium example by Dave Hadka,
5see https://gist.github.com/dhadka/a8d7095c98130d8f73bc
6
7"""
8
9import math
10
11import numpy as np
12from scipy.optimize import brentq
13
14from ema_workbench import (
15 Model,
16 RealParameter,
17 ScalarOutcome,
18 Constant,
19 ema_logging,
20 MultiprocessingEvaluator,
21 Policy,
22)
23
24from ema_workbench.em_framework.outputspace_exploration import OutputSpaceExploration
25
26
def lake_problem(
    b=0.42,  # decay rate for P in lake (0.42 = irreversible)
    q=2.0,  # recycling exponent
    mean=0.02,  # mean of natural inflows
    stdev=0.001,  # standard deviation of natural inflows
    delta=0.98,  # future utility discount rate
    alpha=0.4,  # utility from pollution
    nsamples=100,  # Monte Carlo sampling of natural inflows
    **kwargs,
):
    """Run the lake model and return its four outcome indicators.

    Parameters
    ----------
    b : float
        decay rate of phosphorus (P) in the lake (0.42 = irreversible)
    q : float
        recycling exponent of the P recycling flux from the sediment
    mean : float
        mean of the lognormally distributed natural inflows
    stdev : float
        standard deviation of the natural inflows
    delta : float
        discount rate applied to future utility
    alpha : float
        utility gained per unit of pollution released
    nsamples : int
        number of Monte Carlo samples of the natural inflows
    kwargs :
        anthropogenic release decisions, one keyword per time step,
        keyed by the stringified time index ("0" ... "99")

    Returns
    -------
    tuple of float
        (max_P, utility, inertia, reliability)

    Notes
    -----
    Draws from the global numpy random state; seed ``np.random`` for
    reproducible results.
    """
    # Fall back to a zero-release policy when no (or incomplete)
    # decisions are passed in.
    try:
        decisions = [kwargs[str(i)] for i in range(100)]
    except KeyError:
        decisions = [0.0] * 100

    # Critical P level: the unstable equilibrium separating the clean
    # and the eutrophied basins of attraction.
    Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)
    nvars = len(decisions)
    X = np.zeros((nvars,))
    average_daily_P = np.zeros((nvars,))
    decisions = np.array(decisions)
    reliability = 0.0

    for _ in range(nsamples):
        X[0] = 0.0

        # Lognormal inflows parameterized so that the resulting samples
        # have the requested arithmetic mean and standard deviation.
        natural_inflows = np.random.lognormal(
            math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),
            math.sqrt(math.log(1.0 + stdev**2 / mean**2)),
            size=nvars,
        )

        for t in range(1, nvars):
            # P dynamics: decay, sediment recycling, anthropogenic
            # release, and stochastic natural inflow.
            X[t] = (
                (1 - b) * X[t - 1]
                + X[t - 1] ** q / (1 + X[t - 1] ** q)
                + decisions[t - 1]
                + natural_inflows[t - 1]
            )
            average_daily_P[t] += X[t] / float(nsamples)

        # Fraction of (sample, timestep) pairs with P below Pcrit.
        reliability += np.sum(X < Pcrit) / float(nsamples * nvars)

    max_P = np.max(average_daily_P)
    # Discounted utility of the release decisions.
    utility = np.sum(alpha * decisions * np.power(delta, np.arange(nvars)))
    # Fraction of consecutive decisions differing by less than 0.02.
    inertia = np.sum(np.absolute(np.diff(decisions)) < 0.02) / float(nvars - 1)

    return max_P, utility, inertia, reliability
76
77
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)

    # instantiate the model
    lake_model = Model("lakeproblem", function=lake_problem)
    lake_model.time_horizon = 100

    # specify uncertainties
    lake_model.uncertainties = [
        RealParameter("b", 0.1, 0.45),
        RealParameter("q", 2.0, 4.5),
        RealParameter("mean", 0.01, 0.05),
        RealParameter("stdev", 0.001, 0.005),
        RealParameter("delta", 0.93, 0.99),
    ]

    # set levers, one for each time step in the planning horizon
    lake_model.levers = [RealParameter(str(i), 0, 0.1) for i in range(lake_model.time_horizon)]

    # specify outcomes
    # note that outcomes of kind INFO will be ignored
    lake_model.outcomes = [
        ScalarOutcome("max_P", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("utility", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("inertia", kind=ScalarOutcome.MAXIMIZE),
        ScalarOutcome("reliability", kind=ScalarOutcome.MAXIMIZE),
    ]

    # override some of the defaults of the model
    lake_model.constants = [Constant("alpha", 0.41), Constant("nsamples", 150)]

    # generate a reference policy: a constant release of 0.02 per step
    reference = Policy("nopolicy", **{lever.name: 0.02 for lever in lake_model.levers})

    # number of function evaluations for the search
    nfe = 1000

    # we are doing output space exploration given a reference
    # policy, so we are exploring the output space over the uncertainties.
    # grid_spec specifies the grid structure imposed on the output space:
    # each tuple is associated with an outcome (in declaration order) and
    # gives its minimum, maximum, and epsilon (grid cell size).
    with MultiprocessingEvaluator(lake_model) as evaluator:
        res = evaluator.optimize(
            algorithm=OutputSpaceExploration,
            grid_spec=[
                (0, 12, 0.5),
                (0, 1, 0.05),
                (0, 1, 0.1),
                (0, 1, 0.1),
            ],
            nfe=nfe,
            searchover="uncertainties",
            reference=reference,
        )