1"""
2An example of the lake problem using the ema workbench.
3
4The model itself is adapted from the Rhodium example by Dave Hadka,
5see https://gist.github.com/dhadka/a8d7095c98130d8f73bc
6
7"""
8
9import math
10import time
11
12import numpy as np
13from scipy.optimize import brentq
14
15from ema_workbench import (
16 Model,
17 RealParameter,
18 ScalarOutcome,
19 Constant,
20 ema_logging,
21 MPIEvaluator,
22 save_results,
23)
24
25
def lake_problem(
    b=0.42,  # decay rate for P in lake (0.42 = irreversible)
    q=2.0,  # recycling exponent
    mean=0.02,  # mean of natural inflows
    stdev=0.001,  # standard deviation of natural inflows
    delta=0.98,  # future utility discount rate
    alpha=0.4,  # utility from pollution
    nsamples=100,  # Monte Carlo sampling of natural inflows
    **kwargs,
):
    """Simulate the shallow-lake pollution model and score one release policy.

    The yearly anthropogenic release decisions are passed through ``kwargs``
    as string-keyed levers ``"0"`` .. ``"99"``; if any is missing, a zero
    release policy is used. Natural phosphorus inflows are sampled from a
    lognormal distribution (``nsamples`` Monte Carlo draws per time step),
    so ``max_P`` and ``reliability`` are stochastic unless the NumPy global
    RNG is seeded by the caller.

    Parameters
    ----------
    b : float
        Decay rate for P in the lake (0.42 is the irreversible regime).
    q : float
        Recycling exponent of the lake's internal P feedback.
    mean, stdev : float
        Mean and standard deviation of the lognormal natural inflows.
    delta : float
        Discount factor applied to future utility.
    alpha : float
        Utility weight on the release decisions.
    nsamples : int
        Number of Monte Carlo realizations of the natural inflows.

    Returns
    -------
    tuple of float
        ``(max_P, utility, inertia, reliability)`` where ``max_P`` is the
        peak of the sample-mean pollution trajectory, ``utility`` the
        discounted utility of the releases, ``inertia`` the fraction of
        year-to-year decision changes larger than 0.02, and ``reliability``
        the fraction of (sample, time) states below the critical level.
    """
    try:
        decisions = [kwargs[str(i)] for i in range(100)]
    except KeyError:
        decisions = [0] * 100
        print("No valid decisions found, using 0 water release every year as default")

    nvars = len(decisions)
    decisions = np.array(decisions)

    # Critical P level: root of the net-loading balance x^q/(1+x^q) - b*x,
    # bracketed away from the trivial root at x = 0.
    Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)

    # Lognormal inflows parameterized so the *arithmetic* mean and stdev of
    # the draws match `mean` and `stdev` (hence the log-space conversion).
    natural_inflows = np.random.lognormal(
        mean=math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),
        sigma=math.sqrt(math.log(1.0 + stdev**2 / mean**2)),
        size=(nsamples, nvars),
    )

    # Pollution trajectories: one row per Monte Carlo sample, X[:, 0] = 0.
    X = np.zeros((nsamples, nvars))

    # Lake dynamics: retention + nonlinear internal recycling + release + inflow.
    for t in range(1, nvars):
        X[:, t] = (
            (1 - b) * X[:, t - 1]
            + (X[:, t - 1] ** q / (1 + X[:, t - 1] ** q))
            + decisions[t - 1]
            + natural_inflows[:, t - 1]
        )

    # Mean pollution across samples at each time step.
    average_daily_P = np.mean(X, axis=0)

    # Fraction of all (sample, time) states that stay below the critical level.
    reliability = np.sum(X < Pcrit) / float(nsamples * nvars)

    # Peak of the sample-mean pollution trajectory.
    max_P = np.max(average_daily_P)

    # Discounted utility of the release decisions.
    utility = np.sum(alpha * decisions * np.power(delta, np.arange(nvars)))

    # Fraction of year-to-year decision changes larger than 0.02.
    # max(..., 1) guards the degenerate single-step horizon (no diffs).
    inertia = np.sum(np.abs(np.diff(decisions)) > 0.02) / float(max(nvars - 1, 1))

    return max_P, utility, inertia, reliability
83
84
if __name__ == "__main__":
    import ema_workbench

    # launch with: mpiexec -n 1 -usize {ntasks} python example_mpi_lake_model.py
    t0 = time.perf_counter()

    ema_logging.log_to_stderr(ema_logging.INFO, pass_root_logger_level=True)
    ema_logging.get_rootlogger().info(f"{ema_workbench.__version__}")

    # wrap the lake model function in a workbench Model
    model = Model("lakeproblem", function=lake_problem)
    model.time_horizon = 100

    # deeply uncertain factors, sampled per scenario
    model.uncertainties = [
        RealParameter(name, lower, upper)
        for name, lower, upper in (
            ("b", 0.1, 0.45),
            ("q", 2.0, 4.5),
            ("mean", 0.01, 0.05),
            ("stdev", 0.001, 0.005),
            ("delta", 0.93, 0.99),
        )
    ]

    # one release-decision lever per year of the time horizon
    model.levers = [RealParameter(str(i), 0, 0.1) for i in range(model.time_horizon)]

    # model outputs to record for every experiment
    model.outcomes = [
        ScalarOutcome(name) for name in ("max_P", "utility", "inertia", "reliability")
    ]

    # override selected model defaults
    model.constants = [Constant("alpha", 0.41), Constant("nsamples", 150)]

    # experiment design: random scenarios crossed with random policies
    n_scenarios = 10000
    n_policies = 4

    with MPIEvaluator(model) as evaluator:
        results = evaluator.perform_experiments(n_scenarios, n_policies, chunksize=250)

    save_results(results, "test.tar.gz")

    print(time.perf_counter() - t0)