forked from quaquel/EMAworkbench
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsample_sobol_lake_model.py
140 lines (110 loc) · 4.15 KB
/
sample_sobol_lake_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
"""
An example of the lake problem using the ema workbench.
The model itself is adapted from the Rhodium example by Dave Hadka,
see https://gist.github.com/dhadka/a8d7095c98130d8f73bc
"""
import math
import numpy as np
import pandas as pd
from SALib.analyze import sobol
from scipy.optimize import brentq
from ema_workbench import (
Model,
RealParameter,
ScalarOutcome,
Constant,
ema_logging,
MultiprocessingEvaluator,
Policy,
)
from ema_workbench.em_framework import get_SALib_problem
from ema_workbench.em_framework.evaluators import Samplers
def lake_problem(
b=0.42, # decay rate for P in lake (0.42 = irreversible)
q=2.0, # recycling exponent
mean=0.02, # mean of natural inflows
stdev=0.001, # future utility discount rate
delta=0.98, # standard deviation of natural inflows
alpha=0.4, # utility from pollution
nsamples=100, # Monte Carlo sampling of natural inflows
**kwargs,
):
try:
decisions = [kwargs[str(i)] for i in range(100)]
except KeyError:
decisions = [0] * 100
Pcrit = brentq(lambda x: x**q / (1 + x**q) - b * x, 0.01, 1.5)
nvars = len(decisions)
X = np.zeros((nvars,))
average_daily_P = np.zeros((nvars,))
decisions = np.array(decisions)
reliability = 0.0
for _ in range(nsamples):
X[0] = 0.0
natural_inflows = np.random.lognormal(
math.log(mean**2 / math.sqrt(stdev**2 + mean**2)),
math.sqrt(math.log(1.0 + stdev**2 / mean**2)),
size=nvars,
)
for t in range(1, nvars):
X[t] = (
(1 - b) * X[t - 1]
+ X[t - 1] ** q / (1 + X[t - 1] ** q)
+ decisions[t - 1]
+ natural_inflows[t - 1]
)
average_daily_P[t] += X[t] / float(nsamples)
reliability += np.sum(X < Pcrit) / float(nsamples * nvars)
max_P = np.max(average_daily_P)
utility = np.sum(alpha * decisions * np.power(delta, np.arange(nvars)))
inertia = np.sum(np.absolute(np.diff(decisions)) < 0.02) / float(nvars - 1)
return max_P, utility, inertia, reliability
def analyze(results, ooi):
    """Analyze results using SALib Sobol indices.

    Parameters
    ----------
    results : tuple
        (experiments, outcomes) as returned by ``perform_experiments``
    ooi : str
        name of the outcome of interest to analyze

    Returns
    -------
    tuple of pandas.DataFrame
        (first/total-order stats sorted by ST descending, S2, S2_conf)

    NOTE(review): reads the module-level ``lake_model`` built in the
    __main__ block, so this must run after the model is instantiated.
    """
    _, outcomes = results

    problem = get_SALib_problem(lake_model.uncertainties)
    y = outcomes[ooi]
    sobol_indices = sobol.analyze(problem, y)

    sobol_stats = {key: sobol_indices[key] for key in ["ST", "ST_conf", "S1", "S1_conf"]}
    sobol_stats = pd.DataFrame(sobol_stats, index=problem["names"])
    # BUG FIX: DataFrame.sort_values returns a new frame; the original
    # discarded it, so the stats were never actually sorted.
    sobol_stats = sobol_stats.sort_values(by="ST", ascending=False)

    s2 = pd.DataFrame(sobol_indices["S2"], index=problem["names"], columns=problem["names"])
    s2_conf = pd.DataFrame(
        sobol_indices["S2_conf"], index=problem["names"], columns=problem["names"]
    )
    return sobol_stats, s2, s2_conf
if __name__ == "__main__":
    ema_logging.log_to_stderr(ema_logging.INFO)

    # Build the workbench model around the lake simulation function.
    lake_model = Model("lakeproblem", function=lake_problem)
    lake_model.time_horizon = 100

    # Deeply uncertain model factors.
    lake_model.uncertainties = [
        RealParameter("b", 0.1, 0.45),
        RealParameter("q", 2.0, 4.5),
        RealParameter("mean", 0.01, 0.05),
        RealParameter("stdev", 0.001, 0.005),
        RealParameter("delta", 0.93, 0.99),
    ]

    # One release-decision lever per time step.
    lake_model.levers = [
        RealParameter(str(step), 0, 0.1) for step in range(lake_model.time_horizon)
    ]

    # Outcomes of interest reported by lake_problem.
    lake_model.outcomes = [
        ScalarOutcome(name) for name in ("max_P", "utility", "inertia", "reliability")
    ]

    # Override selected model defaults.
    lake_model.constants = [Constant("alpha", 0.41), Constant("nsamples", 150)]

    # A single fixed policy: release 0.1 at every time step.
    policy = Policy("no release", **{str(step): 0.1 for step in range(100)})

    n_scenarios = 1000
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results = evaluator.perform_experiments(
            n_scenarios, policy, uncertainty_sampling=Samplers.SOBOL
        )

    sobol_stats, s2, s2_conf = analyze(results, "max_P")
    for frame in (sobol_stats, s2, s2_conf):
        print(frame)