GitHub Repository: probml/pyprobml
Path: blob/master/deprecated/scripts/bayes_opt_utils.py
import superimport  # pyprobml helper that installs missing packages on demand

import numpy as np
from scipy.stats import norm
import scipy.optimize
def expected_improvement(X, X_sample, Y_sample, surrogate,
                         improvement_thresh=0.01, trust_incumbent=False,
                         greedy=False):
    '''
    Computes the expected improvement (EI) at points X, based on existing
    samples X_sample and Y_sample, using a probabilistic surrogate model.

    Args:
        X: Points at which EI shall be computed (m x d).
        X_sample: Sample locations (n x d).
        Y_sample: Sample values (n x 1).
        surrogate: a model whose predict(X, return_std=True) returns (mu, sigma).
        improvement_thresh: Exploitation-exploration trade-off parameter (xi).
        trust_incumbent: if True, take the incumbent to be the best observed
            value max(Y_sample); otherwise re-evaluate the surrogate mean at
            X_sample and use its maximum.
        greedy: currently unused.

    Returns:
        Expected improvements at points X.
    '''
    mu, sigma = surrogate.predict(X, return_std=True)
    # Make sigma have the same shape as mu.
    sigma = np.reshape(sigma, np.shape(mu))

    if trust_incumbent:
        current_best = np.max(Y_sample)
    else:
        mu_sample = surrogate.predict(X_sample)
        current_best = np.max(mu_sample)

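    # Closed-form EI under a Gaussian posterior: with f* = current_best,
    # xi = improvement_thresh, and Z = (mu - f* - xi) / sigma,
    #   EI(x) = (mu - f* - xi) * Phi(Z) + sigma * phi(Z),
    # where Phi and phi are the standard normal CDF and PDF. The block below
    # computes exactly this, zeroing EI where sigma is numerically zero.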
    with np.errstate(divide='warn'):
        imp = mu - current_best - improvement_thresh
        Z = imp / sigma
        ei = imp * norm.cdf(Z) + sigma * norm.pdf(Z)
        # Where the posterior is (numerically) deterministic there is nothing
        # left to explore, so clamp EI to zero.
        ei[sigma < 1e-4] = 0.0

    return ei
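

# A minimal sketch, not part of the original script, of calling
# expected_improvement directly. It assumes sklearn's GaussianProcessRegressor
# as the surrogate (any model exposing predict(X, return_std=True) works);
# the toy objective, kernel, and sample size are illustrative assumptions.
def _demo_expected_improvement():
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import Matern

    f = lambda x: (-np.sin(3 * x) - x**2 + 0.7 * x).ravel()
    X_sample = np.random.uniform(-1, 2, size=(5, 1))
    Y_sample = f(X_sample)
    gpr = GaussianProcessRegressor(kernel=Matern(nu=2.5), alpha=1e-6)
    gpr.fit(X_sample, Y_sample)
    # Evaluate EI on a dense grid and pick the maximizer as the next query.
    X_grid = np.linspace(-1, 2, 100).reshape(-1, 1)
    ei = expected_improvement(X_grid, X_sample, Y_sample, gpr)
    print('next query point ~', X_grid[np.argmax(ei)])
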

# bounds: D*2 array, where D = number of parameter dimensions;
# bounds[:, 0] are lower bounds, bounds[:, 1] are upper bounds.
class MultiRestartGradientOptimizer:
    def __init__(self, dim, bounds=None, n_restarts=1, method='L-BFGS-B',
                 callback=None):
        self.bounds = bounds
        self.n_restarts = n_restarts
        self.method = method
        self.dim = dim
        self.callback = callback

    def maximize(self, objective):
        # Minimize the negated objective; reshape so the objective sees a
        # column vector, as surrogate.predict expects.
        neg_obj = lambda x: -objective(x.reshape(-1, 1))
        min_val = np.inf
        best_x = None
        # Random uniform restarts within the box constraints.
        candidates = np.random.uniform(self.bounds[:, 0], self.bounds[:, 1],
                                       size=(self.n_restarts, self.dim))
        for x0 in candidates:
            res = scipy.optimize.minimize(neg_obj, x0=x0, bounds=self.bounds,
                                          method=self.method)
            if res.fun < min_val:
                min_val = res.fun
                best_x = res.x
        return best_x.reshape(-1, 1)
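

# A minimal sketch, not part of the original script, of using
# MultiRestartGradientOptimizer on its own. The quadratic objective and the
# bounds are illustrative assumptions; the true maximizer is x = 0.5.
def _demo_multi_restart_optimizer():
    bounds = np.array([[-1.0, 2.0]])
    opt = MultiRestartGradientOptimizer(dim=1, bounds=bounds, n_restarts=5)
    objective = lambda x: -np.sum((x - 0.5) ** 2)
    x_best = opt.maximize(objective)
    print('argmax ~', x_best.ravel())
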
class BayesianOptimizer:
    def __init__(self, X_init, Y_init, surrogate,
                 acq_fn=expected_improvement, acq_solver=None,
                 n_iter=None, callback=None):
        self.X_sample = X_init
        self.Y_sample = Y_init
        self.surrogate = surrogate
        self.surrogate.fit(self.X_sample, self.Y_sample)
        self.acq_fn = acq_fn
        self.acq_solver = acq_solver
        self.n_iter = n_iter
        self.callback = callback
        # Make sure you "pay" for the initial random guesses.
        self.val_history = Y_init
        self.current_best_val = np.max(self.val_history)
        best_ndx = np.argmax(self.val_history)
        self.current_best_arg = X_init[best_ndx]

    def propose(self):
        def objective(x):
            y = self.acq_fn(x, self.X_sample, self.Y_sample, self.surrogate)
            if np.size(y) == 1:
                y = y[0]  # convert to scalar
            return y
        x_next = self.acq_solver.maximize(objective)
        return x_next

    def update(self, x, y):
        X = np.atleast_2d(x)
        self.X_sample = np.append(self.X_sample, X, axis=0)
        self.Y_sample = np.append(self.Y_sample, y)
        self.surrogate.fit(self.X_sample, self.Y_sample)
        if y > self.current_best_val:
            self.current_best_arg = x
            self.current_best_val = y
        self.val_history = np.append(self.val_history, y)

    def maximize(self, objective):
        for i in range(self.n_iter):
            X_next = self.propose()
            Y_next = objective(X_next)
            self.update(X_next, Y_next)
            if self.callback is not None:
                self.callback(X_next, Y_next, i)
        return self.current_best_arg
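

# A minimal end-to-end sketch, not part of the original script, assuming
# sklearn's GaussianProcessRegressor as the surrogate. The 1-d objective,
# kernel, noise level alpha, and iteration count are illustrative choices.
def _demo_bayesian_optimizer():
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import Matern

    def f(X):
        return (-np.sin(3 * X) - X**2 + 0.7 * X).ravel()

    bounds = np.array([[-1.0, 2.0]])
    X_init = np.array([[-0.9], [1.1]])
    Y_init = f(X_init)

    surrogate = GaussianProcessRegressor(kernel=Matern(nu=2.5), alpha=1e-6)
    solver = MultiRestartGradientOptimizer(dim=1, bounds=bounds, n_restarts=25)
    bo = BayesianOptimizer(X_init, Y_init, surrogate,
                           acq_fn=expected_improvement, acq_solver=solver,
                           n_iter=10)
    x_best = bo.maximize(f)
    print('best x ~', x_best.ravel(), 'best value ~', bo.current_best_val)


if __name__ == '__main__':
    _demo_bayesian_optimizer()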