Path: blob/master/deprecated/scripts/bayes_opt_utils.py
import superimport

import numpy as np
from scipy.stats import norm

#from scipy.optimize import minimize
import scipy.optimize


def expected_improvement(X, X_sample, Y_sample, surrogate,
                         improvement_thresh=0.01, trust_incumbent=False,
                         greedy=False):
    '''
    Computes the expected improvement (EI) at points X, based on existing
    samples X_sample and Y_sample, using a probabilistic surrogate model.
    Uses the closed form EI = imp * Phi(Z) + sigma * phi(Z), where
    imp = mu - current_best - improvement_thresh and Z = imp / sigma.

    Args:
        X: Points at which EI shall be computed (m x d).
        X_sample: Sample locations (n x d).
        Y_sample: Sample values (n x 1).
        surrogate: a model whose predict(X, return_std=True) returns (mu, sigma).
        improvement_thresh: Exploitation-exploration trade-off parameter.
        trust_incumbent: whether to trust the current best observation,
            or to re-evaluate the surrogate mean at the sample locations.
        greedy: unused; kept for interface compatibility.

    Returns:
        Expected improvements at points X.
    '''
    #X = np.atleast_2d(X)
    mu, sigma = surrogate.predict(X, return_std=True)
    # Make sigma have the same shape as mu
    #sigma = sigma.reshape(-1, X_sample.shape[1])
    sigma = np.reshape(sigma, np.shape(mu))

    if trust_incumbent:
        current_best = np.max(Y_sample)
    else:
        mu_sample = surrogate.predict(X_sample)
        current_best = np.max(mu_sample)

    with np.errstate(divide='warn'):
        imp = mu - current_best - improvement_thresh
        Z = imp / sigma
        ei = imp * norm.cdf(Z) + sigma * norm.pdf(Z)
        # Zero out EI where the predictive variance is numerically zero
        #ei[sigma == 0.0] = 0.0
        ei[sigma < 1e-4] = 0.0

    return ei


# bounds: D x 2 array, where D = number of parameter dimensions;
# bounds[:, 0] are lower bounds, bounds[:, 1] are upper bounds.
class MultiRestartGradientOptimizer:
    def __init__(self, dim, bounds=None, n_restarts=1, method='L-BFGS-B',
                 callback=None):
        self.bounds = bounds
        self.n_restarts = n_restarts
        self.method = method
        self.dim = dim

    def maximize(self, objective):
        # Maximize by minimizing the negated objective from several random
        # starting points, keeping the best local optimum found.
        #neg_obj = lambda x: -objective(x)
        neg_obj = lambda x: -objective(x.reshape(-1, 1))
        min_val = np.inf
        best_x = None
        candidates = np.random.uniform(self.bounds[:, 0], self.bounds[:, 1],
                                       size=(self.n_restarts, self.dim))
        for x0 in candidates:
            res = scipy.optimize.minimize(neg_obj, x0=x0, bounds=self.bounds,
                                          method=self.method)
            if res.fun < min_val:
                min_val = res.fun
                best_x = res.x
        return best_x.reshape(-1, 1)


class BayesianOptimizer:
    def __init__(self, X_init, Y_init, surrogate,
                 acq_fn=expected_improvement, acq_solver=None,
                 n_iter=None, callback=None):
        self.X_sample = X_init
        self.Y_sample = Y_init
        self.surrogate = surrogate
        self.surrogate.fit(self.X_sample, self.Y_sample)
        self.acq_fn = acq_fn
        self.acq_solver = acq_solver
        self.n_iter = n_iter
        self.callback = callback
        # Make sure you "pay" for the initial random guesses
        self.val_history = Y_init
        self.current_best_val = np.max(self.val_history)
        best_ndx = np.argmax(self.val_history)
        self.current_best_arg = X_init[best_ndx]

    def propose(self):
        # Pick the next query point by maximizing the acquisition function.
        def objective(x):
            y = self.acq_fn(x, self.X_sample, self.Y_sample, self.surrogate)
            if np.size(y) == 1:
                y = y[0]  # convert to scalar
            return y
        x_next = self.acq_solver.maximize(objective)
        return x_next

    def update(self, x, y):
        # Absorb the new observation and refit the surrogate.
        X = np.atleast_2d(x)
        self.X_sample = np.append(self.X_sample, X, axis=0)
        self.Y_sample = np.append(self.Y_sample, y)
        self.surrogate.fit(self.X_sample, self.Y_sample)
        if y > self.current_best_val:
            self.current_best_arg = x
            self.current_best_val = y
        self.val_history = np.append(self.val_history, y)

    def maximize(self, objective):
        for i in range(self.n_iter):
            X_next = self.propose()
            Y_next = objective(X_next)
            #print("BO iter {}, xnext={}, ynext={:0.3f}".format(i, X_next, Y_next))
            self.update(X_next, Y_next)
            if self.callback is not None:
                self.callback(X_next, Y_next, i)
        return self.current_best_arg
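

# ----------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal end-to-end run
# on a toy 1-d objective, assuming a scikit-learn GP as the surrogate. The
# kernel choice, noise level alpha, the toy function, and the iteration
# counts below are illustrative assumptions, not values prescribed by this
# file; any model exposing fit() and predict(X, return_std=True) would do.
if __name__ == "__main__":
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import Matern

    def toy_objective(X):
        # Smooth 1-d test function with its global maximum near x = -0.36
        X = np.ravel(X)
        return -np.sin(3 * X) - X ** 2 + 0.7 * X

    bounds = np.array([[-1.0, 2.0]])          # D x 2, here D = 1
    X_init = np.array([[-0.9], [1.1]])        # two initial design points
    Y_init = toy_objective(X_init)

    gp = GaussianProcessRegressor(kernel=Matern(nu=2.5), alpha=1e-6,
                                  normalize_y=True)
    # Many restarts help, since EI is multi-modal and L-BFGS-B is local.
    solver = MultiRestartGradientOptimizer(dim=1, bounds=bounds,
                                           n_restarts=25)
    bo = BayesianOptimizer(X_init, Y_init, gp,
                           acq_fn=expected_improvement,
                           acq_solver=solver, n_iter=10)
    bo.maximize(toy_objective)
    print("best x =", bo.current_best_arg, "best y =", bo.current_best_val)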