Source code for quantecon.optimize.scalar_maximization

import numpy as np
from numba import njit

@njit
def brent_max(func, a, b, args=(), xtol=1e-5, maxiter=500):
    """
    Uses a jitted version of the maximization routine from SciPy's fminbound.
    The algorithm is identical except that it's been switched to maximization
    rather than minimization, and the tests for convergence have been stripped
    out to allow for jit compilation.

    Note that the input function `func` must be jitted or the call will fail.

    Parameters
    ----------
    func : jitted function
    a : scalar
        Lower bound for search
    b : scalar
        Upper bound for search
    args : tuple, optional
        Extra arguments passed to the objective function.
    maxiter : int, optional
        Maximum number of iterations to perform.
    xtol : float, optional
        Absolute error in solution `xopt` acceptable for convergence.

    Returns
    -------
    xf : float
        The maximizer
    fval : float
        The maximum value attained
    info : tuple
        A tuple of the form (status_flag, num_iter).  Here status_flag
        indicates whether or not the maximum number of function calls was
        attained.  A value of 0 implies that the maximum was not hit.
        The value `num_iter` is the number of function calls.

    Examples
    --------
    >>> @njit
    ... def f(x):
    ...     return -(x + 2.0)**2 + 1.0
    ...
    >>> xf, fval, info = brent_max(f, -2, 2)

    """
    if not np.isfinite(a):
        raise ValueError("a must be finite.")
    if not np.isfinite(b):
        raise ValueError("b must be finite.")
    if not a < b:
        raise ValueError("a must be less than b.")

    maxfun = maxiter
    status_flag = 0

    sqrt_eps = np.sqrt(2.2e-16)
    golden_mean = 0.5 * (3.0 - np.sqrt(5.0))
    fulc = a + golden_mean * (b - a)
    nfc, xf = fulc, fulc
    rat = e = 0.0
    x = xf
    fx = -func(x, *args)
    num = 1
    ffulc = fnfc = fx
    xm = 0.5 * (a + b)
    tol1 = sqrt_eps * np.abs(xf) + xtol / 3.0
    tol2 = 2.0 * tol1

    while (np.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
        golden = 1
        # Check for parabolic fit
        if np.abs(e) > tol1:
            golden = 0
            r = (xf - nfc) * (fx - ffulc)
            q = (xf - fulc) * (fx - fnfc)
            p = (xf - fulc) * q - (xf - nfc) * r
            q = 2.0 * (q - r)
            if q > 0.0:
                p = -p
            q = np.abs(q)
            r = e
            e = rat

            # Check for acceptability of parabola
            if ((np.abs(p) < np.abs(0.5*q*r)) and (p > q*(a - xf)) and
                    (p < q * (b - xf))):
                rat = (p + 0.0) / q
                x = xf + rat

                if ((x - a) < tol2) or ((b - x) < tol2):
                    si = np.sign(xm - xf) + ((xm - xf) == 0)
                    rat = tol1 * si
            else:      # do a golden section step
                golden = 1

        if golden:  # Do a golden-section step
            if xf >= xm:
                e = a - xf
            else:
                e = b - xf
            rat = golden_mean*e

        if rat == 0:
            si = np.sign(rat) + 1
        else:
            si = np.sign(rat)

        x = xf + si * np.maximum(np.abs(rat), tol1)
        fu = -func(x, *args)
        num += 1

        if fu <= fx:
            if x >= xf:
                a = xf
            else:
                b = xf
            fulc, ffulc = nfc, fnfc
            nfc, fnfc = xf, fx
            xf, fx = x, fu
        else:
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fnfc) or (nfc == xf):
                fulc, ffulc = nfc, fnfc
                nfc, fnfc = x, fu
            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
                fulc, ffulc = x, fu

        xm = 0.5 * (a + b)
        tol1 = sqrt_eps * np.abs(xf) + xtol / 3.0
        tol2 = 2.0 * tol1

        if num >= maxfun:
            status_flag = 1
            break

    fval = -fx
    info = status_flag, num

    return xf, fval, info
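
Usage sketch (not part of the module source): the snippet below exercises the signature and return values documented above, passing an extra parameter through `args`. The objective `f` and the constant `c = 1.5` are illustrative choices, and the import path mirrors the module name shown at the top of this page; `from quantecon.optimize import brent_max` may also work depending on the installed version.

from numba import njit
from quantecon.optimize.scalar_maximization import brent_max

# A jitted objective with one extra parameter, passed via `args`.
# f(x, c) = -(x - c)**2 is maximized at x = c, so the result is easy to check.
@njit
def f(x, c):
    return -(x - c) ** 2

xf, fval, info = brent_max(f, -10.0, 10.0, args=(1.5,))
status_flag, num_iter = info

print(xf)           # approximately 1.5, the analytic maximizer
print(fval)         # approximately 0.0, the maximum value
print(status_flag)  # 0 if maxiter was not reached

Because the convergence tests were stripped out for jit compilation, checking `status_flag` (and, if needed, tightening `xtol` or raising `maxiter`) is the caller's responsibility.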