Principles of Adam Optimization
Adaptive Moment Estimation (Adam) is another method that computes adaptive learning rates for each parameter. In addition to storing an exponentially decaying average of past squared gradients (as Adadelta and RMSprop do), Adam also keeps an exponentially decaying average of past gradients, similar to momentum:
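$$m_t = \beta_1 m_{t-1} + (1 - \beta_1)\, g_t$$
$$v_t = \beta_2 v_{t-1} + (1 - \beta_2)\, g_t^2$$

Here $g_t$ is the gradient at step $t$; $m_t$ and $v_t$ are estimates of the first moment (the mean) and the second moment (the uncentered variance) of the gradients, respectively.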
Because $m_t$ and $v_t$ are initialized as vectors of zeros, they are biased toward zero during the initial steps, especially when the decay rates are close to 1. The authors counteract these biases by computing bias-corrected first and second moment estimates:
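$$\hat{m}_t = \frac{m_t}{1 - \beta_1^t}, \qquad \hat{v}_t = \frac{v_t}{1 - \beta_2^t}$$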
These corrected estimates are then used to update the parameters, just as we saw with Adadelta and RMSprop, which yields the Adam update rule:
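$$\theta_{t+1} = \theta_t - \frac{\eta}{\sqrt{\hat{v}_t} + \epsilon}\, \hat{m}_t$$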
The authors propose default values of $\beta_1 = 0.9$, $\beta_2 = 0.999$, and $\epsilon = 10^{-8}$. They show empirically that Adam works well in practice and compares favorably to other adaptive learning-rate algorithms.
Iterative Process
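In outline, Adam starts from $m_0 = v_0 = 0$ and, at every step $t$, computes the gradient, updates both moment estimates, corrects their bias, and applies the update rule above. A minimal sketch of a single step is shown below; the function and parameter names are illustrative, not part of the original post:

import numpy as np

def adam_step(theta, m, v, t, grad_fn, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    # One Adam iteration: returns the updated parameters and moment estimates.
    g = grad_fn(theta)                    # gradient at the current parameters
    m = beta1 * m + (1 - beta1) * g       # biased first-moment estimate
    v = beta2 * v + (1 - beta2) * g ** 2  # biased second-moment estimate
    m_hat = m / (1 - beta1 ** t)          # bias correction (t starts at 1)
    v_hat = v / (1 - beta2 ** t)
    theta = theta - lr * m_hat / (np.sqrt(v_hat) + eps)
    return theta, m, v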
Code Practice
import numpy as np
import matplotlib.pyplot as plt


class Optimizer:
    def __init__(self,
                 epsilon=1e-10,   # convergence tolerance on the change of the function value
                 iters=100000,    # maximum number of iterations
                 lamb=0.01,       # learning rate
                 gamma=0.9,       # decay coefficient (not used by the Adam update below)
                 theta=1e-8,      # small constant added to the denominator for numerical stability
                 beta1=0.9,       # decay rate of the first-moment estimate
                 beta2=0.999):    # decay rate of the second-moment estimate
        self.epsilon = epsilon
        self.iters = iters
        self.lamb = lamb
        self.gamma = gamma
        self.theta = theta
        self.beta1 = beta1
        self.beta2 = beta2

    def adam(self, x_0=0.5, y_0=0.5):
        f1, f2 = self.fn(x_0, y_0), 0
        w = np.array([x_0, y_0])  # parameter values after each iteration, used to plot the descent path
        k = 0                     # current iteration count
        m_t = np.zeros(2)         # first-moment estimate
        v_t = np.zeros(2)         # second-moment estimate
        while True:
            if abs(f1 - f2) <= self.epsilon or k > self.iters:
                break
            f1 = self.fn(x_0, y_0)
            g = np.array([self.dx(x_0, y_0), self.dy(x_0, y_0)])  # gradient of f at (x_0, y_0)
            m_t = self.beta1 * m_t + (1 - self.beta1) * g         # update biased first-moment estimate
            v_t = self.beta2 * v_t + (1 - self.beta2) * g ** 2    # update biased second-moment estimate (element-wise square)
            m_hat = m_t / (1 - self.beta1 ** (k + 1))             # bias-corrected first moment
            v_hat = v_t / (1 - self.beta2 ** (k + 1))             # bias-corrected second moment
            x_0, y_0 = np.array([x_0, y_0]) - self.lamb / (np.sqrt(v_hat) + self.theta) * m_hat
            f2 = self.fn(x_0, y_0)
            w = np.vstack((w, (x_0, y_0)))
            k += 1
        self.print_info(k, x_0, y_0, f2)
        self.draw_process(w)

    def print_info(self, k, x_0, y_0, f2):
        print('Iterations: {}'.format(k))
        print('Minimum point: [x_0]: {} [y_0]: {}'.format(x_0, y_0))
        print('Function value at the minimum: {}'.format(f2))

    def draw_process(self, w):
        X = np.arange(0, 1.5, 0.01)
        Y = np.arange(-1, 1, 0.01)
        [x, y] = np.meshgrid(X, Y)
        f = x ** 3 - y ** 3 + 3 * x ** 2 + 3 * y ** 2 - 9 * x
        plt.contour(x, y, f, 20)
        plt.plot(w[:, 0], w[:, 1], 'g*', w[:, 0], w[:, 1])
        plt.show()

    def fn(self, x, y):
        return x ** 3 - y ** 3 + 3 * x ** 2 + 3 * y ** 2 - 9 * x

    def dx(self, x, y):
        return 3 * x ** 2 + 6 * x - 9

    def dy(self, x, y):
        return -3 * y ** 2 + 6 * y


"""
Objective function: f(x, y) = x**3 - y**3 + 3 * x**2 + 3 * y**2 - 9 * x
Local minimizer:    x = 1, y = 0
Local minimum:      f(x, y) = -5
"""
optimizer = Optimizer()
optimizer.adam()