```python
import numpy as np

# Batch gradient descent: one parameter update per epoch,
# using the gradient computed over the entire dataset.
for i in range(nb_epochs):
    params_grad = evaluate_gradient(loss_function, data, params)
    params = params - learning_rate * params_grad

# Stochastic gradient descent: one parameter update per training example,
# with the data shuffled at the start of each epoch.
for i in range(nb_epochs):
    np.random.shuffle(data)
    for example in data:
        params_grad = evaluate_gradient(loss_function, example, params)
        params = params - learning_rate * params_grad

# Mini-batch gradient descent: one parameter update per mini-batch
# (here 50 examples), a compromise between the two approaches above.
for i in range(nb_epochs):
    np.random.shuffle(data)
    for batch in get_batches(data, batch_size=50):
        params_grad = evaluate_gradient(loss_function, batch, params)
        params = params - learning_rate * params_grad
```
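The snippets above are pseudocode: `evaluate_gradient`, `get_batches`, `loss_function`, and `data` are placeholders rather than real functions. As a concrete illustration, a minimal runnable sketch of the mini-batch variant applied to least-squares linear regression might look like the following; the synthetic data, the squared-error gradient, and the hyperparameters are illustrative assumptions, not part of the original post.

```python
import numpy as np

# Minimal sketch: mini-batch gradient descent on least-squares linear regression.
# The synthetic data, model, and hyperparameters below are illustrative assumptions.
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 3))                 # 1000 examples, 3 features
true_w = np.array([2.0, -1.0, 0.5])
y = X @ true_w + 0.1 * rng.normal(size=1000)   # noisy linear targets

params = np.zeros(3)
learning_rate = 0.1
nb_epochs = 20
batch_size = 50

def evaluate_gradient(X_batch, y_batch, params):
    # Gradient of the mean squared error 0.5 * ||X w - y||^2 / n w.r.t. params.
    residual = X_batch @ params - y_batch
    return X_batch.T @ residual / len(y_batch)

for epoch in range(nb_epochs):
    perm = rng.permutation(len(y))             # shuffle example indices each epoch
    for start in range(0, len(y), batch_size):
        idx = perm[start:start + batch_size]   # indices of the current mini-batch
        params_grad = evaluate_gradient(X[idx], y[idx], params)
        params = params - learning_rate * params_grad

print(params)  # should end up close to true_w
```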
This article is reposted from Luo Bing's cnblogs blog; original link: http://www.cnblogs.com/hhh5460/p/5353632.html. Please contact the original author before reposting.