I. What Is the SGD Optimization Algorithm?
SGD (Stochastic Gradient Descent) is one of the most widely used optimization methods in machine learning. It is an iterative procedure for finding parameter values that minimize a loss function. Unlike classic full-batch gradient descent, SGD uses a single randomly chosen training sample for each update, which makes every iteration far cheaper to compute. Common variants include plain (single-sample) SGD and mini-batch SGD. The core idea is to approach, through repeated updates, the parameter values at which the gradient of the loss function is zero: at each step, SGD computes the gradient on one sample and moves the parameters a small step (the learning rate) in the opposite direction of that gradient, gradually driving the loss down until the desired convergence precision is reached.
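To make the update rule concrete, here is a minimal sketch of a single SGD step in Python; the names sgd_step and grad_fn are invented for this illustration and are not part of any particular library:

import numpy as np

def sgd_step(theta, X, y, lr, grad_fn):
    # Pick one training example uniformly at random.
    i = np.random.randint(len(X))
    # grad_fn is a placeholder for the gradient of the chosen loss on that single sample.
    gradient = grad_fn(theta, X[i], y[i])
    # Move against the gradient, scaled by the learning rate lr.
    return theta - lr * gradient

Repeating this step many times is essentially all the implementations in Section III do; only the loss (and therefore grad_fn) changes from model to model.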
II. Advantages of the SGD Optimization Algorithm
Compared with classic full-batch gradient descent, SGD has the following advantages:
1. Low memory usage: each update only needs a single sample, so the full dataset and its gradient never have to be held in memory at once.
2. Handles large, high-dimensional data well: because each update processes only one sample, its cost does not grow with the number of training examples, so SGD stays practical on large, high-dimensional datasets.
3. Fast initial progress: every iteration is cheap, so the parameters are updated far more often than with full-batch gradient descent and the loss usually drops quickly at first, although the noisy gradients make the final convergence less smooth.
III. Applications of SGD in Machine Learning
SGD is used throughout machine learning, and especially in deep learning. Below are examples of common models trained with an SGD optimizer:
1. The SGD optimizer in a linear regression model:
import numpy as np

class LinearRegression:
    def __init__(self, lr=0.01, num_iter=100000, fit_intercept=True, verbose=False):
        self.lr = lr                        # learning rate (step size)
        self.num_iter = num_iter            # number of SGD iterations
        self.fit_intercept = fit_intercept  # whether to add a bias column
        self.verbose = verbose              # print the loss periodically

    def __add_intercept(self, X):
        # Prepend a column of ones so the bias is learned as theta[0].
        intercept = np.ones((X.shape[0], 1))
        return np.concatenate((intercept, X), axis=1)

    def __loss(self, h, y):
        # Mean squared error with the conventional 1/2 factor.
        return (1 / (2 * len(y))) * np.sum((h - y) ** 2)

    def fit(self, X, y):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        self.theta = np.zeros(X.shape[1])
        for i in range(self.num_iter):
            # Draw one random sample and update the parameters with its gradient.
            rand_idx = np.random.randint(0, X.shape[0])
            X_i = X[rand_idx, :]
            y_i = y[rand_idx]
            h = np.dot(X_i, self.theta)
            gradient = (h - y_i) * X_i
            self.theta -= self.lr * gradient
            if self.verbose and i % 10000 == 0:
                h = np.dot(X, self.theta)
                print(f'Iteration {i}, loss = {self.__loss(h, y)}')

    def predict(self, X):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        return np.dot(X, self.theta)
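A quick usage sketch, assuming the LinearRegression class above has been defined; the synthetic data and the hyperparameter values are made up purely for illustration:

import numpy as np

rng = np.random.default_rng(42)
X = rng.normal(size=(200, 3))
# Target generated from known weights [2, -1, 0.5] and intercept 3, plus a little noise.
y = X @ np.array([2.0, -1.0, 0.5]) + 3.0 + rng.normal(scale=0.1, size=200)

model = LinearRegression(lr=0.01, num_iter=50000)
model.fit(X, y)
print(model.theta)           # learned [intercept, w1, w2, w3]
print(model.predict(X[:5]))  # predictions for the first five rows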
2. The SGD optimizer in a logistic regression model:
class LogisticRegression:
    def __init__(self, lr=0.01, num_iter=100000, fit_intercept=True, verbose=False):
        self.lr = lr
        self.num_iter = num_iter
        self.fit_intercept = fit_intercept
        self.verbose = verbose

    def __add_intercept(self, X):
        intercept = np.ones((X.shape[0], 1))
        return np.concatenate((intercept, X), axis=1)

    def __sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def __loss(self, h, y):
        # Binary cross-entropy averaged over the dataset.
        return (-1 / len(y)) * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))

    def fit(self, X, y):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        self.theta = np.zeros(X.shape[1])
        for i in range(self.num_iter):
            # One randomly drawn sample per iteration (stochastic update).
            rand_idx = np.random.randint(0, X.shape[0])
            X_i = X[rand_idx, :]
            y_i = y[rand_idx]
            z = np.dot(X_i, self.theta)
            h = self.__sigmoid(z)
            gradient = (h - y_i) * X_i
            self.theta -= self.lr * gradient
            if self.verbose and i % 10000 == 0:
                h = self.__sigmoid(np.dot(X, self.theta))
                print(f'Iteration {i}, loss = {self.__loss(h, y)}')

    def predict_proba(self, X):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        return self.__sigmoid(np.dot(X, self.theta))

    def predict(self, X, threshold=0.5):
        return self.predict_proba(X) >= threshold
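As before, a small usage sketch with invented data, assuming the LogisticRegression class above:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 2))
y = (X[:, 0] + X[:, 1] > 0).astype(float)   # labels from a simple linear rule

clf = LogisticRegression(lr=0.1, num_iter=50000)
clf.fit(X, y)
print(clf.predict_proba(X[:5]))  # estimated probabilities P(y = 1)
print(clf.predict(X[:5]))        # boolean predictions at the default 0.5 threshold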
3. The SGD optimizer in a neural network:
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

class NeuralNetwork:
    def __init__(self, lr=0.01, num_iter=100, hidden_size=4, fit_intercept=True, verbose=False):
        self.lr = lr
        self.num_iter = num_iter
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.hidden_size = hidden_size

    def __add_intercept(self, X):
        intercept = np.ones((X.shape[0], 1))
        return np.concatenate((intercept, X), axis=1)

    def __loss(self, y, y_hat):
        # Binary cross-entropy.
        return -np.mean(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))

    def initialize_parameters(self, X):
        # One tanh hidden layer and a single sigmoid output unit.
        input_size = X.shape[1]
        output_size = 1
        self.params = {
            'W1': np.random.randn(input_size, self.hidden_size) * 0.01,
            'b1': np.zeros((1, self.hidden_size)),
            'W2': np.random.randn(self.hidden_size, output_size) * 0.01,
            'b2': np.zeros((1, output_size))
        }

    def forward_propagation(self, X):
        Z1 = np.dot(X, self.params['W1']) + self.params['b1']
        A1 = np.tanh(Z1)
        Z2 = np.dot(A1, self.params['W2']) + self.params['b2']
        y_hat = sigmoid(Z2)
        cache = {'Z1': Z1, 'A1': A1, 'Z2': Z2}
        return y_hat, cache

    def backward_propagation(self, X, y, y_hat, cache):
        # Gradients of the cross-entropy loss, averaged over the batch.
        dZ2 = y_hat - y
        dW2 = np.dot(cache['A1'].T, dZ2) / X.shape[0]
        db2 = np.sum(dZ2, axis=0, keepdims=True) / X.shape[0]
        dZ1 = np.dot(dZ2, self.params['W2'].T) * (1 - np.power(cache['A1'], 2))
        dW1 = np.dot(X.T, dZ1) / X.shape[0]
        db1 = np.sum(dZ1, axis=0, keepdims=True) / X.shape[0]
        return {'dW1': dW1, 'db1': db1, 'dW2': dW2, 'db2': db2}

    def update_parameters(self, grads):
        self.params['W1'] -= self.lr * grads['dW1']
        self.params['b1'] -= self.lr * grads['db1']
        self.params['W2'] -= self.lr * grads['dW2']
        self.params['b2'] -= self.lr * grads['db2']

    def fit(self, X, y):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        # Ensure y is a column vector so it broadcasts correctly against y_hat.
        y = np.asarray(y).reshape(-1, 1)
        self.initialize_parameters(X)
        for i in range(self.num_iter):
            # For simplicity this example computes the gradient on the full training set
            # each iteration; drawing a random sample or mini-batch here instead would
            # make the updates stochastic.
            y_hat, cache = self.forward_propagation(X)
            loss = self.__loss(y, y_hat)
            grads = self.backward_propagation(X, y, y_hat, cache)
            self.update_parameters(grads)
            if self.verbose and i % 10 == 0:
                print(f'Iteration {i}, loss = {loss}')

    def predict(self, X):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        y_hat, _ = self.forward_propagation(X)
        return y_hat   # predicted probabilities
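A usage sketch for the network above; the XOR-like target, the hidden size, and the learning rate are arbitrary choices for this toy example:

import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(500, 2))
y = (X[:, 0] * X[:, 1] > 0).astype(float)   # non-linear target: quadrant parity

nn = NeuralNetwork(lr=0.5, num_iter=3000, hidden_size=8)
nn.fit(X, y)
preds = (nn.predict(X) >= 0.5).astype(int).ravel()
print('training accuracy:', (preds == y).mean())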
IV. How to Choose SGD Hyperparameters
The main hyperparameters of SGD are the learning rate, the number of iterations, and the batch size. Choosing them well is crucial for model performance; some common guidelines follow (a small training-loop sketch that uses all three appears after this list):
1. Learning rate: the right value depends on the dataset and the model. If it is too large, the algorithm may diverge and never converge; if it is too small, convergence becomes very slow. A common practice is to start around 0.001 and adjust it based on experiments.
2. Number of iterations: it must be large enough for the algorithm to converge to a good solution, but not so large that training wastes time and compute. In practice it is chosen according to the dataset size and the model's complexity.
3. Batch size: usually a fairly small value, but it should be neither too small nor too large. A batch size that is too large drives up the memory needed per update, while one that is too small makes the gradient estimates very noisy and can hurt optimization. Choose it based on the dataset size and the hardware available.
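The sketch below shows how these three hyperparameters typically appear together in a mini-batch SGD training loop; the synthetic data, the model (plain linear regression), and the specific values of lr, num_epochs, and batch_size are assumptions made only for this example:

import numpy as np

lr, num_epochs, batch_size = 0.001, 50, 32   # hyperparameters to tune for your own data

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 5))
y = X @ np.array([1.0, -2.0, 0.5, 3.0, 0.0]) + rng.normal(scale=0.1, size=1000)

theta = np.zeros(X.shape[1])
for epoch in range(num_epochs):
    perm = rng.permutation(len(X))                      # reshuffle the data every epoch
    for start in range(0, len(X), batch_size):
        idx = perm[start:start + batch_size]
        X_b, y_b = X[idx], y[idx]
        grad = X_b.T @ (X_b @ theta - y_b) / len(idx)   # mini-batch gradient of the MSE loss
        theta -= lr * grad                              # SGD update with step size lr

Watching how the training loss reacts when these three values change is usually the quickest way to settle on a good combination.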