## Principles

### Closed-Form Solution
Solving in normal-equation form means directly minimizing the least-squares objective

$$J(\theta) = (X\theta - y)^T (X\theta - y)$$

First expand $J(\theta)$:

$$J(\theta) = \theta^T X^T X \theta - 2\theta^T X^T y + y^T y$$

Differentiate with respect to $\theta$:

$$\frac{\partial J(\theta)}{\partial \theta} = 2X^T X \theta - 2X^T y$$

Setting $\frac{\partial J(\theta)}{\partial \theta} = 0$ gives:

$$\theta = (X^T X)^{-1} X^T y$$
This is the closed-form solution. One caveat: if the feature matrix $X$ is not of full rank (i.e., some features are linearly correlated), the matrix inversion in the normal equation can be numerically unstable.
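In that case, one common remedy (an addition here, not part of the original derivation) is to replace the explicit inverse with the Moore-Penrose pseudo-inverse, which is computed via SVD and returns the minimum-norm least-squares solution even for a rank-deficient $X$. A minimal sketch:

```python
import numpy as np

# Sketch: pseudo-inverse fallback when X^T X is singular or ill-conditioned.
# np.linalg.pinv yields the minimum-norm least-squares solution via SVD.
def normal_equation_pinv(X, y):
    return np.linalg.pinv(X) @ y
```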
### Gradient Descent Solution
Model:

$$h_\theta(x) = \theta^T x = \sum_{j=0}^{n} \theta_j x_j$$

Note: $x_j$ denotes the $j$-th component of $x$.

Loss function:

$$J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right)^2$$

Note: $x^{(i)}$ denotes the $i$-th sample.

Objective:

$$\min_{\theta} J(\theta)$$
Remarks:

The loss function $J(\theta)$ is a quadratic form in the parameters $\theta$; expanding it in matrix form:

$$J(\theta) = \frac{1}{2m} (X\theta - y)^T (X\theta - y)$$

Taking the partial derivative of $J(\theta)$ with respect to each $\theta_j$ gives:

$$\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)}$$

or, vectorized, $\nabla_\theta J(\theta) = \frac{1}{m} X^T (X\theta - y)$, which is exactly the `gradient` computed in the implementation below.

At each iteration the parameters are updated against the gradient with learning rate $\alpha$:

$$\theta_j := \theta_j - \alpha \frac{\partial J(\theta)}{\partial \theta_j}$$
Gradient descent procedure:

1. Initialize $\theta$ (randomly or with zeros).
2. Compute the gradient $\nabla_\theta J(\theta) = \frac{1}{m} X^T (X\theta - y)$.
3. Update $\theta := \theta - \alpha \nabla_\theta J(\theta)$.
4. Repeat steps 2–3 for a fixed number of epochs or until the loss converges (a toy sketch follows below).
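Before the full implementation, here is a minimal self-contained sketch of these steps on synthetic data (the data, learning rate, and iteration count are illustrative assumptions, not part of the original post):

```python
import numpy as np

# Toy example: recover known coefficients [2, -3] by gradient descent
rng = np.random.default_rng(0)
X_toy = np.c_[np.ones(50), rng.uniform(-1, 1, 50)]   # bias column + one feature
y_toy = X_toy @ np.array([[2.0], [-3.0]]) + 0.1 * rng.standard_normal((50, 1))

theta = np.zeros((2, 1))                              # step 1: initialize
for _ in range(2000):                                 # step 4: repeat
    grad = X_toy.T @ (X_toy @ theta - y_toy) / len(y_toy)  # step 2: (1/m) X^T (X theta - y)
    theta -= 0.1 * grad                               # step 3: theta := theta - alpha * grad
print(theta.ravel())                                  # approaches [2, -3]
```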
## Python Implementation

### Imports

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import time

%matplotlib inline
%config InlineBackend.figure_format = 'svg'
```
### Load the Dataset

```python
df = pd.read_csv("./housing.csv")
print(df.head())
df.info()  # info() prints directly and returns None, so no print() wrapper is needed
```
### Data Preprocessing

```python
# Fill missing total_bedrooms values with the column median
total_bedrooms = df.loc[:, "total_bedrooms"].values.reshape(-1, 1)
filled_df = df.copy()
filled_df.loc[:, "total_bedrooms"] = SimpleImputer(strategy="median").fit_transform(total_bedrooms)
filled_df.info()

# One-hot encode the categorical ocean_proximity column
code = OneHotEncoder().fit_transform(filled_df.loc[:, "ocean_proximity"].values.reshape(-1, 1))
coded_df = pd.concat([filled_df, pd.DataFrame(code.toarray())], axis=1)
coded_df.drop(["ocean_proximity"], axis=1, inplace=True)
coded_df.columns = list(coded_df.columns[:-5]) + ["ocean_0", "ocean_1", "ocean_2", "ocean_3", "ocean_4"]
coded_df.head(10)
```
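Since the one-hot columns are renamed purely by position, it may help to confirm which category each `ocean_i` column encodes. A small check, assuming the same default `OneHotEncoder` settings as above:

```python
# Optional check: ocean_0 ... ocean_4 follow the encoder's sorted category order
encoder = OneHotEncoder()
encoder.fit(filled_df.loc[:, "ocean_proximity"].values.reshape(-1, 1))
print(encoder.categories_[0])
```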
### Split the Dataset

```python
feature = coded_df.iloc[:, :8].join(coded_df.iloc[:, -5:])
label = coded_df["median_house_value"]
Xtrain, Xtest, Ytrain, Ytest = train_test_split(feature, label, test_size=0.3)
Xtrain.head()
```
### Fitting the Model

#### Evaluation Metric: R^2

```python
def R2(y, y_pred):
    # Coefficient of determination: 1 - SS_res / SS_tot
    return 1 - (np.sum((y - y_pred) ** 2) / np.sum((y - np.mean(y)) ** 2))
```
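As a quick sanity check (not in the original post), this hand-rolled metric should agree with scikit-learn's `r2_score` on toy values:

```python
from sklearn.metrics import r2_score

# The two implementations should print the same value
y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_hat = np.array([1.1, 1.9, 3.2, 3.8])
print(R2(y_true, y_hat), r2_score(y_true, y_hat))
```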
#### Standardization

```python
def normalize(X):
    # Z-score standardization: zero mean, unit variance per column
    sigma = np.std(X, axis=0)
    mu = np.mean(X, axis=0)
    X = (X - mu) / sigma
    return np.array(X)

X = np.array(Xtrain).reshape(np.size(Xtrain, 0), -1)
y = np.array(Ytrain).T.reshape(-1, 1)
X = normalize(X)
y = normalize(y)
```
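One caveat: `normalize` fits its statistics on whatever array it receives, so evaluating on the held-out split should reuse the training-set statistics rather than re-standardizing the test set independently. A sketch, assuming the `Xtest` from the split above (the same applies to `Ytest`):

```python
# Reuse training-set statistics for the test split to avoid leakage
Xtr = np.array(Xtrain, dtype=float)
mu, sigma = Xtr.mean(axis=0), Xtr.std(axis=0)
Xtest_std = (np.array(Xtest, dtype=float) - mu) / sigma
```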
#### Closed-Form Solution

```python
def Normal_Equation(X, y):
    # theta = (X^T X)^{-1} X^T y
    return np.linalg.inv(X.T @ X) @ X.T @ y

start_time = time.time()
theta_ne = Normal_Equation(X, y)
print(f"Elapsed time: {time.time() - start_time}")
print(f"R^2: {R2(y, X @ theta_ne)}")

result_cf = pd.DataFrame({"ColumnName": list(Xtrain.columns), "Theta": theta_ne.flatten()})
result_cf
```
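A side note on the implementation: explicitly inverting `X.T @ X` is slower and less numerically stable than solving the linear system directly. A drop-in variant (a sketch of mine, not the original code):

```python
def Normal_Equation_solve(X, y):
    # Solve (X^T X) theta = X^T y without forming the inverse
    return np.linalg.solve(X.T @ X, X.T @ y)
```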
#### Gradient Descent

```python
def MSE_Loss(y, y_pred):
    # Mean squared error with the 1/(2m) convention used in the derivation
    return np.sum((y_pred - y) ** 2) / (2 * np.size(y))

def GD(X, y, lr=0.01, epochs=5000):
    m, n = X.shape
    theta = np.random.randn(n, 1)                       # random initialization
    loss = np.zeros(epochs)
    for epoch in range(epochs):
        gradient = (1 / m) * (X.T @ (X @ theta - y))    # (1/m) X^T (X theta - y)
        theta -= lr * gradient                          # theta := theta - alpha * gradient
        loss[epoch] = MSE_Loss(y, X @ theta)
    return theta, loss

start_time = time.time()
theta_gd, loss = GD(X, y)
print(f"Elapsed time: {time.time() - start_time}")
print(f"R^2: {R2(y, X @ theta_gd)}")

result_gd = pd.DataFrame({"ColumnName": list(Xtrain.columns), "Theta": theta_gd.flatten()})
result_gd

sns.lineplot(x=np.arange(5000), y=loss.flatten(), label='Loss Curve')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Gradient Descent Loss Curve')
```
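The learning rate and epoch count above are fixed at 0.01 and 5000. To see how the learning rate shapes convergence, here is a quick illustrative sweep (the values are arbitrary assumptions, not from the original post):

```python
# Compare loss curves for a few learning rates (illustrative values)
for lr in (0.001, 0.01, 0.1):
    _, curve = GD(X, y, lr=lr, epochs=1000)
    sns.lineplot(x=np.arange(1000), y=curve, label=f"lr={lr}")
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss vs. Learning Rate')
```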
## Experimental Results