使用十折交叉验证评估回归模型性能
首先,我们导入所有必要的库和模块,确保环境准备就绪。
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
from collections import OrderedDict
from torch.nn import init
import torch.utils.data as Data
接下来,定义一个函数用于获取每一折的数据,包括训练集和验证集。
def get_kfold_data(k, i, X, y):
    """Return the train/validation split for fold ``i`` of ``k``.

    The i-th contiguous slice of ``len(X) // k`` rows becomes the
    validation set; all remaining rows form the training set.  The
    last fold absorbs any remainder rows, so its validation slice may
    be slightly larger than the others.
    """
    fold_size = X.shape[0] // k
    lo = i * fold_size
    # The final fold's validation slice runs to the end of the data.
    hi = X.shape[0] if i == k - 1 else (i + 1) * fold_size
    X_valid, y_valid = X[lo:hi], y[lo:hi]
    # For the last fold X[hi:] is empty, so this reduces to X[:lo],
    # exactly matching the dedicated last-fold branch it replaces.
    X_train = torch.cat((X[:lo], X[hi:]), dim=0)
    y_train = torch.cat((y[:lo], y[hi:]), dim=0)
    return X_train, y_train, X_valid, y_valid
然后,实现一个执行多折交叉验证的函数,该函数将返回训练和验证的平均损失与准确率。
def k_fold(k, X, y):
    """Run k-fold cross-validation on (X, y) and return per-fold metrics.

    Relies on the module-level ``model``, ``loss_fn``, ``batch_size``,
    ``epochs`` and ``learning_rate``.

    Returns:
        list of four lists, one entry per fold:
        [train losses, valid losses, train accuracies, valid accuracies].
    """
    # NOTE(review): the global model/optimizer are NOT re-initialized
    # between folds, so each fold keeps training the same weights —
    # confirm this is intended for an unbiased CV estimate.
    train_loss_sum, valid_loss_sum = 0, 0
    train_acc_sum, valid_acc_sum = 0, 0
    data = []
    train_loss_to_data, valid_loss_to_data = [], []
    train_acc_to_data, valid_acc_to_data = [], []
    for i in range(k):
        print(f'第 {i + 1} 折验证结果')
        X_train, y_train, X_valid, y_valid = get_kfold_data(k, i, X, y)
        train_loader = DataLoader(
            dataset=Data.TensorDataset(X_train, y_train),
            batch_size=batch_size,
            shuffle=True,
            num_workers=0,
        )
        # Validation order is irrelevant; not shuffling keeps batch
        # composition (and the batch-averaged metrics) reproducible.
        valid_loader = DataLoader(
            dataset=Data.TensorDataset(X_valid, y_valid),
            batch_size=batch_size,
            shuffle=False,
            num_workers=0,
        )
        # Bug fix: the module defines loss_fn / epochs / learning_rate;
        # the previous call referenced undefined globals loss /
        # num_epochs / lr and raised NameError.
        train_loss, valid_loss, train_acc, valid_acc = train(
            model, train_loader, valid_loader, loss_fn, epochs,
            batch_size, learning_rate)
        train_loss_to_data.append(train_loss)
        valid_loss_to_data.append(valid_loss)
        train_acc_to_data.append(train_acc.detach().numpy())
        valid_acc_to_data.append(valid_acc.detach().numpy())
        train_loss_sum += train_loss
        valid_loss_sum += valid_loss
        train_acc_sum += train_acc
        valid_acc_sum += valid_acc
    print('\n', '最终k折交叉验证结果:')
    print(f'average train loss: {train_loss_sum / k:.4f}, average train accuracy: {train_acc_sum / k * 100:.3f}%')
    print(f'average valid loss: {valid_loss_sum / k:.4f}, average valid accuracy: {valid_acc_sum / k * 100:.3f}%')
    data.extend([train_loss_to_data, valid_loss_to_data, train_acc_to_data, valid_acc_to_data])
    return data
定义模型训练函数,该函数将完成模型的训练过程,并返回每个epoch的训练和验证损失及准确率。
def train(model, train_loader, valid_loader, loss, num_epochs, batch_size, lr):
    """Train ``model`` for ``num_epochs`` epochs, validating after each.

    Uses the module-level ``optimizer``; the ``batch_size`` and ``lr``
    arguments are accepted for interface compatibility but unused here.
    The "accuracy" is the batch mean of ``1 - |pred - y| / y``.

    Returns:
        (train_loss, valid_loss, train_acc, valid_acc) of the LAST
        epoch; losses are floats, accuracies are 0-d tensors.
    """
    train_losses, valid_losses = [], []
    train_accuracies, valid_accuracies = [], []
    for epoch in range(num_epochs):
        train_loss_sum, valid_loss_sum = 0, 0
        train_acc_sum, valid_acc_sum = 0, 0
        n_train, n_valid = 0, 0
        for X, y in train_loader:
            y_pred = model(X)
            batch_loss = loss(y_pred, y)
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            train_loss_sum += batch_loss.item()
            # Bug fix: detach the metric before accumulating, otherwise
            # every batch's autograd graph is kept alive for the whole
            # epoch (unbounded memory growth).  Still a tensor, so
            # callers that do .detach().numpy() keep working.
            # NOTE(review): divides by y — assumes targets are nonzero;
            # confirm for the intended data.
            acc = (1 - abs(y_pred - y) / y).mean().detach()
            train_acc_sum += acc
            n_train += 1
        # No gradients needed for evaluation.
        with torch.no_grad():
            for X, y in valid_loader:
                y_pred = model(X)
                valid_loss_sum += loss(y_pred, y).item()
                acc = (1 - abs(y_pred - y) / y).mean()
                valid_acc_sum += acc
                n_valid += 1
        train_losses.append(train_loss_sum / n_train)
        valid_losses.append(valid_loss_sum / n_valid)
        train_accuracies.append(train_acc_sum / n_train)
        valid_accuracies.append(valid_acc_sum / n_valid)
        print(f'epoch {epoch + 1}, train_loss {train_losses[-1]:.6f}, train_acc {train_accuracies[-1] * 100:.3f}%, valid_loss {valid_losses[-1]:.6f}, valid_acc {valid_accuracies[-1] * 100:.3f}%')
    return train_losses[-1], valid_losses[-1], train_accuracies[-1], valid_accuracies[-1]
生成模拟数据集,用于模型训练和验证。
# --- Synthetic regression data ------------------------------------------
# Ground truth is linear: y = x . w + b, plus small Gaussian noise.
num_features, num_samples = 500, 10000
true_weights = torch.full((1, num_features), 0.0056)
true_bias = 0.028
x_data = torch.from_numpy(
    np.random.normal(0, 0.001, size=(num_samples, num_features))
).float()
y = x_data @ true_weights.t() + true_bias
y += torch.normal(0, 0.001, y.shape)
构建回归模型,并初始化模型参数。
# --- Regression model: 3-layer MLP, num_features -> 256 -> 128 -> 1 -----
_layers = OrderedDict()
_layers['linear1'] = nn.Linear(num_features, 256)
_layers['relu1'] = nn.ReLU()
_layers['linear2'] = nn.Linear(256, 128)
_layers['relu2'] = nn.ReLU()
_layers['linear3'] = nn.Linear(128, 1)
model = nn.Sequential(_layers)

# Re-initialize every weight and bias from N(0, 0.001).
for p in model.parameters():
    init.normal_(p, mean=0, std=0.001)
设置超参数并定义损失函数和优化器。
# --- Hyperparameters, loss and optimizer --------------------------------
k_folds = 10            # number of cross-validation folds
batch_size = 50         # minibatch size for both loaders
epochs = 10             # training epochs per fold
learning_rate = 0.001   # SGD step size

loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
启动训练和验证过程,收集每折的结果。
results = k_fold(k_folds, x_data, y)
最后,使用Pandas将结果保存到CSV文件中,便于后续分析。
import pandas as pd

# Persist the per-fold metrics to CSV for later analysis.
fold_names = [f'第{i + 1}折' for i in range(k_folds)]
metric_columns = ['Train Loss', 'Valid Loss', 'Train Acc', 'Valid Acc']
data_frame = {'Fold': fold_names}
data_frame.update(zip(metric_columns, results))
df = pd.DataFrame(data_frame)
df.to_csv('./feedforward_neural_network_kfold_regression.csv', index=False)
df