The problem I've run into: I'm working on a skin cancer classification project using the HAM10000 dataset. Because HAM10000 is imbalanced, I first resampled the training set. The result is that training accuracy is very high while validation accuracy is very low, i.e. the model overfits. So I added L2 regularization, but after several rounds of parameter tuning it didn't help: both training accuracy and validation accuracy ended up very low.
I'm posting here on CSDN to ask for advice!
This is the class distribution before resampling:
The resampling code:
# Apply SMOTE oversampling to the training set
smote = SMOTE(random_state=42)
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
y_train = torch.from_numpy(np.array(y_train)).type(torch.LongTensor)
y_eval = torch.from_numpy(np.array(y_eval)).type(torch.LongTensor)
train_data = my_dataset(df=X_train_smote, labels=y_train_smote, transform=transform)
eval_data = my_dataset(df=X_eval, labels=y_eval, transform=transform)
data_loader_train = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
data_loader_eval = DataLoader(eval_data, batch_size=BATCH_SIZE, shuffle=True)
The regularization code:
# optimizer = optim.Adam(mymodel.parameters(), lr=LEARNING_RATE, betas=(0.9, 0.999), eps=1e-8)
optimizer = torch.optim.Adam(mymodel.parameters(), lr=0.01, weight_decay=0.01)
I tried both of these, with poor results. For L2 in particular I tried many parameter combinations and none of them helped.
The results I get are roughly at this level of performance:
I'm new to deep learning. Could someone tell me, from your own experience, what processing you actually do for this kind of imbalanced dataset? I'm afraid of going down dead ends and spending time with nothing to show for it.
I've put the full code below. Whoever gives concrete, effective suggestions against the code gets the bounty. Many thanks!
import os
import sklearn
import tqdm
from sklearn.metrics import confusion_matrix
from imblearn.over_sampling import RandomOverSampler, SMOTE
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from torchvision import transforms
from torchvision.models import resnet18, resnet34, resnet50, resnet101
from my_dataset import my_dataset
import torch
# The checkpoints below are saved to ./model, so create that directory if it is missing
if not os.path.exists('./model'):
    os.mkdir('./model')
BATCH_SIZE = 16
DATA_PATH = r'C:\Users\11831\Desktop\FinalProject\data\hmnist_28_28_RGB.csv'
MYMODEL = "resnet18"
EPOCH = 5
LEARNING_RATE = 1e-4
GPU_IS_OK = torch.cuda.is_available()
tb_writer = SummaryWriter(log_dir=f"logs/{MYMODEL}-{EPOCH}")
device = torch.device('cuda' if GPU_IS_OK else 'cpu')
print('SkinCancerMNIST-HAM10000 is being trained using the {}.'.format('GPU' if GPU_IS_OK else 'CPU'))
# ros = RandomOverSampler()
# smote = SMOTE()
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])
])
print('Loading the dataset...')
data = pd.read_csv(DATA_PATH, encoding='utf-8')
labels = data['label']
images = data.drop(columns=['label'])
# images, labels = smote.fit_resample(images, labels)
print('The size of dataset: {}'.format(images.shape[0]))
# Plot the class distribution as a bar chart
label_count = dict(labels.value_counts(sort=False))
plt.figure(figsize=(10, 10))
plt.bar(label_count.keys(), label_count.values())
# plt.bar(['AKIEC', 'BCC', 'BKL', 'DF', 'NV', 'VASC', 'MEL'], label_count.values())
plt.xlabel('Label')
plt.ylabel('Number')
plt.show()
X_train, X_eval, y_train, y_eval = train_test_split(images, labels, train_size=0.8, random_state=21)
# Apply SMOTE oversampling to the training set
smote = SMOTE(random_state=42)
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
y_train = torch.from_numpy(np.array(y_train)).type(torch.LongTensor)
y_eval = torch.from_numpy(np.array(y_eval)).type(torch.LongTensor)
train_data = my_dataset(df=X_train_smote, labels=y_train_smote, transform=transform)
eval_data = my_dataset(df=X_eval, labels=y_eval, transform=transform)
data_loader_train = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
data_loader_eval = DataLoader(eval_data, batch_size=BATCH_SIZE, shuffle=True)
print('Over.')
mymodel = resnet18(pretrained=False)
mymodel.fc = nn.Linear(mymodel.fc.in_features, 7)
mymodel = mymodel.to(device)
# mymodel = resnet34(pretrained=False)
# mymodel.fc = nn.Linear(mymodel.fc.in_features, 7)
# mymodel = mymodel.to(device)
# mymodel = resnet50(pretrained=False)
# mymodel.fc = nn.Linear(mymodel.fc.in_features, 7)
# mymodel = mymodel.to(device)
# mymodel = resnet101(pretrained=False)
# mymodel.fc = nn.Linear(mymodel.fc.in_features, 7)
# mymodel = mymodel.to(device)
print('The weight structure and parameters are as follows:')
summary(mymodel, input_size=(3, 28, 28))
criterion = nn.CrossEntropyLoss()
# optimizer = optim.Adam(mymodel.parameters(), lr=LEARNING_RATE, betas=(0.9, 0.999), eps=1e-8)
optimizer = torch.optim.Adam(mymodel.parameters(), lr=0.01, weight_decay=0.01)
print('Training...')
for epoch in range(1, EPOCH + 1):
# train
train_loss = 0
train_acc = 0
train_step = 0
mymodel.train()
for image, label in tqdm.tqdm(data_loader_train):
image = Variable(image.to(device))
label = Variable(label.to(device))
output = mymodel(image)
loss = criterion(output, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
_, pred = output.max(1)
num_correct = (pred == label).sum().item()
acc = num_correct / label.size(0)  # use the actual batch size (the last batch may be smaller)
train_acc += acc
train_step += 1
train_loss /= train_step
train_acc /= train_step
# eval
val_loss = 0
val_acc = 0
val_step = 0
mymodel.eval()
for image, label in data_loader_eval:
image = Variable(image.to(device))
label = Variable(label.to(device))
output = mymodel(image)
loss = criterion(output, label)
val_loss += loss.item()
_, pred = output.max(1)
num_correct = (pred == label).sum().item()
acc = num_correct / label.size(0)  # use the actual batch size (the last batch may be smaller)
val_acc += acc
val_step += 1
val_loss /= val_step
val_acc /= val_step
# Confusion matrix
preds = []
gts = []
for image, label in data_loader_eval:
image = Variable(image.to(device))
output = mymodel(image)
_, pred = torch.max(output, 1)
preds.extend(pred.cpu().numpy())
gts.extend(label.cpu().numpy())
cm = confusion_matrix(gts, preds)
fig, ax = plt.subplots(figsize=(7, 7))
ax.imshow(cm, cmap='Blues')
ax.set_xticks(range(7))
# ax.set_xticklabels(['AKIEC', 'BCC', 'BKL', 'DF', 'MEL', 'NV', 'VASC'], fontsize=10, rotation=90)
ax.set_yticks(range(7))
# ax.set_yticklabels(['AKIEC', 'BCC', 'BKL', 'DF', 'MEL', 'NV', 'VASC'], fontsize=10)
ax.set_xlabel('Predicted label', fontsize=11)
ax.set_ylabel('True label', fontsize=11)
ax.tick_params(axis='both', which='major', labelsize=10, pad=10)
ax.set_title('Confusion matrix', fontsize=14)
for i in range(7):
for j in range(7):
ax.text(j, i, str(cm[i][j]), horizontalalignment='center', verticalalignment='center',
color='white' if cm[i][j] > cm.max() / 2 else 'black', fontsize=10)
# Compute precision, recall and F1-score
precision = sklearn.metrics.precision_score(gts, preds, average='macro')
recall = sklearn.metrics.recall_score(gts, preds, average='macro')
f1_score = sklearn.metrics.f1_score(gts, preds, average='macro')
# # Print the metrics
# print('Precision:', precision)
# print('Recall:', recall)
# print('F1-Score:', f1_score)
# tensorboard
tags = ['train_loss', 'train_acc', 'val_loss', 'val_acc', 'Precision', 'Recall', 'F1-Score']
tb_writer.add_scalar(tags[0], train_loss, epoch)
tb_writer.add_scalar(tags[1], train_acc, epoch)
tb_writer.add_scalar(tags[2], val_loss, epoch)
tb_writer.add_scalar(tags[3], val_acc, epoch)
tb_writer.add_scalar(tags[4], precision, epoch)
tb_writer.add_scalar(tags[5], recall, epoch)
tb_writer.add_scalar(tags[6], f1_score, epoch)
# print the loss and accuracy
print(
'[{:3d}/{:3d}] Train Loss: {:11.9f} | Train Accuracy: {:6.4f} | Val Loss: {:11.9f} | Val Accuracy: {:6.4f} | '
'Precision: {:6.4f}| Recall: {:6.4f} | F1-Score: {:6.4f}'.format(
epoch, EPOCH, train_loss, train_acc, val_loss, val_acc, precision, recall, f1_score))
# save the weight
torch.save(mymodel, './model/ResNet18_epoch{}.pt'.format(epoch))
plt.show()
print('Training completed.')
I think you could apply image augmentation to the classes with fewer samples.
The following answer is quoted from ChatGPT; if it helps, please accept it!
For the problem you describe, here are some suggestions and possible solutions:
Try other techniques for handling imbalanced datasets
Besides resampling, there are other ways to deal with class imbalance, such as class-weight adjustment, undersampling, oversampling, and generative adversarial networks (GANs). You can try these to address the imbalance; a class-weighting sketch follows below.
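A minimal sketch of class weighting in the loss function, assuming the y_train labels and device variables from the code in the question; this replaces resampling rather than adding to it:

import numpy as np
import torch
import torch.nn as nn

# Inverse-frequency weights: rarer classes get larger weights (7 classes assumed)
class_counts = np.bincount(np.array(y_train), minlength=7)
class_weights = torch.tensor(len(y_train) / (7 * class_counts), dtype=torch.float32).to(device)

# Drop-in replacement for the unweighted loss; no oversampling needed
criterion = nn.CrossEntropyLoss(weight=class_weights)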
Try a more complex model or tune the hyperparameters
If the model you're using is relatively simple, it may not fit the imbalanced dataset well. A more complex model, or better hyperparameters, may improve performance.
Try ensemble learning
Ensemble methods combine the predictions of several models, which can effectively reduce the risk of overfitting and improve performance. A simple averaging scheme is sketched below.
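A minimal sketch of one basic ensembling scheme: averaging the softmax outputs of two independently trained models. model_a and model_b are hypothetical, already-trained 7-class classifiers:

import torch
import torch.nn.functional as F

@torch.no_grad()
def ensemble_predict(model_a, model_b, images):
    model_a.eval()
    model_b.eval()
    # Average the class probabilities of both models, then pick the most likely class
    probs = (F.softmax(model_a(images), dim=1) + F.softmax(model_b(images), dim=1)) / 2
    return probs.argmax(dim=1)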
Try cross-validation
Cross-validation gives a more reliable estimate of model performance and helps detect overfitting. You can evaluate the model with cross-validation and tune the hyperparameters based on the results.
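A minimal sketch of stratified 5-fold splitting on the images/labels DataFrames from the question's code; the per-fold training itself is omitted:

from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold, (train_idx, val_idx) in enumerate(skf.split(images, labels)):
    X_tr, X_va = images.iloc[train_idx], images.iloc[val_idx]
    y_tr, y_va = labels.iloc[train_idx], labels.iloc[val_idx]
    # ... build datasets/loaders and train one model per fold here ...
    print(f'Fold {fold}: {len(train_idx)} train / {len(val_idx)} val samples')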
Tune the regularization strength
L2 regularization can effectively curb overfitting, but its strength has to be chosen appropriately, otherwise it causes underfitting instead. Try several values of the regularization parameter to find a good trade-off.
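A minimal sketch of scanning a few weight_decay values, reusing torch, nn, resnet18, device and LEARNING_RATE from the question's code; the model is re-created for each candidate so the runs are comparable, and the candidate values are only illustrative:

for wd in (1e-5, 1e-4, 1e-3, 1e-2):
    model = resnet18(pretrained=False)
    model.fc = nn.Linear(model.fc.in_features, 7)
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=wd)
    # ... train for a few epochs with this optimizer and record the validation accuracy ...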
The following answer was written jointly by the GPT-3.5 model and blogger 波罗歌:
Class imbalance is a common practical problem; the following approaches can help:
A common approach is resampling, i.e. rebalancing the training distribution by random sampling, using either oversampling (such as SMOTE) or undersampling (such as Tomek Links). You have already resampled the training set and mention overfitting; oversampling can indeed aggravate overfitting in a model that is already prone to it, in which case you need to adjust the model or combine resampling with other techniques.
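A minimal sketch of the undersampling variants with imbalanced-learn, using the same X_train/y_train split as in the question:

from imblearn.under_sampling import RandomUnderSampler, TomekLinks

# Tomek Links removes ambiguous majority-class samples near the class boundary
tl = TomekLinks()
X_train_tl, y_train_tl = tl.fit_resample(X_train, y_train)

# Random undersampling simply shrinks the majority classes
rus = RandomUnderSampler(random_state=42)
X_train_rus, y_train_rus = rus.fit_resample(X_train, y_train)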
Data augmentation expands the dataset by perturbing existing samples and thereby changing the data distribution, e.g. rotation, flipping, cropping, scaling, and adding noise. This helps the model learn more robust features.
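A minimal sketch of a training-time augmentation pipeline with torchvision; the exact transform list is only an illustration, and depending on what my_dataset yields, the ToPILImage step may need to be adapted:

from torchvision import transforms

train_transform = transforms.Compose([
    transforms.ToPILImage(),      # assumes my_dataset passes in an HxWxC uint8 array or CHW tensor
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(20),
    transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5]),
])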
You can also try classical classifiers such as random forests, SVMs, and decision trees; with suitable preprocessing these algorithms can cope with imbalanced datasets.
For imbalanced data you can also use cost-sensitive algorithms. The idea is weighting: give classes that are easily misclassified a higher misclassification cost, so the algorithm is penalized more for getting them wrong.
Modify the loss function so that it punishes misclassifications on the hard classes more heavily; a classic refinement of the cross-entropy loss along these lines is the focal loss, sketched below.
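A minimal sketch of a multi-class focal loss; gamma is a hyperparameter, and this is an illustration rather than code from the post:

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    def __init__(self, gamma=2.0, weight=None):
        super().__init__()
        self.gamma = gamma
        self.weight = weight  # optional per-class weights, as in CrossEntropyLoss

    def forward(self, logits, target):
        # Per-sample cross-entropy, then down-weight easy examples by (1 - p_t) ** gamma
        ce = F.cross_entropy(logits, target, weight=self.weight, reduction='none')
        pt = torch.exp(-ce)  # model's probability for the true class
        return ((1.0 - pt) ** self.gamma * ce).mean()

# criterion = FocalLoss(gamma=2.0)  # drop-in replacement for nn.CrossEntropyLoss()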
In your case the overfitting may indeed stem from the imbalance. You can try adjusting hyperparameters, changing the architecture, or using dropout and L1/L2 regularization to reduce overfitting, and you can also use early stopping to halt training before the model overfits. Tuning strategies vary from person to person; start from commonly recommended hyperparameter ranges and then fine-tune from there.
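A minimal sketch of early stopping on the validation loss. run_one_epoch is a hypothetical helper that trains for one epoch and returns the validation loss; the patience value and checkpoint path are arbitrary choices:

best_val_loss = float('inf')
patience, bad_epochs = 3, 0

for epoch in range(1, EPOCH + 1):
    val_loss = run_one_epoch(mymodel, data_loader_train, data_loader_eval)  # hypothetical helper
    if val_loss < best_val_loss:
        best_val_loss, bad_epochs = val_loss, 0
        torch.save(mymodel.state_dict(), './model/best.pt')  # keep the best weights so far
    else:
        bad_epochs += 1
        if bad_epochs >= patience:
            print(f'Early stopping at epoch {epoch}')
            break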
If my answer solves your problem, please accept it!
This answer is based on the latest ChatGPT-4, hoping you'll accept it! Feel free to ask me other questions too (the latest version is smarter and more capable).
Here are some suggestions. Based on them, I made the following changes to the code; the modified code is shown below:
import os
import sklearn
import tqdm
from sklearn.metrics import confusion_matrix
from imblearn.over_sampling import RandomOverSampler, SMOTE
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from torchvision import transforms
from torchvision.models import resnet18
from my_dataset import my_dataset
import torch
# The checkpoints below are saved to ./model, so create that directory if it is missing
if not os.path.exists('./model'):
    os.mkdir('./model')
BATCH_SIZE = 16
DATA_PATH = r'C:\Users\11831\Desktop\FinalProject\data\hmnist_28_28_RGB.csv'
MYMODEL = "resnet18"
EPOCH = 5
LEARNING_RATE = 1e-4
GPU_IS_OK = torch.cuda.is_available()
tb_writer = SummaryWriter(log_dir=f"logs/{MYMODEL}-{EPOCH}")
device = torch.device('cuda' if GPU_IS_OK else 'cpu')
print('SkinCancerMNIST-HAM10000 is being trained using the {}.'.format('GPU' if GPU_IS_OK else 'CPU'))
# Load the dataset
print('Loading the dataset...')
data = pd.read_csv(DATA_PATH, encoding='utf-8')
labels = data['label']
images = data.drop(columns=['label'])
# Split the dataset
X_train, X_eval, y_train, y_eval = train_test_split(images, labels, train_size=0.8, random_state=21)
# Oversample the training set
smote = SMOTE(random_state=42)
X_train, y_train = smote.fit_resample(X_train, y_train)
# Convert the labels to tensors
y_train = torch.from_numpy(np.array(y_train)).type(torch.LongTensor)
y_eval = torch.from_numpy(np.array(y_eval)).type(torch.LongTensor)
# Preprocessing transforms
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])
])
train_data = my_dataset(df=X_train, labels=y_train, transform=transform)
eval_data = my_dataset(df=X_eval, labels=y_eval, transform=transform)
# Build the data loaders
data_loader_train = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
data_loader_eval = DataLoader(eval_data, batch_size=BATCH_SIZE, shuffle=True)
print('Data loading is completed.')
# Define the model
mymodel = resnet18(pretrained=False)
mymodel.fc = nn.Linear(mymodel.fc.in_features, 7)
mymodel = mymodel.to(device)
# Print the model structure and parameters
print('The weight structure and parameters are as follows:')
summary(mymodel, input_size=(3, 28, 28))
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(mymodel.parameters(), lr=LEARNING_RATE, weight_decay=0.01)
# Training loop with an explicit L2 penalty added to the loss
for epoch in range(1, EPOCH + 1):
train_loss = 0
train_acc = 0
train_step = 0
mymodel.train()
for image, label in tqdm.tqdm(data_loader_train):
image = Variable(image.to(device))
label = Variable(label.to(device))
output = mymodel(image)
loss = criterion(output, label)
# Add an explicit L2 penalty (note: weight_decay in the optimizer above already applies L2 regularization)
l2_reg = None
for W in mymodel.parameters():
if l2_reg is None:
l2_reg = W.norm(2)
else:
l2_reg = l2_reg + W.norm(2)
loss += 0.01 * l2_reg
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.item()
_, pred = output.max(1)
num_correct = (pred == label).sum().item()
acc = num_correct / label.size(0)  # use the actual batch size (the last batch may be smaller)
train_acc += acc
train_step += 1
train_loss /= train_step
train_acc /= train_step
val_loss = 0
val_acc = 0
val_step = 0
mymodel.eval()
for image, label in data_loader_eval:
image = Variable(image.to(device))
label = Variable(label.to(device))
output = mymodel(image)
loss = criterion(output, label)
val_loss += loss.item()
_, pred = output.max(1)
num_correct = (pred == label).sum().item()
acc = num_correct / label.size(0)  # use the actual batch size (the last batch may be smaller)
val_acc += acc
val_step += 1
val_loss /= val_step
val_acc /= val_step
preds = []
gts = []
for image, label in data_loader_eval:
image = Variable(image.to(device))
output = mymodel(image)
_, pred = torch.max(output, 1)
preds.extend(pred.cpu().numpy())
gts.extend(label.cpu().numpy())
cm = confusion_matrix(gts, preds)
fig, ax = plt.subplots(figsize=(7, 7))
ax.imshow(cm, cmap='Blues')
ax.set_xticks(range(7))
ax.set_yticks(range(7))
ax.set_xlabel('Predicted label', fontsize=11)
ax.set_ylabel('True label', fontsize=11)
ax.tick_params(axis='both', which='major', labelsize=10, pad=10)
ax.set_title('Confusion matrix', fontsize=14)
for i in range(7):
for j in range(7):
ax.text(j, i, str(cm[i][j]), horizontalalignment='center', verticalalignment='center',
color='white' if cm[i][j] > cm.max() / 2 else 'black', fontsize=10)
precision = sklearn.metrics.precision_score(gts, preds, average='macro')
recall = sklearn.metrics.recall_score(gts, preds, average='macro')
f1_score = sklearn.metrics.f1_score(gts, preds, average='macro')
tags = ['train_loss', 'train_acc', 'val_loss', 'val_acc', 'precision', 'recall', 'f1_score']
tb_writer.add_scalar(tags[0], train_loss, epoch)
tb_writer.add_scalar(tags[1], train_acc, epoch)
tb_writer.add_scalar(tags[2], val_loss, epoch)
tb_writer.add_scalar(tags[3], val_acc, epoch)
tb_writer.add_scalar(tags[4], precision, epoch)
tb_writer.add_scalar(tags[5], recall, epoch)
tb_writer.add_scalar(tags[6], f1_score, epoch)
print(
'[{:3d}/{:3d}] Train Loss: {:11.9f} | Train Accuracy: {:6.4f} | Val Loss: {:11.9f} | Val Accuracy: {:6.4f} | '
'Precision: {:6.4f}| Recall: {:6.4f} | F1-Score: {:6.4f}'.format(
epoch, EPOCH, train_loss, train_acc, val_loss, val_acc, precision, recall, f1_score))
#save the weight
torch.save(mymodel, './model/ResNet18_epoch{}.pt'.format(epoch))
plt.show()
print('Training completed.')
For image classification on an imbalanced dataset, the following approaches can be used:
Resampling duplicates (or synthesizes) samples of the minority classes so that all classes end up with equal or similar counts, e.g. random oversampling (RandomOverSampler) or SMOTE. Note, however, that resampling can lead to overfitting, because the model pays too much attention to the repeated minority samples.
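An alternative that avoids duplicating or synthesizing rows is class-balanced sampling in the DataLoader. A minimal sketch with WeightedRandomSampler, assuming train_data is built from the original (un-resampled) X_train/y_train and that BATCH_SIZE comes from the question's code:

import numpy as np
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler

labels_np = np.array(y_train)
class_counts = np.bincount(labels_np, minlength=7)
sample_weights = 1.0 / class_counts[labels_np]   # rare-class samples are drawn more often

sampler = WeightedRandomSampler(weights=torch.as_tensor(sample_weights, dtype=torch.double),
                                num_samples=len(sample_weights),
                                replacement=True)
# shuffle must stay False (the default) when a sampler is provided
data_loader_train = DataLoader(train_data, batch_size=BATCH_SIZE, sampler=sampler)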
During training, you can assign higher weights to the classes with fewer samples so the model pays more attention to them; this is done by adding class weights to the loss function.
For the minority classes, new samples can be generated by introducing random perturbations, e.g. rotating, translating, or scaling the images to produce new image samples.
Transfer learning is also effective for imbalanced data: take a model pretrained on a large dataset and fine-tune it on the imbalanced dataset to improve classification performance.
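A minimal sketch of fine-tuning an ImageNet-pretrained ResNet18; since the hmnist images are 28x28, a transforms.Resize step would likely also be needed for the pretrained weights to be useful, and the learning rates below are only illustrative (device is assumed from the question's code):

import torch.nn as nn
import torch.optim as optim
from torchvision.models import resnet18

mymodel = resnet18(pretrained=True)                 # load ImageNet weights
mymodel.fc = nn.Linear(mymodel.fc.in_features, 7)   # new 7-class head
mymodel = mymodel.to(device)

# Smaller learning rate for the pretrained backbone, larger for the fresh head
optimizer = optim.Adam([
    {'params': [p for n, p in mymodel.named_parameters() if not n.startswith('fc.')], 'lr': 1e-5},
    {'params': mymodel.fc.parameters(), 'lr': 1e-4},
])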
In practice, pick the method according to the dataset. For skin cancer classification you could first try class weighting and noise-based augmentation, and if that does not work well, consider transfer learning. You can also adjust the model architecture and hyperparameters to push performance further.
1. Class 4 is far too large; undersample it (take care not to distort the overall distribution: it should still be the largest class after undersampling). See the sketch after this answer.
2. Apply data augmentation to classes 3 and 5 (rotation, contrast and saturation changes, etc.).
(I've run into a similar problem before, and the methods above improved things considerably. Hope you'll accept this.)
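A minimal sketch of point 1: undersampling only the oversized class (class 4, as labelled above) while leaving the other classes untouched, using X_train/y_train from the question's code; the target count of 3000 is an arbitrary illustration, not a recommendation:

from imblearn.under_sampling import RandomUnderSampler

# Shrink only class 4; classes not listed in the dict keep all their samples
rus = RandomUnderSampler(sampling_strategy={4: 3000}, random_state=42)
X_train_us, y_train_us = rus.fit_resample(X_train, y_train)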
You could lower the learning rate a bit, use some data augmentation to expand the samples, and remember to shuffle the data during training.
You could try data augmentation (e.g. adding noise) to expand the minority classes to the same order of magnitude as the largest class; a small sketch follows.
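A minimal sketch of a simple noise-injection transform that could be added to the augmentation pipeline for minority-class samples; the 0.05 standard deviation is an arbitrary choice:

import torch
from torchvision import transforms

class AddGaussianNoise:
    # Adds zero-mean Gaussian noise to an image tensor
    def __init__(self, std=0.05):
        self.std = std

    def __call__(self, tensor):
        return tensor + torch.randn_like(tensor) * self.std

noisy_transform = transforms.Compose([
    transforms.ToTensor(),
    AddGaussianNoise(std=0.05),
    transforms.Normalize(mean=[0.5], std=[0.5]),
])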