How can I improve the accuracy of DenseNet?

Problem description and background

The training, validation, and test accuracy of my DenseNet are all stuck at around 70%.
I'm not sure where the problem lies or how to improve it.


DenseNet model code

from collections import OrderedDict

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from torch.hub import load_state_dict_from_url

model_urls = {
    "densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth",
    "densenet169": "https://download.pytorch.org/models/densenet169-b2777c0a.pth",
    "densenet201": "https://download.pytorch.org/models/densenet201-c1103571.pth",
}


class _DenseLayer(nn.Module):
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        """
        :param num_input_features:  输入特征图的数量
        :param growth_rate:         在论文中为32,每个DenseLayer产生k个特征图,这里的k即growth_rate
        :param bn_size:             让1x1卷积产生4k个特征图,达到降维的作用
        :param drop_rate:           DropOut层的丢弃概率
        """
        super(_DenseLayer, self).__init__()

        # The composite function in the paper is defined as BN -> ReLU -> Conv
        self.add_module("norm1", nn.BatchNorm2d(num_input_features))
        self.add_module("relu1", nn.ReLU(inplace=True))
        self.add_module(
            "conv1", nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)
        )

        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module(
            'conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)
        )

        self.drop_rate = float(drop_rate)

    def forward(self, x):
        if isinstance(x, Tensor):
            prev_features = [x]
        else:
            prev_features = x

        # this concatenation implements the dense connectivity
        concated_features = torch.cat(prev_features, 1)
        # bottleneck 1x1 conv: reduce the number of channels
        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))
        # 3x3 conv: extract new features
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return new_features


class _DenseBlock(nn.ModuleDict):
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(
                num_input_features=num_input_features + i * growth_rate,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate
            )
            self.add_module('denselayer%d' % (i + 1), layer)

    def forward(self, x):
        features = [x]
        for name, layer in self.items():
            new_features = layer(features)
            features.append(new_features)
        return torch.cat(features, 1)


class _Transition(nn.Sequential):
    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))


class DenseNet(nn.Module):
    def __init__(
            self,
            growth_rate=32,
            block_config=(6, 12, 24, 16),
            num_init_features=64,
            bn_size=4,
            drop_rate=0.0,
            num_classes=1000
    ):
        super(DenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(
            OrderedDict(
                [
                    ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
                    ('norm0', nn.BatchNorm2d(num_init_features)),
                    ('relu0', nn.ReLU(inplace=True)),
                    ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
                ]
            )
        )

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate
            )
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(
                    num_input_features=num_features,
                    num_output_features=num_features // 2
                )
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        # Final batch norm (added to self.features so that it is actually applied in forward)
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))

        self.classifier = nn.Linear(num_features, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, output_size=(1, 1))
        out = torch.flatten(out, start_dim=1)
        out = self.classifier(out)
        return out


def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress, **kwargs):
    model = DenseNet(growth_rate=growth_rate, block_config=block_config, num_init_features=num_init_features, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model


def densenet121(pretrained=False, progress=True, **kwargs):
    return _densenet(
        arch='densenet121',
        growth_rate=32,
        block_config=(6, 12, 24, 16),
        num_init_features=64,
        pretrained=pretrained,
        progress=progress,
        **kwargs
    )


def densenet169(pretrained=False, progress=True, **kwargs):
    return _densenet(
        arch='densenet169',
        growth_rate=32,
        block_config=(6, 12, 32, 32),
        num_init_features=64,
        pretrained=pretrained,
        progress=progress,
        **kwargs
    )


def densenet201(pretrained=False, progress=True, **kwargs):
    return _densenet(
        arch='densenet201',
        growth_rate=32,
        block_config=(6, 12, 48, 32),
        num_init_features=64,
        pretrained=pretrained,
        progress=progress,
        **kwargs
    )


if __name__ == '__main__':
    inputs = torch.randn(1, 3, 224, 224)
    model = densenet121(num_classes=10)
    out = model(inputs)
    print(out.shape)
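
One caveat on the pretrained branch: the checkpoints behind model_urls were serialized with torchvision's old layer names ('norm.1', 'conv.1', ...), so loading them straight into this model will fail with missing/unexpected keys. Below is a minimal sketch of the key remapping that torchvision itself applies before load_state_dict; load_pretrained is a hypothetical helper, not part of your code.

import re

from torch.hub import load_state_dict_from_url


def load_pretrained(model, url, progress=True):
    # Old checkpoints use keys such as 'denselayer1.norm.1.weight';
    # rename them to 'denselayer1.norm1.weight' to match this module layout.
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
    )
    state_dict = load_state_dict_from_url(url, progress=progress)
    for key in list(state_dict.keys()):
        match = pattern.match(key)
        if match:
            new_key = match.group(1) + match.group(2)
            state_dict[new_key] = state_dict.pop(key)
    model.load_state_dict(state_dict)
    return model

If num_classes is not 1000, drop the 'classifier.weight' / 'classifier.bias' entries from state_dict first and call load_state_dict(state_dict, strict=False).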

Training code:

import os
import math
import argparse
import time

import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.tensorboard import SummaryWriter

from utils.data_utils import get_dataset_dataloader
from utils.train_val_utils import train_one_epoch, evaluate
from models.base_model import BaseModel


def main(args):
    device = torch.device(args.device if torch.cuda.is_available() else "cpu")

    if not os.path.exists("./weights"):
        os.makedirs("./weights")

    tb_writer = SummaryWriter('./logs')

    # build the datasets and dataloaders
    train_dataset, val_dataset, train_dataloader, val_dataloader = get_dataset_dataloader(args.data_path, args.batch_size)

    # build the model
    model = BaseModel(name=args.model_name, num_classes=args.num_classes).to(device)

    # optimizer
    optimizer = optim.Adam(params=model.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=5E-5)

    # cosine learning-rate schedule: decays from lr down to lr * lrf
    lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    best_acc = 0.0

    start = time.time()
    for epoch in range(args.epochs):
        # train
        train_loss, train_acc = train_one_epoch(
            model=model,
            optimizer=optimizer,
            dataloader=train_dataloader,
            device=device,
            epoch=epoch
        )

        scheduler.step()

        # validate
        val_loss, val_acc = evaluate(
            model=model,
            dataloader=val_dataloader,
            device=device,
            epoch=epoch
        )

        # tensorboard
        tags = ['train_loss', 'train_acc', 'val_loss', 'val_acc', 'learning_rate']
        tb_writer.add_scalar(tags[0], train_loss, epoch)
        tb_writer.add_scalar(tags[1], train_acc, epoch)
        tb_writer.add_scalar(tags[2], val_loss, epoch)
        tb_writer.add_scalar(tags[3], val_acc, epoch)
        tb_writer.add_scalar(tags[4], optimizer.param_groups[0]['lr'], epoch)
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), "./weights/" + args.model_name + ".pth")
    end = time.time()
    print("Training 耗时为:{:.1f}".format(end - start))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='densenet')
    parser.add_argument('--num_classes', type=int, default=7)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--lrf', type=float, default=0.01)
    parser.add_argument('--data_path', type=str, default=r'C:\Users\11831\Desktop\FinalProject\Code\data\training')
    parser.add_argument('--flag', type=bool, default=False)
    parser.add_argument('--device', default='cuda:0')

    opt = parser.parse_args()
    print(opt)
    main(opt)
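
Two things in this training setup are worth revisiting: 10 epochs from random initialization is usually far too few for a model of this size, and get_dataset_dataloader is not shown, so it is unclear how much augmentation is applied. Below is a hedged sketch of training/validation transforms that typically help on dermoscopy images; the normalization statistics and parameter values are assumptions, not taken from your code.

from torchvision import transforms

train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),   # random crop + rescale
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),                        # lesions have no canonical orientation
    transforms.ColorJitter(brightness=0.1, contrast=0.1),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

val_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

Plug these into your own get_dataset_dataloader and raise --epochs well above 10 before judging the final accuracy.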

Test code

import os
import json
import argparse

import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt

from models.base_model import BaseModel


def main(args):
    global error_num
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    data_transform = transforms.Compose(
        [
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ]
    )

    img_path = args.img_path
    assert os.path.exists(img_path), f"file {img_path} does not exist."
    img = Image.open(img_path)
    plt.imshow(img)
    img = data_transform(img)

    # [C, H, W] -> [1, C, H, W]
    img = torch.unsqueeze(img, dim=0)

    json_path = './class_indices.json'
    assert os.path.exists(json_path), f"file {json_path} does not exist."
    with open(json_path, 'r') as json_file:
        class_indict = json.load(json_file)

    model = BaseModel(name=args.model_name, num_classes=args.num_classes).to(device)

    model.load_state_dict(torch.load(args.model_weight_path, map_location=device))
    model.eval()
    with torch.no_grad():
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()

    print_res = "real: {}   predict: {}   prob: {:.3f}".format(args.real_label, class_indict[str(predict_cla)],
                                                               predict[predict_cla].numpy())
    print(print_res)
    if args.real_label != class_indict[str(predict_cla)]:
        error_num += 1


root = r'C:\Users\11831\Desktop\FinalProject\Code\data\testing'
# iterate over the folders; each folder corresponds to one class
flower_classes = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))]

# sort to keep the class order consistent
flower_classes.sort()

# supported image extensions
images_format = [".jpg", ".JPG", ".png", ".PNG"]

all_images = []
# iterate over the files in each class folder
for cla in flower_classes:
    cla_path = os.path.join(root, cla)
    # collect the path and class label of every image in this class folder
    images = [[os.path.join(cla_path, i), cla] for i in os.listdir(cla_path)
              if os.path.splitext(i)[-1] in images_format]
    all_images.extend(images)

total_num = len(all_images)

cur_num = 0
if __name__ == '__main__':
    error_num = 0
    for each in all_images:
        cur_num += 1
        path = each[0]
        label = each[1]
        print('{}/{}'.format(cur_num, total_num))
        parser = argparse.ArgumentParser()
        parser.add_argument('--img_path', type=str, default=path)
        parser.add_argument('--real_label', type=str, default=label)
        parser.add_argument('--model_name', type=str, default='densenet')
        parser.add_argument('--num_classes', type=int, default=7)
        parser.add_argument('--model_weight_path', type=str, default='./weights/densenet.pth')

        args = parser.parse_args()
        main(args)

    print("error_num={},total_num={},correct_ratio={}".format(error_num, total_num, 1 - error_num / total_num))

Runtime results and error messages


My approach and what I have tried

Dataset used: https://www.kaggle.com/datasets/kmader/skin-cancer-mnist-ham10000
The number of samples per class in this dataset is highly imbalanced.
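
In HAM10000 the nv class alone accounts for roughly two thirds of the images, so an unweighted cross-entropy model can sit near 70% accuracy mostly by favouring the majority classes. A minimal sketch of two standard remedies (oversampling with WeightedRandomSampler, or a class-weighted loss); it assumes train_dataset exposes an integer label per sample via .targets, as torchvision's ImageFolder does:

from collections import Counter

import torch
from torch.utils.data import DataLoader, WeightedRandomSampler

targets = train_dataset.targets                      # assumed: one integer label per sample
class_counts = Counter(targets)

# Option 1: oversample rare classes so every batch is roughly balanced.
sample_weights = [1.0 / class_counts[t] for t in targets]
sampler = WeightedRandomSampler(sample_weights, num_samples=len(targets), replacement=True)
train_dataloader = DataLoader(train_dataset, batch_size=8, sampler=sampler)

# Option 2: keep the dataloader and weight the loss by inverse class frequency instead.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
num_classes = 7
class_weights = torch.tensor(
    [len(targets) / (num_classes * class_counts[c]) for c in range(num_classes)],
    dtype=torch.float,
)
criterion = torch.nn.CrossEntropyLoss(weight=class_weights.to(device))

It is also worth reporting per-class recall or a confusion matrix in addition to overall accuracy, since a single accuracy number hides the imbalance.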

Desired result

I would like the training, validation, and test accuracy to all reach 90%.

Just add an attention mechanism and it should be fine.
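
For reference, a minimal squeeze-and-excitation (SE) channel-attention block is sketched below. Where to insert it (e.g. on the output of each dense block or transition layer) is a design choice; SEBlock here is a generic example, not something guaranteed to lift accuracy on its own.

import torch.nn as nn


class SEBlock(nn.Module):
    """Channel attention: squeeze (global average pool) then excite (two FC layers)."""

    def __init__(self, channels, reduction=16):
        super(SEBlock, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        w = x.mean(dim=(2, 3))            # squeeze: [B, C] per-channel statistics
        w = self.fc(w).view(b, c, 1, 1)   # excite: per-channel weights in (0, 1)
        return x * w                      # reweight the feature maps

Because _Transition is an nn.Sequential, one simple placement is self.add_module('se', SEBlock(num_output_features)) at the end of _Transition.__init__, which applies channel attention right after the pooling step.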

Switch to a different model or tune the hyperparameter settings; different models act on the feature layers differently.
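
Concretely, the single change most likely to get HAM10000 well past 70% is starting from ImageNet-pretrained weights rather than training from scratch for 10 epochs. A hedged sketch using torchvision's own densenet121 (so the checkpoint keys match without any renaming) and replacing only the classifier head:

import torch.nn as nn
from torchvision import models

# ImageNet-pretrained DenseNet-121 with a new 7-class head.
# On torchvision >= 0.13 use weights=models.DenseNet121_Weights.IMAGENET1K_V1 instead of pretrained=True.
model = models.densenet121(pretrained=True)
model.classifier = nn.Linear(model.classifier.in_features, 7)

# Optional: freeze the backbone for the first few epochs and fine-tune only the head,
# then unfreeze everything and continue with a smaller learning rate.
for name, param in model.named_parameters():
    if not name.startswith('classifier'):
        param.requires_grad = False

With a pretrained backbone, ImageNet normalization in the transforms, and the class-imbalance handling above, accuracies in the 80-90% range are commonly reported for fine-tuned CNNs on this dataset. Whether you reach 90% also depends on how the split is made: HAM10000 contains multiple images of the same lesion, and images of one lesion should not appear in both the training and test sets.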