I'm using a fully connected neural network to classify the MNIST dataset, but the run only prints Test Loss:0.000000, Acc:0.000000.
The code is as follows:
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# Define the hyperparameters
batch_size = 32
learning_rate = 0.01
epochs = 100
#input_size = 28*28
#hidden_size1 = 400
#hidden_size2 = 300
#hidden_size3 = 200
#hidden_size4 = 100
# Build the network with activation functions and batch normalization
class Batch_Net(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, n_hidden_3, n_hidden_4, out_dim):
        super(Batch_Net, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, n_hidden_3), nn.BatchNorm1d(n_hidden_3), nn.ReLU(True))
        self.layer4 = nn.Sequential(nn.Linear(n_hidden_3, n_hidden_4), nn.BatchNorm1d(n_hidden_4), nn.ReLU(True))
        self.layer5 = nn.Sequential(nn.Linear(n_hidden_4, out_dim))

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        return x
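# Note: nn.BatchNorm1d (and nn.Linear as used here) operate on inputs of shape (batch_size, features),
# so each 1x28x28 image is flattened into a 784-dimensional vector with
# img.view(img.size(0), -1) before being passed to the network (see the loops below).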
# Data preprocessing. transforms.ToTensor() converts the images into PyTorch tensors and scales the values to the 0~1 range.
# transforms.Normalize() standardizes the data: it subtracts the mean and divides by the standard deviation; its two arguments are the mean and the standard deviation.
# transforms.Compose() chains these preprocessing operations together (a quick sanity check follows below).
data_tf = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize([0.5], [0.5])]
)
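# Quick, optional check of the preprocessing (illustrative only, can be deleted):
# ToTensor() maps pixel values into [0, 1], and Normalize([0.5], [0.5]) then computes
# (x - 0.5) / 0.5, so the final values lie in [-1, 1]. For example:
#   x = torch.zeros(1, 28, 28)                           # an all-black image after ToTensor()
#   print(transforms.Normalize([0.5], [0.5])(x).min())   # tensor(-1.)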
# Download the datasets
train_dataset = datasets.MNIST(
    root='./data', train=True, transform=data_tf, download=True
)
test_dataset = datasets.MNIST(root='./data', train=False, transform=data_tf)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
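# Note: with the standard MNIST split (60,000 training / 10,000 test images) and batch_size = 32,
# train_loader yields 1875 mini-batches and test_loader yields 313 per pass, so the 'epoch'
# counter in the training loop below counts mini-batch iterations rather than full epochs.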
# Select the model
#model = net.simpleNet(28*28,300,100,10)
#model = Activation(28*28,300,100,10)
model = Batch_Net(28*28, 400, 300, 200, 100, 10)
#if torch.cuda.is_available():
#    model = model.cuda()
# Define the loss function and the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
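# Note: nn.CrossEntropyLoss applies log-softmax internally, so layer5 correctly outputs raw
# logits (the final nn.Linear has no activation).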
# Train the model
epoch = 0
for data in train_loader:
    img, label = data
    img = img.view(img.size(0), -1)
    if torch.cuda.is_available():
        img = img.cuda()
        label = label.cuda()
    else:
        img = Variable(img)
        label = Variable(label)
    out = model(img)
    loss = criterion(out, label)
    print_loss = loss.data.item()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    epoch += 1
    if epoch % 100 == 0:
        print('epoch:{}, loss:{:.4}'.format(epoch, loss.data.item()))
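# Note: model.eval() in the test section switches the BatchNorm1d layers from per-batch
# statistics to their accumulated running mean/variance; call model.train() again before
# any further training.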
# Test the model
model.eval()
eval_loss = 0
eval_acc = 0
for data in test_loader:
    img, label = data
    img = img.view(img.size(0), -1)
    if torch.cuda.is_available():
        img = img.cuda()
        label = label.cuda()
    out = model(img)
    loss = criterion(out, label)
    eval_loss += loss.data.item() * label.size(0)
    _, pred = torch.max(out, 1)
    num_correct = (pred == label).sum()
    eval_acc += num_correct.item()
print('Test Loss:{:.6f}, Acc:{:.6f}'.format(
    eval_loss / (len(test_dataset)),
    eval_acc / (len(test_dataset))
))
The output is as follows:
epoch:100, loss:0.9135
epoch:200, loss:0.5194
epoch:300, loss:0.3021
epoch:400, loss:0.4691
epoch:500, loss:0.3569
epoch:600, loss:0.4184
epoch:700, loss:0.1398
epoch:800, loss:0.1943
epoch:900, loss:0.3422
epoch:1000, loss:0.1451
epoch:1100, loss:0.1886
epoch:1200, loss:0.08195
epoch:1300, loss:0.1362
epoch:1400, loss:0.3054
epoch:1500, loss:0.06256
epoch:1600, loss:0.1101
epoch:1700, loss:0.09489
epoch:1800, loss:0.05862
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Test Loss:0.000000, Acc:0.000000
Where is the problem here, and how should I fix it?