restore() missing 1 required positional argument: 'trainable'。已经修改参数,但无效果
升级paddlenlp到最新版本,检查你的参数的顺序和类型是否匹配
这里博主针对MNIST数据集搭建了一个简单的分类网络并进行训练保存,如果各位有自己已经训练好的动态图模型则大可不必进行这个步骤。
import paddle
from paddle import nn
from paddle.vision.transforms import ToTensor
from tqdm import tqdm
class MYCONV(nn.Layer):
    """Conv2D -> BatchNorm -> ReLU building block.

    Args:
        in_chanel: number of input channels.
        out_chanel: number of output channels.
        kernel_size: convolution kernel size (default (3, 3)).
        stride: convolution stride (default (1, 1)).
        padding: padding mode passed straight to Conv2D (default "SAME").
    """

    def __init__(self, in_chanel, out_chanel, kernel_size=(3, 3), stride=(1, 1), padding="SAME"):
        super(MYCONV, self).__init__()
        # Keep conv / bn / relu as separate sublayers so each shows up in summary().
        self.conv = nn.Conv2D(in_chanel, out_chanel, kernel_size, stride, padding)
        self.bn = nn.BatchNorm(out_chanel)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Apply convolution, batch normalization and ReLU in sequence."""
        return self.relu(self.bn(self.conv(x)))
class Mnist(nn.Layer):
    """Small CNN classifier for single-channel 28x28 MNIST images (10 classes).

    Architecture: two conv blocks -> 2x2 average pool -> two conv blocks ->
    global average pool -> Linear(32, 16) -> ReLU -> Dropout(0.2) -> Linear(16, 10).
    Returns raw logits; the paired CrossEntropyLoss applies softmax itself.
    """

    def __init__(self):
        super(Mnist, self).__init__()
        self.conv1 = MYCONV(1, 16, kernel_size=(3, 3), stride=(1, 1), padding="SAME")
        self.conv2 = MYCONV(16, 32, kernel_size=(3, 3), stride=(1, 1), padding="SAME")
        self.avgpool1 = nn.AvgPool2D(kernel_size=2, stride=2, padding=0)
        self.conv3 = MYCONV(32, 64, kernel_size=(3, 3), stride=(1, 1), padding="SAME")
        self.conv4 = MYCONV(64, 32, kernel_size=(3, 3), stride=(1, 1), padding="SAME")
        # Global average pooling collapses spatial dims: (N, 32, H, W) -> (N, 32, 1, 1).
        self.gap = nn.AdaptiveAvgPool2D(1)
        self.flatten = nn.Flatten()
        self.linear_1 = nn.Linear(32, 16)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)
        self.linear_2 = nn.Linear(16, 10)

    def forward(self, x):
        """Return (N, 10) raw class logits for input images x of shape (N, 1, H, W)."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.avgpool1(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.gap(x)
        x = self.flatten(x)
        x = self.linear_1(x)
        x = self.relu(x)
        # Bug fix: self.dropout was constructed but never applied; apply it here so
        # the p=0.2 regularization actually takes effect (identity in eval mode,
        # and Dropout has no parameters so saved state_dicts stay compatible).
        x = self.dropout(x)
        x = self.linear_2(x)
        return x
def train_model(train_loader, model, loss_fn, optim):
    """Run one training epoch over `train_loader`.

    Args:
        train_loader: paddle.io.DataLoader yielding (images, labels) batches.
        model: the nn.Layer to train (switched to train mode here).
        loss_fn: loss callable taking (logits, labels).
        optim: optimizer whose step()/clear_grad() are driven per batch.

    Returns:
        (mean_loss, mean_acc) over the epoch; (0.0, 0.0) for an empty loader.

    NOTE(review): the progress-bar caption reads the module-level globals
    `epoch` and `Epochs` — confirm both exist wherever this is called.
    """
    model.train()
    total_loss = 0.0
    total_acc = 0.0
    batches = 0
    # Bug fix: the original passed postfix=dict (the *class* object), which tqdm
    # only tolerates via its TypeError fallback; set_postfix below supplies the
    # postfix properly, so the argument is simply dropped.
    with tqdm(total=len(train_loader), desc=f'Epoch {epoch + 1}/{Epochs}', mininterval=0.3) as pbar:
        for data in train_loader():
            x_data = data[0]  # batch of training images
            y_data = data[1]  # batch of ground-truth labels
            predicts = model(x_data)
            # Loss (CrossEntropyLoss handles softmax internally) and accuracy.
            loss = loss_fn(predicts, y_data)
            acc = paddle.metric.accuracy(predicts, y_data)
            # Backprop, parameter update, then zero the accumulated gradients.
            loss.backward()
            optim.step()
            optim.clear_grad()
            total_loss += loss.item()
            total_acc += acc.item()
            batches += 1
            pbar.set_postfix(**{'total_loss': "%.4f" % (total_loss / batches),
                                'acc': "%.4f" % (total_acc / batches)})
            pbar.update(1)
    # Guard against an empty loader (original would have raised NameError here).
    if batches == 0:
        return 0.0, 0.0
    return total_loss / batches, total_acc / batches
def eval_model(test_loader, model, loss_fn):
    """Evaluate `model` on `test_loader`, print and return the mean metrics.

    Args:
        test_loader: paddle.io.DataLoader yielding (images, labels) batches.
        model: the nn.Layer to evaluate (switched to eval mode here; the caller
            is responsible for switching back to train mode afterwards).
        loss_fn: loss callable taking (logits, labels).

    Returns:
        (mean_loss, mean_acc); (0.0, 0.0) for an empty loader.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    batches = 0
    # Improvement: no autograd graph is needed for evaluation — run under
    # no_grad() to save memory and time.
    with paddle.no_grad():
        for data in test_loader():
            x_data = data[0]  # batch of test images
            y_data = data[1]  # batch of ground-truth labels
            predicts = model(x_data)
            total_loss += loss_fn(predicts, y_data).item()
            total_acc += paddle.metric.accuracy(predicts, y_data).item()
            batches += 1
    # Guard against an empty loader (original would have raised NameError here).
    mean_loss = total_loss / batches if batches else 0.0
    mean_acc = total_acc / batches if batches else 0.0
    print("Test result, loss is: {:.4f}, acc is: {:.4f}".format(mean_loss, mean_acc))
    return mean_loss, mean_acc
# Training script: build loaders, train for `Epochs` epochs, evaluate, save weights.
# Guarded so importing this module does not trigger a full training run. A plain
# `if` (not a function) keeps `epoch` / `Epochs` as module globals, which
# train_model's progress-bar caption reads.
if __name__ == "__main__":
    # Load the MNIST train/test datasets.
    train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=ToTensor())
    test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=ToTensor())
    # Wrap them with DataLoader for batching/shuffling.
    train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True)
    test_loader = paddle.io.DataLoader(test_dataset, batch_size=64, drop_last=True)

    mnist = Mnist()
    paddle.summary(mnist, (64, 1, 28, 28))

    # Number of training epochs (read as a global by train_model's tqdm caption).
    Epochs = 5
    optim = paddle.optimizer.Adam(parameters=mnist.parameters())
    # CrossEntropyLoss applies softmax and one-hot encoding internally, so the
    # model emits raw logits.
    loss_fn = paddle.nn.CrossEntropyLoss(
        weight=None,
        ignore_index=-100,
        reduction='mean',
        soft_label=False,
        axis=-1,
        use_softmax=True,
    )

    for epoch in range(Epochs):
        train_model(train_loader, mnist, loss_fn, optim)
        eval_model(test_loader, mnist, loss_fn)

    # Bug fix: the original ran eval_model a second time right after the loop,
    # duplicating the final epoch's evaluation — removed.
    # Save the trained dynamic-graph weights.
    paddle.save(mnist.state_dict(), "mnist.pdparams", protocol=4)