Graduation project question (May 27): how can I plot the loss for each epoch during training? Please attach code if possible.

import numpy as np
import torch
import torch.utils.data as Data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import h5py
from SENET import SEBlock
import matplotlib.pyplot as plt


f = h5py.File('all_xdata.h5', 'r')


bearing1_1_train = f['bearing1_1_train']
bearing1_2_train = f['bearing1_2_train']
bearing1_3_train = f['bearing1_3_train']
bearing1_4_train = f['bearing1_4_train']
bearing1_5_train = f['bearing1_5_train']
bearing1_6_train = f['bearing1_6_train']
bearing1_7_train = f['bearing1_7_train']
bearing2_1_train = f['bearing2_1_train']
bearing2_2_train = f['bearing2_2_train']
bearing2_3_train = f['bearing2_3_train']
bearing2_4_train = f['bearing2_4_train']
bearing2_5_train = f['bearing2_5_train']
bearing2_6_train = f['bearing2_6_train']
bearing2_7_train = f['bearing2_7_train']
bearing3_1_train = f['bearing3_1_train']
bearing3_2_train = f['bearing3_2_train']
bearing3_3_train = f['bearing3_3_train']
bearing1_1_target = f['bearing1_1_target']
bearing1_2_target = f['bearing1_2_target']
bearing1_3_target = f['bearing1_3_target']
bearing1_4_target = f['bearing1_4_target']
bearing1_5_target = f['bearing1_5_target']
bearing1_6_target = f['bearing1_6_target']
bearing1_7_target = f['bearing1_7_target']
bearing2_1_target = f['bearing2_1_target']
bearing2_2_target = f['bearing2_2_target']
bearing2_3_target = f['bearing2_3_target']
bearing2_4_target = f['bearing2_4_target']
bearing2_5_target = f['bearing2_5_target']
bearing2_6_target = f['bearing2_6_target']
bearing2_7_target = f['bearing2_7_target']
bearing3_1_target = f['bearing3_1_target']
bearing3_2_target = f['bearing3_2_target']
bearing3_3_target = f['bearing3_3_target']



# Training set
train_data = np.concatenate((bearing2_4_train, bearing1_1_train, bearing1_2_train, bearing3_2_train,
                             bearing1_4_train, bearing1_5_train, bearing1_6_train, bearing1_7_train,
                             bearing2_1_train, bearing2_2_train, bearing3_3_train, bearing2_6_train,
                             bearing2_5_train, bearing2_3_train, bearing2_7_train, bearing3_1_train), axis=0)

train_target = np.concatenate((bearing2_4_target, bearing1_1_target, bearing1_2_target, bearing3_2_target,
                               bearing1_4_target, bearing1_5_target, bearing1_6_target, bearing1_7_target,
                               bearing2_1_target, bearing2_2_target, bearing3_3_target, bearing2_6_target,
                               bearing2_5_target, bearing2_3_target, bearing2_7_target, bearing3_1_target), axis=0)


test_data = bearing1_3_train
test_targets = bearing1_3_target
test_targets = np.array(test_targets)
# np.concatenate((a,b),axis=)

# Build the network
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet,self).__init__()

        # two convolution + SE-attention + pooling stages operating along the time axis
        self.conv1 = nn.Conv2d(in_channels=4, out_channels=64, kernel_size=(10,1), stride=1)
        self.pool1 = nn.MaxPool2d(kernel_size=(10,1), stride=10)
        self.seblock1 = SEBlock(64, r=16)

        self.conv2 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(10,1), stride=1)
        self.pool2 = nn.MaxPool2d(kernel_size=(10,1), stride=10)
        self.seblock2 = SEBlock(32, r=8)
        # regression head: 5*32 flattened features -> 10 -> 1 output in [0, 1] (sigmoid in forward)
        self.fc1 = nn.Linear(5*32, 10)
        self.fc2 = nn.Linear(10, 1)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.seblock1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.seblock2(x)
        x = self.pool2(x)
        x = x.view(-1, 5*32)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)     # F.sigmoid is deprecated; torch.sigmoid is equivalent
        return x

net = ConvNet()
Loss = nn.MSELoss()
LR = 0.001    
optimizer = optim.SGD(net.parameters(), lr=LR)   
BATCH_SIZE = 10   
train = torch.tensor(train_data)
targets = torch.tensor(train_target)
train_dataset = Data.TensorDataset(train,targets)
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=False)

# Training
for epoch in range(500):  
    for step, (batch_train, batch_targets) in enumerate(train_loader):
        batch_train = batch_train.reshape((batch_train.shape[0], 4, -1, 1))
        output = net(batch_train.float())
        loss = Loss(output, batch_targets.float())   # cast targets to float32 to match the network output
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch:', epoch, '|step:', step, '|loss:', loss.item())
prediction = []
net.eval()                                    # evaluation mode for inference
with torch.no_grad():                         # gradients are not needed for prediction
    for k in range(test_data.shape[0]):
        data = test_data[k, :, :]
        data = data.reshape((4, -1, 1))
        data = torch.tensor(data).unsqueeze(0).float()   # add the batch dimension
        output = net(data)
        prediction.append(output.item())
prediction = np.array(prediction)
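
To plot the loss for each epoch, the simplest option with the matplotlib that is already imported is to average the step losses over every epoch into a list and plot that list once training finishes. Below is a minimal sketch reusing net, Loss, optimizer and train_loader from the code above; epoch_losses, running_loss and the axis labels are illustrative names, not part of the original script.

# Collect the average loss of every epoch, then plot the curve at the end.
epoch_losses = []                                   # illustrative name: one value per epoch
for epoch in range(500):
    running_loss = 0.0
    for step, (batch_train, batch_targets) in enumerate(train_loader):
        batch_train = batch_train.reshape((batch_train.shape[0], 4, -1, 1))
        output = net(batch_train.float())
        loss = Loss(output, batch_targets.float())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()                 # .item() gives a plain Python float
    epoch_losses.append(running_loss / len(train_loader))
    print('epoch:', epoch, '|avg loss:', epoch_losses[-1])

plt.plot(range(len(epoch_losses)), epoch_losses)
plt.xlabel('epoch')
plt.ylabel('MSE loss')
plt.title('training loss per epoch')
plt.show()

If you want the figure saved to disk instead of shown in a window, call plt.savefig('loss.png') in place of plt.show().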

Since you're using torch, I'd suggest simply logging the loss and plotting it with tensorboardX; just import tensorboardX, and a pip install is all you need.
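
A minimal sketch of that tensorboardX approach: the 'runs/loss_demo' directory and the dummy loop below are only there to show the call signature; in the real script you would call add_scalar with the per-epoch loss inside the training loop.

from tensorboardX import SummaryWriter   # pip install tensorboardX

writer = SummaryWriter('runs/loss_demo')          # illustrative log directory

for epoch in range(5):                            # dummy loop standing in for the real training loop
    epoch_avg_loss = 1.0 / (epoch + 1)            # placeholder value standing in for the real epoch loss
    writer.add_scalar('train/loss', epoch_avg_loss, epoch)   # log one scalar point per epoch

writer.close()

Then run tensorboard --logdir runs and open the printed address in a browser to see the loss curve.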
