Problem with a Python image classification script

I ran into this problem while writing a Python image classifier. My Python is not very solid yet — could someone take a look?

```python
from __future__ import print_function
import argparse

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR

from utils.config_utils import read_args, load_config, Dict2Object


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output


def train(args, model, device, train_loader, optimizer, epoch):
    """
    train the model for one epoch and return the training accuracy and loss
    :param args: input arguments
    :param model: neural network model
    :param device: the device where the model is stored
    :param train_loader: data loader
    :param optimizer: optimizer
    :param epoch: current epoch
    :return: training accuracy (%) and average training loss
    """
    model.train()
    correct = 0
    train_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()

        predicted = output.argmax(dim=1)              # predicted class index per sample
        correct += predicted.eq(target).sum().item()
        train_loss += loss.item()

    # accuracy over all samples; loss averaged over batches (nll_loss already averages within a batch)
    training_acc = 100. * correct / len(train_loader.dataset)
    training_loss = train_loss / len(train_loader)
    return training_acc, training_loss


def test(model, device, test_loader):
    """
    test the model and return the testing accuracy and loss
    :param model: neural network model
    :param device: the device where the model is stored
    :param test_loader: data loader
    :return: testing accuracy (%) and average testing loss
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            outputs = model(data)
            test_loss += F.nll_loss(outputs, target, reduction='sum').item()  # sum the per-sample losses
            predicted = outputs.argmax(dim=1)
            correct += predicted.eq(target).sum().item()

    testing_acc = 100. * correct / len(test_loader.dataset)
    testing_loss = test_loss / len(test_loader.dataset)
    return testing_acc, testing_loss


def plot(epoches, performance):
    """
    plot the model performance over the recorded epochs
    :param epoches: recorded epochs
    :param performance: recorded performance values
    :return:
    """
    plt.title('model performance')
    plt.xlabel('epoch')
    plt.ylabel('performance')
    plt.plot(epoches, performance)
    plt.show()


def run(config):
    use_cuda = not config.no_cuda and torch.cuda.is_available()
    use_mps = not config.no_mps and torch.backends.mps.is_available()

    torch.manual_seed(config.seed)

    if use_cuda:
        device = torch.device("cuda")
    elif use_mps:
        device = torch.device("mps")
    else:
        device = torch.device("cpu")

    train_kwargs = {'batch_size': config.batch_size, 'shuffle': True}
    test_kwargs = {'batch_size': config.test_batch_size, 'shuffle': True}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True, }
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    # download data
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset1 = datasets.MNIST('./data', train=True, download=True, transform=transform)
    dataset2 = datasets.MNIST('./data', train=False, transform=transform)

    """add random seed to the DataLoader, pls modify this function"""
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=config.lr)

    """record the performance"""
    epoches = []
    training_accuracies = []
    training_loss = []
    testing_accuracies = []
    testing_loss = []

    scheduler = StepLR(optimizer, step_size=1, gamma=config.gamma)
    for epoch in range(1, config.epochs + 1):
        train_acc, train_loss = train(config, model, device, train_loader, optimizer, epoch)
        test_acc, test_loss = test(model, device, test_loader)
        scheduler.step()

        """update the records"""
        epoches.append(epoch)
        training_accuracies.append(train_acc)
        training_loss.append(train_loss)
        testing_accuracies.append(test_acc)
        testing_loss.append(test_loss)

    """plotting training performance with the records"""
    plot(epoches, training_accuracies)
    plot(epoches, training_loss)

    """plotting testing performance with the records"""
    plot(epoches, testing_accuracies)
    plot(epoches, testing_loss)

    if config.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")






if __name__ == '__main__':
    arg = read_args()

    """toad training settings"""
    config = load_config(arg)

    """train model and record results"""
    run(config)
```

   


Error message:

```
test setup failed
file C:\Users\admin\Desktop\CSC1004-python-project-main\main.py, line 71
  def test(model, device, test_loader):
E       fixture 'model' not found
>       available fixtures: anyio_backend, anyio_backend_name, anyio_backend_options, cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_testsuite_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory
>       use 'pytest --fixtures [testpath]' for help on them.


```

The `model` parameter of `test()` is not one of the available fixtures listed after `> available fixtures: ...` in the error. When pytest collects this file, it treats any function whose name starts with `test` as a test case and tries to supply each of its parameters from a fixture, which is why it reports that the fixture `model` cannot be found.
The basics of Python debugging: https://ask.csdn.net/questions/7908322/54130133
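
If the goal is simply to run the training script rather than collect it with pytest, one straightforward fix is to rename `test` so it no longer matches pytest's `test*` naming convention and call it under the new name in `run()`. A minimal sketch (the name `evaluate` is only an illustrative choice, not part of the original assignment):

```python
import torch
import torch.nn.functional as F

# Renamed copy of the original test() function. Only the name changes, so pytest
# no longer collects it as a test case and no longer tries to inject fixtures
# for its parameters.
def evaluate(model, device, test_loader):
    model.eval()
    total_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += output.argmax(dim=1).eq(target).sum().item()
    n = len(test_loader.dataset)
    return 100. * correct / n, total_loss / n

# In run(), call it under the new name:
#     test_acc, test_loss = evaluate(model, device, test_loader)
```

Alternatively, keep the name and run the file with the Python interpreter (plus whatever arguments `read_args()` expects) instead of through pytest; the error only appears when pytest collects the module.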

  • You can refer to the answer to this question and see whether it helps: https://ask.csdn.net/questions/692773
  • I also found a blog post that may help: "python基本语法总结(超级全面,细致,只用一周就可以入门python到实践)", which is continuously updated.
  • In addition, the "车间调度问题,Python实现" section of the blog post "车间调度问题简单,Python实现" may also be useful; you can read the excerpt below or jump to the original post:
  • Following https://www.bilibili.com/video/BV1Wy4y1H74p?spm_id_from=333.999.0.0, it uses Python to implement the chromosome encoding for a standard job-shop scheduling instance, a simple assignment of operations, and a simple Gantt chart.
    I ran into many problems along the way because I am still a beginner, and there is a great deal left to learn.

    Source file: https://download.csdn.net/download/fwx1111/23404436

    # 2021.09.17
    import numpy as np
    import random
    import pandas as pd
    import matplotlib.pyplot as plt
    
    
    # num_job = 3      # number of jobs
    # num_machine = 4  # number of machines
    pop = 6            # population size
    
    data1 = [
        [2,1,0,3,1,6,3,7,5,3,4,6],
        [1,8,2,5,4,10,5,10,0,10,3,4],
        [2,5,3,4,5,8,0,9,1,1,4,7],
        [1,5,0,5,2,5,3,3,4,8,5,9],
        [2,9,1,3,4,5,5,4,0,3,3,1],
        [1,3,3,3,5,9,0,10,4,4,2,1],
    ]
    data = np.array(data1)
    
    #class Encode():
    
    # generate one chromosome encoding; parameters are num_job and num_machine
    def createChrome(num_job, num_machine):
        a = []
        for i in range(1,num_job+1):
            a.append(i)
    
        chrome = []
        for j in range(num_machine):
            chrome += a                   # build the initial encoding
        random.shuffle(chrome)            # shuffle the encoding to randomize the operation order
        return chrome
    
    # generate the initial population from the population size, number of jobs and number of machines
    def createChromes(num_job, num_machine, pop):
        chromes = np.zeros((pop, num_job * num_machine),dtype=int)
        for i in range(pop):
            chromes[i,:] = createChrome(num_job,num_machine)
        return chromes
    
    # initialize the machine/processing-time data (shift the machine ids in the even columns to start from 1)
    def initData(data):
        for i in range(0,11,2):
            data[:,i] = data[:,i]+1
        return data
    
    
    def createSchedule(data,chrome):
        num_job = np.size(data,0)
        num_machine = np.size(data,1) // 2
        schedule = np.zeros((num_job * num_machine,5))
    
        # intermediate arrays: earliest start time and next operation index for each job
        jobCanStartTime = np.zeros((1, num_job),dtype=int)
        jobProcessId = np.ones((1, num_job),dtype=int)
    
        
        # decode: generate a chromosome and shift the machine ids in the data table
        chrome = createChrome(num_job,num_machine)
        data = initData(data)
        for i in range(num_job * num_machine):
            # current job id, its next operation, and that operation's machine and processing time
            nowJobId = chrome[i]-1
            nowProcessId = jobProcessId[0,nowJobId]
            nowMachId = data[nowJobId, 2*nowProcessId-2]
            nowProcTime = data[nowJobId, 2*nowProcessId -1]
            
            machSch = schedule[schedule[:,1]==nowMachId,:]
            jobCanST = jobCanStartTime[0,nowJobId]
            if np.size(machSch,0) == 0:    # nothing has been scheduled on this machine yet
                startTime = jobCanStartTime[0,nowJobId]
                endTime = startTime + nowProcTime
            else:   # the machine already has scheduled operations
                machSch = machSch[np.argsort(machSch[:,3]),:]
                rows = np.size(machSch,0)
                # check whether the current operation can be inserted before the first scheduled operation
                done = 0
                if jobCanST < machSch[0,3]:
                    if machSch[0,3] - jobCanST > nowProcTime:
                        startTime = jobCanST
                        endTime = startTime + nowProcTime
                        done = 1
                if done == 0 :
    
                    for j in range(rows):
                        if jobCanStartTime[0,nowJobId] < machSch[j,3]:
                            if machSch[j,3] - max(jobCanST,machSch[j-1,4]) > nowProcTime:
                                startTime = max(jobCanST,machSch[j-1,4])
                                endTime = startTime + nowProcTime
                                done = 1 
                                break
            if done == 0:  # the operation cannot be inserted before any existing operation on this machine
                    startTime = max(jobCanST,machSch[rows-1,4])
                    endTime = startTime + nowProcTime
                    
            schedule[i,0] = nowJobId + 1 
            schedule[i,1] = nowMachId
            schedule[i,2] = nowProcessId
            schedule[i,3] = startTime
            schedule[i,4] = endTime
            jobCanStartTime[0,nowJobId] = endTime
            jobProcessId[0,nowJobId] += 1 
    
    
        return schedule
    
    
    # draw a Gantt chart from the schedule
    def drawGant(schedule):
        rows = np.size(schedule,0)
        num_job = int(max(schedule[:,1]))
        mycolor = np.random.random((num_job,3))
        mycolor = list(mycolor)
    
        for i in range(rows):
            x = [schedule[i, 3],schedule[i,4]]
            y = [schedule[i,1],schedule[i,1]]
            n = int(schedule[i,0])
            plt.plot(x,y,linewidth=8.0,color=mycolor[n-1] )
        plt.show()
    
    
    
    
    e = createChromes(6, 6, 6)    # 6 jobs, 6 machines, population of 6
    
    
    s = createSchedule(data, e)   # note: createSchedule generates its own chromosome internally
    print(s)

    drawGant(s)
    