特征金字塔(降采样)训练分类报错:size mismatch, m1: [512 x 8], m2: [512 x 8]


import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorflow import keras
import numpy as np
from torch.autograd import Variable
import tensorflow as tf
import matplotlib.pyplot as plt
import paddle
import warnings
from paddle.metric import Accuracy
from torch.autograd import Variable
import matplotlib.image as imgg
from PIL import Image as pil
import os
import torchvision
from torchvision.transforms import ToPILImage
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms as tt
from torchvision.utils import make_grid
import time
from torch.autograd import Variable
BATCHSIZE=20
LR=0.01


feature_name1="J:\\multi-scale\\data_test\\data_33.txt"
feature_data1=np.loadtxt(feature_name1)
feature_data1=np.float32(feature_data1)
train_X1=feature_data1.reshape([100,32,32])

train_X1 = torch.tensor(train_X1)

feature_name2="J:\\multi-scale\\data_test\\data_17.txt"
feature_data2=np.loadtxt(feature_name2)
feature_data2=np.float32(feature_data2)
train_X2=np.reshape(feature_data2,(100,16,16))
train_X2 = torch.tensor(train_X2)


tag_name="J:\\multi-scale\\data_test\\ch_lable_33.txt"
tag_data=np.loadtxt(tag_name)
train_Y = np.int_(tag_data)
train_Y = torch.tensor(train_Y)
class Bottleneck(nn.Module):
    expansion = 2

    def __init__(self, in_planes, planes, stride):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
            
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)

            )

    def forward(self, x):

        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class FPN(nn.Module):
    def __init__(self, block, num_blocks):
        super(FPN, self).__init__()
        self.in_planes = 16

        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=7, stride=1, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(16)

        # Bottom-up layers
        self.layer1 = self._make_layer(block, 14, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=1)

        self.out=nn.Linear(1024,1)
        #如何改成8分类 self.out=nn.Linear(num,8)
      
    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def _upsample_add(self, x, y):
        
        _,_,H,W = y.size()
        return F.upsample(x, size=(H,W), mode='bilinear') + y

    def forward(self, x):
        self.eval()
        # Bottom-up
        c1 = F.relu(self.bn1(self.conv1(x)))
        c1 = F.max_pool2d(c1, kernel_size=2)
        print("fin %s " % (1),c1.shape)
        c2 = self.layer1(c1)
        c2 = F.max_pool2d(c2, kernel_size=2)
        print("fin %s " % (2),c2.shape)
        c3 = self.layer2(c2)
        c3 = F.max_pool2d(c3, kernel_size=2)
        print("fin %s " % (3),c3.shape)
        c3 = c3.view(x.size(0), -1)
        
        out=self.out(c3)
        return  out


def train(net):
    optimizer = torch.optim.Adam(net.parameters(),lr=LR)
    loss_func = nn.CrossEntropyLoss()
    for i in range(100):
        input=train_X1[i].reshape([-1,1,32,32]) 
        ou=net(input)
        print(ou)
        b_y=train_Y[i]

        loss=loss_func(ou,b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return net

datatrain=DataLoader(dataset=[train_X1,train_Y],batch_size=BATCHSIZE,shuffle=1)
net=FPN(Bottleneck, [2,2])
train(net)


错误提示:

img


和输入强行改成 input=train_X1[i].reshape([-1,1,32,32]) 有关?否则会报错:

img

此外,如何将全连接层nn.Linear改成8分类呢