Traceback (most recent call last):
File "D:/Users/weiran/PycharmProjects/hello_world/test.py", line 100, in <module>
out = model(img)
File "D:\Users\weiran\PycharmProjects\hello_world\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "D:\Users\weiran\PycharmProjects\hello_world\net.py", line 51, in forward
x=self.layer1(x)
File "D:\Users\weiran\PycharmProjects\hello_world\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "D:\Users\weiran\PycharmProjects\hello_world\lib\site-packages\torch\nn\modules\container.py", line 119, in forward
input = module(input)
File "D:\Users\weiran\PycharmProjects\hello_world\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "D:\Users\weiran\PycharmProjects\hello_world\lib\site-packages\torch\nn\modules\conv.py", line 399, in forward
return self._conv_forward(input, self.weight, self.bias)
File "D:\Users\weiran\PycharmProjects\hello_world\lib\site-packages\torch\nn\modules\conv.py", line 396, in _conv_forward
self.padding, self.dilation, self.groups)
RuntimeError: Given groups=1, weight of size [16, 3, 3, 3], expected input[64, 227, 227, 3] to have 3 channels, but got 227 channels instead
这是报错
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
import glob
import os
from net import simpleNet, CNNNet
from PIL import Image
import numpy as np
import numpy
batch_size = 64
learning_rate = 1e-2
num_epoches = 20
data_tf = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize([0.5],[0.5])])
pic_dir_positive = "D:\\kaoyan\\minst_pytorch\\Positive"
pic_dir_negative = "D:\\kaoyan\\minst_pytorch\\Negative"
pic_path_pos = os.path.join(pic_dir_positive, "*.jpg")
pic_path_nag = os.path.join(pic_dir_negative, "*.jpg")
pic_list_pos = glob.glob(pic_path_pos)
pic_list_nag = glob.glob(pic_path_nag)
train_list_pos = list()
train_list_nag = list()
test_list_pos = list()
test_list_nag = list()
for i, pic in enumerate(pic_list_pos):
if i % 5 == 0:
test_list_pos.append([pic,1])
else:
train_list_pos.append([pic,1])
for i, pic in enumerate(pic_list_nag):
if i%5 == 0:
test_list_nag.append([pic,0])
else:
train_list_nag.append([pic,0])
train_list = train_list_pos + train_list_nag
test_list = test_list_pos + test_list_nag
print(len(train_list))
print(len(test_list))
train_array_list=list()
for pic,label in train_list:
img_pil = Image.open(pic)
img_pil_1 = np.array(img_pil)
img_tensor = torch.from_numpy(img_pil_1)
train_array_list.append((img_tensor,label))
test_array_list=list()
for pic,label in test_list:
img_pil_ = Image.open(pic)
img_pil_2 = np.array(img_pil_)
img_tensor = torch.from_numpy(img_pil_2)
test_array_list.append((img_tensor,label))
class mydataset(Dataset):
def __init__(self, tensor_label):
self.data = tensor_label
def __getitem__(self, i):
return self.data[i]
def __len__(self):
return len(self.data)
train_data = mydataset(train_array_list)
test_data = mydataset(test_array_list)
train_loader= DataLoader(train_data,batch_size=64,shuffle=True)
test_loader = DataLoader(test_data,batch_size=64,shuffle=True)
model = CNNNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
model.eval()
eval_loss = 0
eval_acc = 0
for ep in range(20):
train_acc = 0
model.train()
for data in train_loader:
img, label = data
img = Variable(img)
label = Variable(label)
out = model(img)
loss = criterion(out, label)
_, pred = torch.max(out, 1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
num_correct = (pred == label).sum()
train_acc += num_correct.item()
print('Training Acc: {:.6f}'.format(train_acc / (len(train_data))))
model.eval()
num_correct = 0
eval_loss = 0
for data in test_loader:
img, label = data
img = Variable(img)
label = Variable(label)
out = model(img)
loss = criterion(out, label)
eval_loss += loss.item()
_, pred = torch.max(out, 1)
num_correct = (pred == label).sum()
eval_acc += num_correct.item()
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(test_data)), eval_acc / (len(test_data))))
这是代码
class CNNNet(nn.Module):
def __init__(self):
super(CNNNet,self).__init__()
self.layer1=nn.Sequential(
nn.Conv2d(3,16,kernel_size=3),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Sequential(
nn.Conv2d(16,32,kernel_size=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.layer3=nn.Sequential(
nn.Conv2d(32,64,kernel_size=3),
nn.ReLU(inplace=True)
)
self.layer4 = nn.Sequential(
nn.Conv2d(64,128,kernel_size=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc=nn.Sequential(
nn.Linear(128*4*4,1024),
nn.ReLU(inplace=True),
nn.Linear(1024,128),
nn.ReLU(inplace=True),
nn.Linear(128,10)
)
def forward(self, x):
x=self.layer1(x)
x=self.layer2(x)
x=self.layer3(x)
x=self.layer4(x)
x=x.view(x.size(0),-1)
x=self.fc(x)
return x
这是网络,求解决
这是你参数传递的有问题。
RuntimeError: Given groups=1, weight of size [16, 3, 3, 3], expected input[64, 227, 227, 3] to have 3 channels, but got 227 channels instead
他期望大小是[64, 227, 227, 3] to have 3 channels, but got 227 channels instead
还是网络用错了。
这个报错的意思是,预期输入的图像通道是4,但是得到了227个通道的图像,所以在图片输入之前要把图像的通道变化一下
第100行代码
out = model(img)
后面加上
out=out.convert("RGB")
您好,我是有问必答小助手,您的问题已经有小伙伴解答了,您看下是否解决,可以追评进行沟通哦~
如果有您比较满意的答案 / 帮您提供解决思路的答案,可以点击【采纳】按钮,给回答的小伙伴一些鼓励哦~~
ps:问答VIP仅需29元,即可享受5次/月 有问必答服务,了解详情>>>https://vip.csdn.net/askvip?utm_source=1146287632