以下是报错信息，以及 yolo.py、common.py、yolov5s.yaml 中需要修改的地方。
报错:
RuntimeError: Given groups=1, weight of size [16, 64, 1, 1], expected input[1, 256, 16, 16] to have 64 channels, but got 256 channels instead
代码:
#yolo.py
class ASFF_Detect(nn.Module):  # YOLOv5 Detect head with ASFFV5 feature fusion (optional RFB)
    stride = None  # strides computed during build
    onnx_dynamic = False  # ONNX export parameter

    def __init__(self, nc=1, anchors=(), ch=(), multiplier=0.5, rfb=False, inplace=True):
        """Detection layer: fuses the incoming feature pyramid with ASFF, then applies 1x1 output convs."""
        super().__init__()
        self.nc = nc                    # number of classes
        self.no = nc + 5                # outputs per anchor (xywh + obj + classes)
        self.nl = len(anchors)          # number of detection layers
        self.na = len(anchors[0]) // 2  # anchors per layer
        self.grid = [torch.zeros(1)] * self.nl         # init grids
        self.anchor_grid = [torch.zeros(1)] * self.nl  # init anchor grids
        # one ASFF fusion module per pyramid level
        self.l0_fusion = ASFFV5(level=0, multiplier=multiplier, rfb=rfb)
        self.l1_fusion = ASFFV5(level=1, multiplier=multiplier, rfb=rfb)
        self.l2_fusion = ASFFV5(level=2, multiplier=multiplier, rfb=rfb)
        self.l3_fusion = ASFFV5(level=3, multiplier=multiplier, rfb=rfb)
        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
        self.m = nn.ModuleList(nn.Conv2d(c, self.no * self.na, 1) for c in ch)  # output convs
        self.inplace = inplace  # use in-place ops (e.g. slice assignment)

    def forward(self, x):
        z = []  # inference outputs
        # Fuse once per level, ordered to match self.m: largest spatial map first.
        x = [self.l3_fusion(x), self.l2_fusion(x), self.l1_fusion(x), self.l0_fusion(x)]
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # output conv
            bs, _, ny, nx = x[i].shape  # (bs, na*no, ny, nx) -> (bs, na, ny, nx, no)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
            if not self.training:  # inference path
                if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
                y = x[i].sigmoid()
                if self.inplace:
                    y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
                    y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                else:  # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
                    xy, wh, conf = y.tensor_split((2, 4), 4)
                    xy = (xy * 2 - 0.5 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, -1, self.no))
        return x if self.training else (torch.cat(z, 1), x)

    def _make_grid(self, nx=20, ny=20, i=0):
        """Build the cell-offset grid and per-anchor pixel-size grid for detection level i."""
        device = self.anchors[i].device
        shape = 1, self.na, ny, nx, 2  # grid shape
        ys = torch.arange(ny, device=device)
        xs = torch.arange(nx, device=device)
        if check_version(torch.__version__, '1.10.0'):  # torch>=1.10 meshgrid needs explicit indexing
            yv, xv = torch.meshgrid(ys, xs, indexing='ij')
        else:
            yv, xv = torch.meshgrid(ys, xs)
        grid = torch.stack((xv, yv), 2).expand(shape).float()
        anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape).float()
        return grid, anchor_grid
#common.py
class ASFFV5(nn.Module):
    def __init__(self, level, multiplier=1, rfb=False, vis=False, act_cfg=True):
        """
        ASFF (Adaptively Spatial Feature Fusion) module for a 4-level YOLOv5 pyramid.

        Channel widths of levels 0..3:
            1024, 512, 256, 128 -> multiplier=1
            512, 256, 128, 64   -> multiplier=0.5
        For even smaller widths the code must be changed manually.

        Args:
            level: which pyramid level this module produces
                (0 = deepest / smallest spatial size, 3 = shallowest / largest).
            multiplier: channel-width multiplier (1 or 0.5).
            rfb: if True, use half the weight channels to save memory.
            vis: if True, forward() also returns the fusion weights.
        """
        super(ASFFV5, self).__init__()
        self.level = level
        # channel dims of levels 0..3, deepest first
        self.dim = [int(1024 * multiplier), int(512 * multiplier),
                    int(256 * multiplier), int(128 * multiplier)]
        self.inter_dim = self.dim[self.level]
        # Per-level resamplers: deeper levels are upsampled after a 1x1 channel
        # compression; shallower levels are downsampled with stride-2 convs
        # (plus max-pool when more than one stride-2 step is needed).
        if level == 0:
            self.stride_level_1 = Conv(int(512 * multiplier), self.inter_dim, 3, 2)
            self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2)
            self.stride_level_3 = Conv(int(128 * multiplier), self.inter_dim, 3, 2)
            self.expand = Conv(self.inter_dim, int(1024 * multiplier), 3, 1)
        elif level == 1:
            self.compress_level_0 = Conv(int(1024 * multiplier), self.inter_dim, 1, 1)
            self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2)
            self.stride_level_3 = Conv(int(128 * multiplier), self.inter_dim, 3, 2)
            self.expand = Conv(self.inter_dim, int(512 * multiplier), 3, 1)
        elif level == 2:
            self.compress_level_0 = Conv(int(1024 * multiplier), self.inter_dim, 1, 1)
            self.compress_level_1 = Conv(int(512 * multiplier), self.inter_dim, 1, 1)
            self.stride_level_3 = Conv(int(128 * multiplier), self.inter_dim, 3, 2)
            self.expand = Conv(self.inter_dim, int(256 * multiplier), 3, 1)
        elif level == 3:
            self.compress_level_0 = Conv(int(1024 * multiplier), self.inter_dim, 1, 1)
            self.compress_level_1 = Conv(int(512 * multiplier), self.inter_dim, 1, 1)
            self.compress_level_2 = Conv(int(256 * multiplier), self.inter_dim, 1, 1)
            self.expand = Conv(self.inter_dim, int(128 * multiplier), 3, 1)
        # when adding rfb, we use half number of channels to save memory
        compress_c = 8 if rfb else 16
        self.weight_level_0 = Conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_1 = Conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = Conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_3 = Conv(self.inter_dim, compress_c, 1, 1)
        # BUGFIX: FOUR per-level weight maps are concatenated in forward(), so
        # this conv must accept compress_c * 4 channels and emit 4 attention
        # maps. The previous Conv(compress_c * 3, 3, ...) raised a
        # channel-mismatch RuntimeError and left levels_weight[:, 3:] empty.
        self.weight_levels = Conv(compress_c * 4, 4, 1, 1)
        self.vis = vis

    def forward(self, x):
        """Fuse the four input maps and return the fused map for self.level.

        x is ordered shallow -> deep: x[0] has the largest spatial size and the
        fewest channels, x[3] the smallest spatial size and the most channels.
        """
        x_level_0 = x[3]  # deepest level (most channels, smallest spatial size)
        x_level_1 = x[2]
        x_level_2 = x[1]
        x_level_3 = x[0]  # shallowest level (largest spatial size)
        # Resize every level to the spatial size / channel width of self.level.
        if self.level == 0:
            level_0_resized = x_level_0
            level_1_resized = self.stride_level_1(x_level_1)
            level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
            level_3_downsampled_inter = F.max_pool2d(x_level_3, 3, stride=4, padding=1)
            level_3_resized = self.stride_level_3(level_3_downsampled_inter)
        elif self.level == 1:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')
            level_1_resized = x_level_1
            level_2_resized = self.stride_level_2(x_level_2)
            level_3_downsampled_inter = F.max_pool2d(x_level_3, 3, stride=2, padding=1)
            level_3_resized = self.stride_level_3(level_3_downsampled_inter)
        elif self.level == 2:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')
            x_level_1_compressed = self.compress_level_1(x_level_1)
            level_1_resized = F.interpolate(x_level_1_compressed, scale_factor=2, mode='nearest')
            level_2_resized = x_level_2
            level_3_resized = self.stride_level_3(x_level_3)
        elif self.level == 3:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=8, mode='nearest')
            level_1_compressed = self.compress_level_1(x_level_1)
            level_1_resized = F.interpolate(level_1_compressed, scale_factor=4, mode='nearest')
            x_level_2_compressed = self.compress_level_2(x_level_2)
            level_2_resized = F.interpolate(x_level_2_compressed, scale_factor=2, mode='nearest')
            level_3_resized = x_level_3
        # Per-level attention weights, softmax-normalized across the 4 levels.
        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        level_3_weight_v = self.weight_level_3(level_3_resized)
        levels_weight_v = torch.cat(
            (level_0_weight_v, level_1_weight_v, level_2_weight_v, level_3_weight_v), 1)
        levels_weight = F.softmax(self.weight_levels(levels_weight_v), dim=1)
        # BUGFIX: '2:3 :' was missing a comma, which indexed the 4-D weight
        # tensor with only 3 indices and broadcast the wrong slice.
        fused_out_reduced = (level_0_resized * levels_weight[:, 0:1, :, :] +
                             level_1_resized * levels_weight[:, 1:2, :, :] +
                             level_2_resized * levels_weight[:, 2:3, :, :] +
                             level_3_resized * levels_weight[:, 3:4, :, :])
        out = self.expand(fused_out_reduced)
        if self.vis:
            return out, levels_weight, fused_out_reduced.sum(dim=1)
        return out
#yolov5s.yaml
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
# Parameters
nc: 80 # number of classes
depth_multiple: 0.33 # model depth multiple
width_multiple: 0.50 # layer channel multiple
# NOTE(review): 4 anchor rows = 4 detection levels; the ASFF_Detect inputs
# [21, 24, 27, 30] sit at strides 4/8/16/32, so the rows map to P2..P5
# (the original P3/8..P6/64 comments did not match these layers).
anchors:
- [9,11, 21,19, 17,41] # P2/4
- [43,32, 39,70, 86,64] # P3/8
- [65,131, 134,130, 120,265] # P4/16
- [282,180, 247,354, 512,387] # P5/32
# YOLOv5 v6.0 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 6, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 3, C3, [1024]],
[-1, 1, SPPF, [1024, 5]], # 9
]
# YOLOv5 v6.0 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 13
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
[-1, 1, Conv, [128, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 2], 1, Concat, [1]], # cat backbone P2 (layer 2 is at stride 4)
[-1, 3, C3, [128, False]], # 21 (P2/4-xsmall)
[-1, 1, Conv, [128, 3, 2]],
[[-1, 18], 1, Concat, [1]], # cat head P3
[-1, 3, C3, [ 256, False ]], # 24 (P3/8-small)
[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 27 (P4/16-medium)
[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 30 (P5/32-large)
[[21, 24, 27, 30], 1, ASFF_Detect, [nc, anchors]], # ASFF_Detect(P2, P3, P4, P5)
]