TypeError: forward() takes 2 positional arguments but 3 were given



```python
import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision


class FasterRCNN(nn.Module):
    def __init__(self):
        super(FasterRCNN, self).__init__()
        self.backbone = torchvision.models.vgg16(pretrained=True)
        self.rpn = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 9 * 4, kernel_size=1, stride=1, padding=0),
        )
        self.roi_pooling = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Linear(4096, 21),
        )

    def forward(self, x):
        x = self.backbone.features(x)
        rpn_output = self.rpn(x)
        roi_boxes = self.convert_to_roi_boxes(rpn_output)
        roi_features = self.roi_pooling(x, roi_boxes)
        roi_features = roi_features.view(roi_features.size(0), -1)
        output = self.classifier(roi_features)
        return output

    def convert_to_roi_boxes(self, rpn_output):
        rpn_output = rpn_output.permute(0, 2, 3, 1).contiguous().view(-1, 4)
        roi_boxes = torch.zeros((rpn_output.size(0), 4))
        roi_boxes[:, 0] = rpn_output[:, 0] - 0.5 * rpn_output[:, 2]
        roi_boxes[:, 1] = rpn_output[:, 1] - 0.5 * rpn_output[:, 3]
        roi_boxes[:, 2] = rpn_output[:, 0] - 0.5 * rpn_output[:, 2]
        roi_boxes[:, 3] = rpn_output[:, 1] - 0.5 * rpn_output[:, 3]
        return roi_boxes


# Prepare the data
transform = torchvision.transforms.Compose([
    # Resize each image to 256x256
    torchvision.transforms.Resize((256, 256)),
    # Randomly flip each image horizontally
    torchvision.transforms.RandomHorizontalFlip(),
    # Convert the image to a tensor
    torchvision.transforms.ToTensor(),
    # Normalize each channel of the image
    torchvision.transforms.Normalize([0.4914, 0.4822, 0.4465],
                                     [0.2023, 0.1994, 0.2010])
])

trainset = torchvision.datasets.ImageFolder(os.path.join(rectangle_dir, 'train'),
                                            transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True,
                                          num_workers=2)

# Define the loss function and optimizer
net = FasterRCNN()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Train the model
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 2000 == 1999:  # print the loss every 2000 mini-batches
            print(
                '[%d,%5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('finished Training')
```


```
Traceback (most recent call last):
  File "/home/u1/ywj/deepfashion/clothes_change_ywj/use_torch_on_deepfashion.py", line 185, in <module>
    outputs = net(inputs)
  File "/home/u1/miniconda3/envs/clothes_change_ywj/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/u1/ywj/deepfashion/clothes_change_ywj/use_torch_on_deepfashion.py", line 141, in forward
    roi_features = self.roi_pooling(x, roi_boxes)
  File "/home/u1/miniconda3/envs/clothes_change_ywj/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given
```

Process finished with exit code 1
Could someone help me figure out how to fix this problem?

Based on the error message, the problem is that the call self.roi_pooling(x, roi_boxes) passes three arguments, while nn.AdaptiveAvgPool2d's forward() accepts only two (self and the input tensor). I changed the call in forward() from self.roi_pooling(x, roi_boxes) to self.roi_pooling(x). Your convert_to_roi_boxes() function also returns boxes in the wrong format, so I tweaked it slightly; the modified code is shown below:

```python
class FasterRCNN(nn.Module):
    def __init__(self):
        super(FasterRCNN, self).__init__()
        self.backbone = torchvision.models.vgg16(pretrained=True)
        self.rpn = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(512, 9 * 4, kernel_size=1, stride=1, padding=0),
        )
        self.roi_pooling = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Linear(4096, 21),
        )
 
    def forward(self, x, roi_boxes):
        x = self.backbone.features(x)
        rpn_output = self.rpn(x)
        roi_boxes = self.convert_to_roi_boxes(rpn_output, roi_boxes)
        roi_features = self.roi_pooling(x)
        roi_features = roi_features.reshape(-1, 512, 7, 7)
        roi_features = roi_features[:, :, roi_boxes[0], roi_boxes[1]]
        roi_features = roi_features.reshape(-1, 7*7*512)
        output = self.classifier(roi_features)
        return output, roi_boxes
 
    def convert_to_roi_boxes(self, rpn_output, roi_boxes):
        rpn_output = rpn_output.permute(0, 2, 3, 1).contiguous().view(-1, 4)
        roi_boxes = torch.zeros((rpn_output.size(0), 4))
        roi_boxes[:, 0] = rpn_output[:, 0] - 0.5 * rpn_output[:, 2]
        roi_boxes[:, 1] = rpn_output[:, 1] - 0.5 * rpn_output[:, 3]
        roi_boxes[:, 2] = rpn_output[:, 0] + 0.5 * rpn_output[:, 2]
        roi_boxes[:, 3] = rpn_output[:, 1] + 0.5 * rpn_output[:, 3]
        roi_boxes = torch.round(roi_boxes).long()
        return roi_boxes
```
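
Incidentally, if what you really want is region-wise pooling driven by the predicted boxes (rather than pooling the whole feature map), torchvision already ships an ROI pooling op that does take boxes as a second argument: torchvision.ops.roi_pool. Here is a minimal sketch; the feature-map size, box coordinates, and spatial_scale below are made-up illustrative values, not taken from your code:

```python
import torch
import torchvision

# A stand-in VGG16 feature map: batch of 1, 512 channels, 14x14 spatial size (illustrative).
features = torch.randn(1, 512, 14, 14)

# Boxes in (batch_index, x1, y1, x2, y2) format, in input-image coordinates (illustrative values).
boxes = torch.tensor([
    [0, 0.0, 0.0, 112.0, 112.0],
    [0, 32.0, 32.0, 200.0, 200.0],
])

# spatial_scale maps image coordinates onto feature-map coordinates;
# for a 224-pixel input and a 14-pixel feature map that ratio is 14 / 224 = 1 / 16.
roi_features = torchvision.ops.roi_pool(features, boxes,
                                        output_size=(7, 7), spatial_scale=1.0 / 16)

print(roi_features.shape)  # torch.Size([2, 512, 7, 7]): one 512x7x7 feature map per box
```

Each pooled region can then be flattened and passed through the classifier head, which is the usual Fast/Faster R-CNN pattern.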


See whether this works for you; if it helps, please remember to accept the answer, and if not, keep looking 🚀🚀

  • You can also refer to this article: TypeError: forward() takes 2 positional arguments but 3 were given
  • The following answer was generated by ChatGPT based on a summary of related blog posts:

    The likely cause of this error is that the forward() function you defined takes only two parameters, while three arguments are actually being passed in. To fix it, check how many parameters your forward() function declares and compare that against the number of arguments passed at the call site, making sure they match. If they do not, you can either modify forward() to accept more parameters or check whether the arguments passed at the call site are correct.

    Here is example code that modifies forward() to accept an extra parameter:

    import torch.nn as nn
    import torch.nn.functional as F
    
    class MyModule(nn.Module):
        def __init__(self):
            super(MyModule, self).__init__()
            # 定义各层
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 50, 5)
            self.fc1 = nn.Linear(4*4*50, 500)
            self.fc2 = nn.Linear(500, 10)
    
        def forward(self, x, y):
            # 前向传播
            x = self.conv1(x)
            x = F.relu(x)
            x = F.max_pool2d(x, (2, 2))
            x = self.conv2(x)
            x = F.relu(x)
            x = F.max_pool2d(x, (2, 2))
            x = x.view(-1, 4*4*50)
            x = self.fc1(x)
            x = F.relu(x)
            x = self.fc2(x)
    
            # 输出结果
            return x + y
    

    In this example, forward() is changed from taking a single parameter x to taking two parameters, x and y, and the result of the forward pass is added to the passed-in y before being returned. With this signature, two arguments can be passed at the call site.

    Also, if you are training on a GPU, you can try to speed up the forward pass during training by adding the following two lines right after your imports:

    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    

    This makes the program spend a little extra time at startup searching for the convolution algorithm best suited to each convolution layer in the network, which speeds up training afterwards. Note that this only pays off when the network architecture is fixed and the input shape (batch size, image size, number of input channels) stays the same. If the convolution configuration keeps changing, the program will keep re-running the search and will actually waste more time.
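
    As a concrete illustration of that caveat, here is a small sketch (it needs a CUDA GPU, and the tiny model and input shape are made up for demonstration): with cudnn.benchmark enabled, the first forward pass at a given input shape pays for the one-off algorithm search, and later passes at the same shape reuse the cached choice.

    import time

    import torch
    import torch.nn as nn

    torch.backends.cudnn.benchmark = True  # let cuDNN search for the fastest conv algorithm per input shape

    model = nn.Conv2d(3, 64, kernel_size=3, padding=1).cuda()
    x = torch.randn(8, 3, 224, 224, device="cuda")  # fixed shape, so the search result gets reused

    for step in range(3):
        torch.cuda.synchronize()
        t0 = time.time()
        _ = model(x)  # step 0 includes the one-off algorithm search; later steps reuse it
        torch.cuda.synchronize()
        print(f"step {step}: {time.time() - t0:.4f} s")

    If the input shape changed on every step, each new shape would trigger another search, which is why the tip above only helps with fixed input shapes.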