PyTorch and PyG


self.gat1 = GATConv(self.input_dim, self.hidden_dim, dropout=0.5, heads=self.heads)
self.gat2 = GATConv(self.hidden_dim*self.heads, self.out_dim, dropout=0.5, heads=1, concat=False)

self.inner_attention = nn.Sequential(
            self.gat1,
            nn.ReLU(),
            nn.Dropout(p=0.6),
            self.gat2,
            nn.ReLU()
        )

# Why do I get an error when I call it like this? Thanks!
x, edge_index = graph_data.x, graph_data.edge_index
out = self.inner_attention()(x, edge_index)


TypeError: forward() missing 1 required positional argument: 'input'

I assumed this was a network structure you had implemented yourself... Since these are PyG's built-in layers, the problem is the input format: nn.Sequential only passes a single input from one layer to the next, so layers that need multiple inputs have to be wrapped (a sketch of this follows). For details you can look at this post:
https://blog.csdn.net/qq_23968185/article/details/108277724
Or you can skip Sequential altogether and write the layers directly in forward(), as in the full example after the sketch.
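
For the wrapping approach, the idea is to thread a single (x, edge_index) tuple through nn.Sequential. A minimal sketch that slots into your module — TupleWrapper is an illustrative helper name, not a PyTorch or PyG class:

class TupleWrapper(nn.Module):
    def __init__(self, layer, needs_edge_index=False):
        super().__init__()
        self.layer = layer
        self.needs_edge_index = needs_edge_index

    def forward(self, inputs):
        # Sequential hands each layer exactly one object, so we bundle
        # (x, edge_index) into a tuple and pass edge_index along unchanged.
        x, edge_index = inputs
        if self.needs_edge_index:
            x = self.layer(x, edge_index)   # graph layers, e.g. GATConv
        else:
            x = self.layer(x)               # plain layers, e.g. ReLU / Dropout
        return x, edge_index

self.inner_attention = nn.Sequential(
    TupleWrapper(self.gat1, needs_edge_index=True),
    TupleWrapper(nn.ReLU()),
    TupleWrapper(nn.Dropout(p=0.6)),
    TupleWrapper(self.gat2, needs_edge_index=True),
    TupleWrapper(nn.ReLU()),
)
out, _ = self.inner_attention((x, edge_index))  # note: one tuple argument, no extra ()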

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GATConv
from torch_geometric.data import Data

class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, out_dim, heads):
        super(Net, self).__init__()
        self.gat1 = GATConv(input_dim, hidden_dim, dropout=0.5, heads=heads)
        self.gat2 = GATConv(hidden_dim * heads, out_dim, dropout=0.5, heads=1, concat=False)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.gat1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.gat2(x, edge_index)
        x = F.relu(x)
        return x  # decide for yourself what to return

# toy graph: 3 nodes with 1 feature each, 4 directed edges
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], dtype=torch.long)
x = torch.tensor([[-1], [0], [1]], dtype=torch.float)
data = Data(x=x, edge_index=edge_index)
model = Net(input_dim=1, hidden_dim=8, out_dim=2, heads=4)  # example sizes matching the toy data
out = model(data)
print(out)
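
If your installed PyG version ships torch_geometric.nn.Sequential (recent versions do), you can also keep the Sequential style: it takes a signature string, so multi-input layers like GATConv can be chained without any manual wrapping. A rough sketch, reusing the toy data and example sizes from above:

from torch_geometric.nn import Sequential as PyGSequential

input_dim, hidden_dim, out_dim, heads = 1, 8, 2, 4  # placeholder values

inner_attention = PyGSequential('x, edge_index', [
    (GATConv(input_dim, hidden_dim, dropout=0.5, heads=heads), 'x, edge_index -> x'),
    nn.ReLU(),
    nn.Dropout(p=0.6),
    (GATConv(hidden_dim * heads, out_dim, dropout=0.5, heads=1, concat=False), 'x, edge_index -> x'),
    nn.ReLU(),
])

out = inner_attention(x, edge_index)  # both tensors, exactly like your original call
print(out)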

Try removing the extra pair of parentheses and see if that works — self.inner_attention() calls the Sequential's forward() with no arguments, which is exactly what raises that TypeError:
out = self.inner_attention(x, edge_index)