While training a deep learning model with PyTorch I ran into the error described in the title:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0!
def training_step(self, batch):
    x = batch['feature'].float()
    y_app = batch['app_label'].long()
    x_tra_all, y_tra_all = drop_na(batch)
    app_out = self(x)
    tra_all_out = self(x_tra_all)
    out_app = nn.Linear(in_features=50, out_features=17)
    out_tra = nn.Linear(in_features=50, out_features=12)
    out_all = nn.Linear(in_features=50, out_features=6)
    # .to(device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
    y_hat_app = out_app(app_out)
    y_hat_tra = out_tra(tra_all_out)
    y_hat_all = out_all(tra_all_out)
    entropy_app = F.cross_entropy(y_hat_app, y_app)
    entropy_tra = F.cross_entropy(y_hat_tra, y_tra_all)
    entropy_all = F.cross_entropy(y_hat_all, y_tra_all)
    entropy = (entropy_app + entropy_tra + entropy_all) / 3.0
    self.log('training_loss', entropy, prog_bar=True, logger=True, on_step=True, on_epoch=True)
    loss = {'loss': entropy}
    return loss
I have already tried adding .cuda() and .to('cuda:0'), but it still doesn't work. Maybe I'm adding it in the wrong place?
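For reference, below is a minimal sketch of the alternative placement I mean: creating the three linear heads once in __init__ instead of inside training_step, so that Lightning registers them as submodules and moves them to the same device as the rest of the model. The class name MultiHeadModel, the backbone argument and configure_optimizers are placeholders, not my real code; drop_na is my own helper and I'm assuming its outputs stay on the batch's device.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl


class MultiHeadModel(pl.LightningModule):
    def __init__(self, backbone):
        super().__init__()
        self.backbone = backbone  # produces the 50-dim features
        # heads are registered submodules, so trainer moves them to the GPU
        self.out_app = nn.Linear(in_features=50, out_features=17)
        self.out_tra = nn.Linear(in_features=50, out_features=12)
        self.out_all = nn.Linear(in_features=50, out_features=6)

    def forward(self, x):
        return self.backbone(x)

    def training_step(self, batch, batch_idx):
        x = batch['feature'].float()
        y_app = batch['app_label'].long()
        x_tra_all, y_tra_all = drop_na(batch)  # my own helper, unchanged

        app_out = self(x)
        tra_all_out = self(x_tra_all)

        # heads already live on self.device, no manual .cuda() needed here
        y_hat_app = self.out_app(app_out)
        y_hat_tra = self.out_tra(tra_all_out)
        y_hat_all = self.out_all(tra_all_out)

        entropy_app = F.cross_entropy(y_hat_app, y_app)
        entropy_tra = F.cross_entropy(y_hat_tra, y_tra_all)
        entropy_all = F.cross_entropy(y_hat_all, y_tra_all)
        entropy = (entropy_app + entropy_tra + entropy_all) / 3.0

        self.log('training_loss', entropy, prog_bar=True, logger=True,
                 on_step=True, on_epoch=True)
        return {'loss': entropy}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
```

Is this the right placement, or should the layers stay inside training_step and be moved with .to(self.device) there?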