# Training model
DeepCFD, train_metrics, train_loss, test_metrics, test_loss = train_model(
    model, loss_func, train_dataset, test_dataset, optimizer,
    epochs=1000, batch_size=64, device=device,
    m_mse_name="Total MSE",
    m_mse_on_batch=lambda scope: float(torch.sum((scope["output"] - scope["batch"][1]) ** 2)),
    m_mse_on_epoch=lambda scope: sum(scope["list"]) / len(scope["dataset"]),
    m_ux_name="Ux MSE",
    m_ux_on_batch=lambda scope: float(torch.sum((scope["output"][:, 0, :, :] - scope["batch"][1][:, 0, :, :]) ** 2)),
    m_ux_on_epoch=lambda scope: sum(scope["list"]) / len(scope["dataset"]),
    m_uy_name="Uy MSE",
    m_uy_on_batch=lambda scope: float(torch.sum((scope["output"][:, 1, :, :] - scope["batch"][1][:, 1, :, :]) ** 2)),
    m_uy_on_epoch=lambda scope: sum(scope["list"]) / len(scope["dataset"]),
    m_p_name="p MSE",
    m_p_on_batch=lambda scope: float(torch.sum((scope["output"][:, 2, :, :] - scope["batch"][1][:, 2, :, :]) ** 2)),
    m_p_on_epoch=lambda scope: sum(scope["list"]) / len(scope["dataset"]),
    patience=25, after_epoch=after_epoch
)
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-12-1c78444578e6> in <module>
1 # Training model
----> 2 DeepCFD, train_metrics, train_loss, test_metrics, test_loss = train_model(model, loss_func, train_dataset, test_dataset, optimizer,
3 epochs=1000, batch_size=64, device=device,
4 m_mse_name="Total MSE",
5 m_mse_on_batch=lambda scope: float(torch.sum((scope["output"] - scope["batch"][1]) ** 2)),
~\Models\train_functions.py in train_model(model, loss_func, train_dataset, val_dataset, optimizer, process_batch, eval_model, on_train_batch, on_val_batch, on_train_epoch, on_val_epoch, after_epoch, epochs, batch_size, patience, device, **kwargs)
149 print("Warning: " + name + " metric is incomplete!")
150 scope["metrics_def"] = metrics_def
--> 151 return train(scope, train_dataset, val_dataset, eval_model=eval_model, on_train_batch=on_train_batch,
152 on_val_batch=on_val_batch, on_train_epoch=on_train_epoch, on_val_epoch=on_val_epoch, after_epoch=after_epoch,
153 batch_size=batch_size, patience=patience)
~\Models\train_functions.py in train(scope, train_dataset, val_dataset, patience, batch_size, print_function, eval_model, on_train_batch, on_val_batch, on_train_epoch, on_val_epoch, after_epoch)
74 # Training
75 scope["dataset"] = train_dataset
---> 76 train_loss, train_metrics = epoch(scope, train_loader, on_train_batch, training=True)
77 scope["train_loss"] = train_loss
78 scope["train_metrics"] = train_metrics
~\Models\train_functions.py in epoch(scope, loader, on_batch, training)
29 if "device" in scope and scope["device"] is not None:
30 tensors = [tensor.to(scope["device"]) for tensor in tensors]
---> 31 loss, output = loss_func(model, tensors)
32 if training:
33 optimizer.zero_grad()
<ipython-input-7-06a9e787c284> in loss_func(model, batch)
1 def loss_func(model, batch):
2 x, y = batch
----> 3 output = model(x)
4 lossu = ((output[:,0,:,:] - y[:,0,:,:]) ** 2).reshape((output.shape[0],1,output.shape[2],output.shape[3]))
5 lossv = ((output[:,1,:,:] - y[:,1,:,:]) ** 2).reshape((output.shape[0],1,output.shape[2],output.shape[3]))
d:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
d:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in _forward_unimplemented(self, *input)
199 registered hooks while the latter silently ignores them.
200 """
--> 201 raise NotImplementedError
202
203
NotImplementedError:
I've seen a lot of posts saying this error is caused by an indentation problem, but that doesn't seem to apply to my code.
So how am I supposed to fix it?
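For context, as far as I can tell the "indentation problem" those posts mean looks like the sketch below (DeepCFDNet and its single conv layer are made up just to illustrate): forward ends up as a module-level function instead of a method, so nn.Module.forward is never overridden. My code doesn't seem to look like this.

import torch
import torch.nn as nn

class DeepCFDNet(nn.Module):          # made-up class name, for illustration only
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, kernel_size=3, padding=1)

# Mis-indented: this defines a module-level function named "forward",
# NOT a method of DeepCFDNet, so nn.Module's placeholder is still in effect.
def forward(self, x):
    return self.conv(x)

model = DeepCFDNet()
model(torch.randn(1, 3, 8, 8))        # raises NotImplementedError, like in the traceback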
This happens when your own class inherits from another class and the call falls through to a parent-class method that is only a placeholder: the parent expects the subclass to override it, and since no suitable implementation is found in your subclass, the placeholder runs and raises the "not implemented" error to tell you the method you were supposed to define is missing. In your traceback, nn.Module.__call__ dispatches output = model(x) to forward, finds only the base-class stub _forward_unimplemented, and that is what raises NotImplementedError.
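Concretely, the fix is to make sure the model class itself defines forward (correct name, correct signature, indented inside the class body). A minimal sketch, again with a made-up DeepCFDNet class purely to show where forward has to live:

import torch
import torch.nn as nn

class DeepCFDNet(nn.Module):              # made-up name; substitute your actual model class
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, kernel_size=3, padding=1)

    def forward(self, x):                 # inside the class body -> overrides nn.Module.forward
        return self.conv(x)

model = DeepCFDNet()
out = model(torch.randn(2, 3, 8, 8))      # dispatches to forward, no NotImplementedError
print(out.shape)                          # torch.Size([2, 3, 8, 8])

Once forward is defined on the class, the call output = model(x) inside loss_func will dispatch to it instead of falling back to the nn.Module placeholder.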
Hello, did you end up solving this?