自定义了个loss 但是训练的时候不下降 不知道是定义的方式不对还是别的什么问题
代码如下:
```python
class MIL_loss(nn.Module):
def __init__(self):
super(MIL_loss, self).__init__()
def forward(self,nomaly,anomaly,bs):
nomly_z = torch.tensor([])
anomly_z = torch.tensor([])
nomly_fin = torch.tensor([])
anomly_fin = torch.tensor([])
total_loss = 0
for i in range(bs):
for j in range(0,28,4):
nomaly_d=nomaly[i]
anomaly_d=anomaly[i]
nomaly_b=nomaly_d[j:j+4]
anomaly_b=anomaly_d[j:j+4]
nomaly_mean=torch.mean(nomaly_b)
anomaly_mean=torch.mean(anomaly_b)
nomaly_mean=torch.tensor([nomaly_mean],requires_grad=True,dtype=torch.float32)
anomaly_mean=torch.tensor([anomaly_mean],requires_grad=True,dtype=torch.float32)
nomly_z=torch.cat((nomly_z,nomaly_mean),0)
anomly_z=torch.cat((anomly_z,anomaly_mean),0)
nomly_fin=torch.cat((nomly_fin,nomly_z),0)
anomly_fin = torch.cat((anomly_fin, anomly_z), 0)
nomaly_max_mean=torch.max(nomly_fin)
anomaly_max_mean = torch.max(anomly_fin)
loss=1-anomaly_max_mean+nomaly_max_mean
if loss<0:
loss=0
total_loss=total_loss+loss
nomly_z = torch.tensor([])
anomly_z = torch.tensor([])
nomly_fin = torch.tensor([])
anomly_fin = torch.tensor([])
loss = 0
return total_loss