用神经网络拟合曲线失败了,求解答

不知道为什么自己弄出的测试数据都是可以拟合的,但是用到实际的数据就不行了。


from __future__ import print_function
import numpy as np
import torch
import torch.autograd
from torch.autograd import Variable
import matplotlib.pyplot as plt
import scipy.io as sio


# Load measured frequency-response data exported from MATLAB.
# FIX: use a raw string for the Windows path — in a normal string literal a
# backslash followed by e.g. 't' or 'n' becomes an escape character, and
# unrecognized escapes are a SyntaxWarning on modern Python.
p = sio.loadmat(r'C:\long\文献\ToStudents\DelayExtractionCode\S13PY.mat')
h = p['Data_all']                    # complex response samples
H = torch.from_numpy(np.real(h))     # keep only the real part; float64 tensor
Freq = p['Freq_all']
f = torch.from_numpy(Freq)           # float64 tensor (matches net.double())
# NOTE(review): raw frequency values are presumably on a Hz/GHz scale;
# unnormalized inputs of that magnitude are a common reason a network fits
# toy data but not real data — consider scaling f (and H) to ~[-1, 1].
# TODO confirm against the .mat contents.


# 声明模型
# Model declaration
class Net(torch.nn.Module):
    """One-hidden-layer regression network: n_feature -> n_hidden -> n_output.

    Both linear layers are bias-free; the nonlinearity is ELU.

    Args:
        n_feature: number of input features per sample.
        n_hidden: width of the hidden layer.
        n_output: number of output values per sample.
    """

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # BUG FIX: the original used the undefined name `n_hidden1` here,
        # which raises NameError as soon as the model is constructed.
        self.hidden = torch.nn.Linear(n_feature, n_hidden, bias=False)
        self.relu = torch.nn.ELU()
        self.predict = torch.nn.Linear(n_hidden, n_output, bias=False)

    def forward(self, x):
        """Apply hidden linear layer, ELU, then output linear layer."""
        # (A stray pasted markdown image line in the original body was
        # removed — it was a syntax error inside this method.)
        x = self.hidden(x)
        x = self.relu(x)
        x = self.predict(x)
        return x


# Build the model (1 input -> 100 hidden -> 1 output) and cast every
# parameter to float64 so it matches the double-precision data tensors.
net = Net(n_feature=1, n_hidden=100, n_output=1).double()

print(net)

# Mean-squared-error objective, optimized with RMSprop.
loss_func = torch.nn.MSELoss()
optimizer = torch.optim.RMSprop(net.parameters(), lr=0.0001)

# The inputs/targets never change between iterations, so wrap them once
# outside the loop instead of rebuilding the wrappers 1000 times.
# NOTE: torch.autograd.Variable is deprecated (merged into Tensor since
# PyTorch 0.4) — plain tensors work with autograd directly.
x, y = Variable(f), Variable(H)

plt.ion()  # interactive mode so the figure can refresh during training

for t in range(1000):
    optimizer.zero_grad()              # clear gradients from the previous step
    prediction = net(x)                # forward pass
    loss = loss_func(prediction, y)    # mean-squared error against the data
    loss.backward()                    # back-propagate
    optimizer.step()                   # update the weights

    if t % 5 == 0:
        # FIX: plt.pause() is required for the window to actually render
        # while the loop runs — plt.cla()/plt.plot() alone never draw.
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.pause(0.01)

plt.ioff()
plt.show()