Is there Python code for an Elman neural network used for regression prediction?

Does anyone have Elman neural network code implemented without neural-network library functions? I'd be grateful for a reference.

You could take a look at this: https://www.cpuheater.com/deep-learning/introduction-to-recurrent-neural-networks-in-pytorch/

Is there one implemented in numpy?

from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets


class ELMAN_RNN(object):

    def __init__(self, input_num, hidden_num, output_num, learning_rate):
        self.input_num = input_num
        self.hidden_num = hidden_num
        self.output_num = output_num
        self.learning_rate = learning_rate
        self.hidden_weights = np.random.random((self.input_num, self.hidden_num))   # w1: input -> hidden
        self.output_weights = np.random.random((self.hidden_num, self.output_num))  # w2: hidden -> output
        self.rnn_weights = np.random.random((self.hidden_num, self.hidden_num))     # w3: context -> hidden
        self.hidden_bias = np.random.rand(1)
        self.output_bias = np.random.rand(1)
        # hidden_output doubles as the Elman context layer: on entry to
        # feed_forward it still holds the previous step's hidden activation
        self.hidden_output = np.zeros((1, self.hidden_num))
        self.hidden_output_forward = np.zeros((1, self.hidden_num))

    def training(self, train_input, train_output):
        """train on one sample: one forward pass, then one weight update"""
        output = self.feed_forward(train_input)
        self.bptt(train_input, output, train_output)

    def calculate_the_mse(self, X, y):
        """mean squared error over a whole set
        (note: evaluating also advances the recurrent state)"""
        loss = 0.0
        for i in range(X.shape[0]):
            result = self.feed_forward(X[i, :])
            loss += self.get_the_total_error(y[i, :], result)
        return loss / X.shape[0]

    def get_the_total_error(self, y, result):
        # squared-error loss; its derivative w.r.t. the linear output is
        # (result - y), which is exactly the delta used in bptt below
        # (the original computed y ** 2 - result ** 2, which is not an error measure)
        return float(0.5 * np.sum((y - result) ** 2))

    def feed_forward(self, input):
        """one forward step: h_t = sigmoid(x_t.w1 + h_{t-1}.w3 + b)"""
        self.hidden_output = self.sigmoid(
            np.dot(np.array(input).reshape(1, self.input_num), self.hidden_weights)
            + np.dot(self.hidden_output, self.rnn_weights)
            + self.hidden_bias)
        # linear output for regression; the original called an undefined
        # self.softmax here, which only makes sense for classification
        return self.linear(np.dot(self.hidden_output, self.output_weights) + self.output_bias)

    def bptt(self, input, output, train_output):
        """update all weights; the gradient is truncated after one step,
        i.e. it is not propagated further back than h_{t-1}"""
        # delta of the output layer: d(loss)/d(raw output)
        delta_of_output_layers = [0] * self.output_num
        for i in range(self.output_num):
            delta_of_output_layers[i] = self.calculate_output_wrt_rawout(output[0, i], train_output[i])

        # delta of the hidden layer
        delta_of_hidden_layers = [0] * self.hidden_num
        for i in range(self.hidden_num):
            d_error_wrt_hidden_output = 0.0
            for j in range(self.output_num):
                d_error_wrt_hidden_output += delta_of_output_layers[j] * self.output_weights[i, j]
            delta_of_hidden_layers[i] = d_error_wrt_hidden_output * self.calculate_output_wrt_netinput(
                self.hidden_output[0, i])

        # get the delta-w of the output layer and update the weights (w2)
        for i in range(self.output_num):
            for weight_j in range(self.output_weights.shape[0]):
                delta_wrt_weight_j = delta_of_output_layers[i] * self.hidden_output[0, weight_j]
                self.output_weights[weight_j, i] -= self.learning_rate * delta_wrt_weight_j

        # get the delta-w of the hidden layer and update the weights (w1)
        for i in range(self.hidden_num):
            for weight_j in range(self.hidden_weights.shape[0]):
                delta_wrt_weight_j = delta_of_hidden_layers[i] * input[weight_j]
                self.hidden_weights[weight_j, i] -= self.learning_rate * delta_wrt_weight_j

        # get the delta-w of the recurrent weights (w3) against the previous hidden state
        for i in range(self.hidden_num):
            for weight_j in range(self.rnn_weights.shape[0]):
                delta_wrt_weight_j = delta_of_hidden_layers[i] * self.hidden_output_forward[0, weight_j]
                self.rnn_weights[weight_j, i] -= self.learning_rate * delta_wrt_weight_j

        # bias updates (missing in the original code)
        self.output_bias -= self.learning_rate * np.sum(delta_of_output_layers)
        self.hidden_bias -= self.learning_rate * np.sum(delta_of_hidden_layers)

        # the current hidden state becomes the context for the next step
        self.hidden_output_forward = self.hidden_output

    def sigmoid(self, x):
        """hidden-layer activation"""
        return 1.0 / (1.0 + np.exp(-x))

    def linear(self, x):
        """identity activation for the regression output"""
        return x

    def calculate_output_wrt_rawout(self, output, train_output):
        """derivative of the squared-error loss w.r.t. the linear output"""
        return output - train_output

    def calculate_output_wrt_netinput(self, output):
        """derivative of the sigmoid, expressed in terms of its output"""
        return output * (1 - output)


if __name__ == "__main__":
    elman = ELMAN_RNN(input_num=13, hidden_num=20, output_num=1, learning_rate=0.02)

    # note: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2;
    # with a newer version the dataset has to be loaded another way
    data = datasets.load_boston()
    X = data.data[:200, :]
    y = data.target.reshape(-1, 1)[:200, :]
    X = MinMaxScaler((0, 1)).fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)

    loss = []
    for i in range(10):
        for j in range(X_train.shape[0]):
            elman.training(X_train[j], y_train[j])
        loss.append(elman.calculate_the_mse(X_test, y_test))
        print(loss)

    plt.figure()
    plt.plot(loss)
    plt.title('the loss with the training')
    plt.show()
    print('training finished!')
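
A small aside: the listing imports mean_squared_error and r2_score but never calls them. A minimal sketch of how the trained network could be scored with them on the test split (this snippet is an addition, not part of the original post; note that each feed_forward call also advances the recurrent state):

preds = np.array([elman.feed_forward(X_test[i, :])[0, 0] for i in range(X_test.shape[0])])
print('test MSE:', mean_squared_error(y_test, preds))
print('test R2:', r2_score(y_test, preds))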

How should this code be changed? I can't work it out myself.
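
For anyone trying to debug it, the recurrence the class above is aiming for is the standard Elman one, written here in row-vector form so the shapes match the arrays in the code ($W_1$ = hidden_weights, $W_3$ = rnn_weights, $W_2$ = output_weights); this is a sketch of the textbook formulation, not something taken from the original post:

$$h_t = \sigma(x_t W_1 + h_{t-1} W_3 + b_h), \qquad \hat{y}_t = h_t W_2 + b_y$$

The bptt method then applies the one-step-truncated gradients $\delta_{\text{out}} = \hat{y}_t - y_t$ and $\delta_h = (\delta_{\text{out}} W_2^{\top}) \odot h_t \odot (1 - h_t)$; nothing is propagated further back than $h_{t-1}$.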

https://juejin.im/post/5bdcf501f265da6174644b63

Is this more to your taste?

The author's GitHub: https://github.com/RayDean/DeepLearning

I've read your code. Do you have source code that doesn't rely on library functions?

Let me take another look first.

Or could you use it to predict Boston housing prices and send that over for me to look at? I've never used this library.


Bro, are you messing with me? The first library is for handling arrays, and the other one is for plotting...

Were you planning to draw it by hand? QAQ

My mistake.

There's one more library further down...

Awkward, hahaha.

I've never used that neural-network library. If you're familiar with it, could you write code that predicts Boston housing prices for me to reference?

https://github.com/rougier/neural-networks/blob/master/elman.py
This one doesn't need any libraries at all...

This doesn't seem to be an Elman network, though: there's no intermediate context layer, so it's really just a multilayer perceptron.
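
For comparison, what makes a network "Elman" is precisely that context layer: a copy of the previous hidden state that is fed back alongside the input at the next step. Here is a minimal forward-pass sketch in plain numpy (all names are illustrative, not taken from either repository):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

input_num, hidden_num, output_num = 13, 20, 1
W_in = np.random.random((input_num, hidden_num))    # input -> hidden
W_ctx = np.random.random((hidden_num, hidden_num))  # context -> hidden; a plain MLP has no such weights
W_out = np.random.random((hidden_num, output_num))  # hidden -> output

context = np.zeros((1, hidden_num))                 # the context layer holds h_{t-1}

for x_t in np.random.random((5, input_num)):        # a toy sequence of 5 time steps
    hidden = sigmoid(x_t.reshape(1, -1) @ W_in + context @ W_ctx)
    y_t = hidden @ W_out                            # linear output, as in regression
    context = hidden.copy()                         # carry the hidden state to the next step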