LSTM进行超参数搜索

使用LSTM多变量时间预测模型,实现对海洋中S、T浓度预测,在已经分好的训练集中进行超参数搜索,获取最优参数进行数据预测,数据来源:http://mds.nmdis.org.cn/pages/dataViewDetail.html?dataSetId=18


import os
import pandas as pd
#import re
# Read every CSV under `path` and stack them into one raw DataFrame.
path = r"C:\Users\Asus\Desktop\毕设\data\new"  # directory holding the raw station CSV files
FileNames = os.listdir(path)  # assumes every entry is a CSV — TODO confirm
df = []
for fn in FileNames:
    fullfilename = os.path.join(path, fn)
    df.append(pd.read_csv(fullfilename, encoding='utf-8', index_col=None))
data = pd.concat(df)
data = data[['Station', 'date', 'lat', 'lon', 'SampleDepth', 'T', 'S']]
data["date"] = pd.to_datetime(data["date"], format='%m/%d/%Y')

# Keep only stations that have at least 100 observations.
group = data.groupby(['Station'])
SAT = data['Station'].value_counts()
n_SAT = pd.DataFrame(SAT)
# NOTE(review): pandas >= 2.0 names the value_counts column 'count', not
# 'Station' — confirm the installed pandas version before relying on this.
n_SAT = n_SAT[n_SAT['Station'] >= 100]
n_SAT['counts'] = n_SAT['Station']
n_SAT['Station'] = n_SAT.index
n_SAT  # notebook-style echo; no effect when run as a script

n_data = []
for station in n_SAT['Station']:
    n_data.append(data.loc[data["Station"] == station])
data0 = pd.concat(n_data)
# Combine lat/lon into a single "lat lon" string key per measurement site.
a = data0[['lat', 'lon']]
data0['location'] = a.apply(lambda x: str(x['lat']) + " " + str(x['lon']), axis=1)

# Average duplicate measurements per (date, location) pair.
data0 = data0.sort_values(by='date')
b = data0.groupby(['date', 'location']).mean()
b['indexs'] = b.index

# Unpack the (Timestamp, "lat lon") MultiIndex tuples back into columns.
# The original walked each 2-tuple by position (j == 0 / else) inside a
# range(len(...)) loop; plain tuple unpacking expresses the same thing.
bd = []  # 'YYYY-MM-DD' date strings
bl = []  # 'lat lon' strings
for stamp, loc in b['indexs']:
    bd.append(str(stamp).split(' ')[0])  # drop the time-of-day part
    bl.append(loc)
b['location'] = bl
b['date'] = bd
T_data = b[['date', 'lat', 'lon', 'SampleDepth', 'T']]
T_data = T_data.set_index('date')

接下来是使用LSTM进行超参数搜索的步骤:
1.准备训练集和测试集。首先,将数据集分为训练集和测试集。通常情况下,将70%的数据用于训练,30%的数据用于测试。这里可以使用scikit-learn库的train_test_split函数来完成这一步。
2.定义LSTM模型。可以使用Keras库来定义LSTM模型。根据经验,LSTM模型通常需要调整的参数包括神经元数量、层数、批大小和时序长度等。
3.进行超参数搜索。可以使用GridSearchCV函数或RandomizedSearchCV函数来进行超参数搜索。GridSearchCV函数通过指定一组参数网格来进行搜索,而RandomizedSearchCV函数则通过随机采样一组参数来进行搜索。这里可以使用RandomizedSearchCV函数进行搜索,代码如下:

from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import RandomizedSearchCV
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import numpy as np

# 定义LSTM模型
def create_model(neurons, dropout_rate, input_shape=None):
    """Build a single-layer LSTM regressor for use with KerasRegressor.

    Parameters:
        neurons: int — number of units in the LSTM layer.
        dropout_rate: float — dropout applied after the LSTM layer.
        input_shape: optional (timesteps, features) tuple. Defaults to the
            shape of the module-level ``X_train`` for backward compatibility
            with the original, which read that global directly.

    Returns:
        A compiled keras Sequential model (MSE loss, Adam optimizer).
    """
    if input_shape is None:
        # NOTE(review): relies on a module-level X_train existing before the
        # first call — confirm against the calling code.
        input_shape = (X_train.shape[1], X_train.shape[2])
    model = Sequential()
    model.add(LSTM(neurons, input_shape=input_shape))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model

# 准备数据
scaler = MinMaxScaler(feature_range=(0, 1))
T_data_scaled = scaler.fit_transform(T_data)
X = []
y = []
for i in range(1, len(T_data_scaled)):
   

以下答案由GPT-3.5大模型与博主波罗歌共同编写:
这里给出一个基于Keras框架的LSTM多变量时间序列预测模型,以实现对海洋中S、T浓度的预测,代码如下:

import os
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

# Read the CSV files and shape them into the per-(date, location) T table.
path = r"C:\Users\Asus\Desktop\毕设\data\new"
FileNames = os.listdir(path)
df = []
for fn in FileNames:
    fullfilename = os.path.join(path, fn)
    df.append(pd.read_csv(fullfilename, encoding='utf-8', index_col=None))
data = pd.concat(df)
data = data[['Station', 'date', 'lat', 'lon', 'SampleDepth', 'T', 'S']]
data["date"] = pd.to_datetime(data["date"], format='%m/%d/%Y')

group = data.groupby(['Station'])
SAT = data['Station'].value_counts()
n_SAT = pd.DataFrame(SAT)
n_SAT = n_SAT[n_SAT['Station'] >= 100]  # keep stations with >= 100 records
n_SAT['counts'] = n_SAT['Station']
n_SAT['Station'] = n_SAT.index

n_data = []
for i in n_SAT['Station']:
    n_data.append(data.loc[data["Station"] == i])
data0 = pd.concat(n_data)
a = data0[['lat', 'lon']]
data0['location'] = a.apply(lambda x: str(x['lat']) + " " + str(x['lon']), axis=1)

# Average duplicate measurements per (date, location).
data0 = data0.sort_values(by='date')
b = data0.groupby(['date', 'location']).mean()
b['indexs'] = b.index

ls = list(b['indexs'])
# Each entry of `ls` is a (Timestamp, "lat lon") MultiIndex tuple; split it
# into parallel date-string / location-string lists. (The original walked
# each tuple by position with nested index loops — one comprehension per
# level does the same thing.)
bd = [str(stamp).split(' ')[0] for stamp, _ in ls]  # 'YYYY-MM-DD' strings
bl = [loc for _, loc in ls]                         # 'lat lon' strings
b['location'] = bl
b['date'] = bd
T_data = b[['date', 'lat', 'lon', 'SampleDepth', 'T']]
T_data = T_data.set_index('date')

# Hyperparameters
look_back = 20        # window length: time steps per training sample
epochs = 20
batch_size = 32
train_split = 0.67    # fraction of rows used for training
dropout_rate = 0.2
neurons = 32

# Data preprocessing: scale every feature column to [0, 1].
dataset = T_data.values.astype('float32')
# BUG FIX: input_num is the number of features per time step. It was
# hard-coded to 1, but T_data has four columns (lat, lon, SampleDepth, T),
# so the declared LSTM input shape disagreed with the actual data.
# Deriving it from the array keeps the two in sync.
input_num = dataset.shape[1]
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

# Chronological train/test split (no shuffling for time series).
train_size = int(len(dataset) * train_split)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]

# 将数据整理为前后多少时间窗口的形式
def create_dataset(dataset, look_back=None):
    """Slice a 2-D series into supervised-learning windows.

    Parameters:
        dataset: np.ndarray of shape (n_samples, n_features), already scaled.
        look_back: int window length. Defaults to the module-level
            ``look_back`` hyperparameter. (BUG FIX: the old default was
            ``input_num`` — the feature count, 1 — not the window length,
            so it silently built 1-step windows instead of 20-step ones.)

    Returns:
        (X, y): X of shape (n_windows, look_back, n_features); y holds the
        first feature at the time step immediately after each window.
    """
    if look_back is None:
        # Resolve the module-level hyperparameter at call time so an
        # explicit argument always wins over the global configuration.
        look_back = globals()["look_back"]
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i:(i + look_back), :])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

train_X, train_Y = create_dataset(train)
test_X, test_Y = create_dataset(test)

# Define the model. The input shape is taken from the data itself so it
# cannot drift out of sync with the (look_back, input_num) hyperparameters.
model = Sequential()
model.add(LSTM(neurons, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dropout(dropout_rate))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

# Train the model.
model.fit(train_X, train_Y, epochs=epochs, batch_size=batch_size, verbose=2)

# Predict on train and test windows (outputs are scaled target values).
trainPredict = model.predict(train_X)
testPredict = model.predict(test_X)


def _invert_target(col):
    """Undo the MinMax scaling for the target (first) column only.

    BUG FIX: the scaler was fitted on ALL feature columns, so calling
    inverse_transform on the single-column predictions raised a shape
    error. Pad the values back to full width, invert, take column 0.
    """
    pad = np.zeros((len(col), dataset.shape[1]), dtype='float32')
    pad[:, 0] = np.asarray(col).ravel()
    return scaler.inverse_transform(pad)[:, 0]


trainPredict = _invert_target(trainPredict)
testPredict = _invert_target(testPredict)
trainY_inv = _invert_target(train_Y)
testY_inv = _invert_target(test_Y)

# RMSE, with BOTH sides in original units (the old code compared scaled
# targets against unscaled predictions, yielding a meaningless score).
trainScore = math.sqrt(mean_squared_error(trainY_inv, trainPredict))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY_inv, testPredict))
print('Test Score: %.2f RMSE' % (testScore))

# Visualise the actual target series against the shifted predictions.
actual = _invert_target(dataset[:, 0])
trainPredictPlot = np.full(len(dataset), np.nan)
trainPredictPlot[look_back:len(trainPredict) + look_back] = trainPredict
testPredictPlot = np.full(len(dataset), np.nan)
# Test predictions start right after the train windows; this slice is
# exactly len(testPredict) long (the old +1/-1 offsets were off by one).
testPredictPlot[len(trainPredict) + look_back * 2:len(dataset)] = testPredict
plt.plot(actual)
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()

在这个示例中,我们使用了LSTM网络进行时间序列预测,并通过Keras框架实现。我们首先读取数据,划分训练集和测试集,并进行数据预处理,将数据整理为适合LSTM模型的形式。接着,我们定义了超参数,并创建了LSTM模型,训练模型并进行预测。最后,我们通过均方误差对模型进行评估,并可视化了预测结果。

当然,这只是一个简单的示例,并不能直接应用到你的问题中。在实际问题中,可能需要对模型进行更多的调参和优化,以获得更好的性能。同时,对于不同的问题,可能需要更改模型的具体结构和超参数。
如果我的回答解决了您的问题,请采纳!

参考GPT和自己的思路:接下来,您可以使用以下代码进行超参数搜索。

首先,您需要安装和导入一些必要的库。

!pip install tensorflow
!pip install keras-tuner


然后,定义模型构建函数和超参数搜索空间。以下示例包括以下超参数:

LSTM层数量
LSTM层中神经元数量
Dropout率
学习速率
batch_size
epochs

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from kerastuner.tuners import RandomSearch

def build_model(hp):
    """Build a stacked-LSTM regressor driven by KerasTuner hyperparameters.

    Search space: number of LSTM layers (2-5), units and dropout per layer,
    and the Adam learning rate (log-sampled).

    Parameters:
        hp: kerastuner HyperParameters object supplied by the tuner.

    Returns:
        A compiled keras model (MSE loss, MAE metric).
    """
    model = keras.Sequential()
    # Query the layer count once; the original re-queried hp.Int with the
    # same name inside the loop condition, which returns the same value
    # but obscures the intent.
    num_layers = hp.Int('num_layers', 2, 5)
    for i in range(num_layers):
        model.add(layers.LSTM(units=hp.Int('units_' + str(i),
                                           min_value=32,
                                           max_value=512,
                                           step=32),
                              dropout=hp.Float('dropout_' + str(i),
                                               min_value=0.0,
                                               max_value=0.5,
                                               default=0.25,
                                               step=0.05),
                              # every LSTM except the last must emit the
                              # full sequence for the next LSTM to consume
                              return_sequences=(i < num_layers - 1)))
    model.add(layers.Dense(1))

    model.compile(optimizer=keras.optimizers.Adam(
                      learning_rate=hp.Float('learning_rate',
                                             min_value=1e-4,
                                             max_value=1e-2,
                                             sampling='LOG',
                                             default=1e-3)),
                  loss='mse',
                  metrics=['mae'])
    return model

# Random search over the space declared in build_model; results are written
# to my_dir/LSTM_hyperparameter_tuning.
tuner = RandomSearch(
    build_model,
    objective='val_mae',
    max_trials=100,
    executions_per_trial=1,
    directory='my_dir',
    project_name='LSTM_hyperparameter_tuning')

# BUG FIX: the original passed batch_size=hp.Int(...) here, but `hp` only
# exists inside build_model — this line raised NameError. Tuning the batch
# size requires a HyperModel subclass (override fit/run_trial); with plain
# RandomSearch a fixed batch size must be used.
tuner.search(x_train, y_train,
             epochs=50,
             batch_size=32,
             validation_split=0.2)


其中,build_model()函数定义了LSTM模型架构和训练参数,并且使用HyperParameters对象来动态设置LSTM层数、每层的神经元数、Dropout率、学习速率、batch_size和epochs。RandomSearch是一个KerasTuner的随机搜索算法,用于对给定的超参数空间进行搜索。在本例中,我们将搜索100个不同的模型。执行结果将存储在“my_dir/LSTM_hyperparameter_tuning”目录中。

最后,您需要拟合最佳模型并进行预测。

# Retrieve the best model and hyperparameters found by the tuner, then
# retrain the winner on the full training data.
best_model = tuner.get_best_models(num_models=1)[0]
best_hyperparameters = tuner.get_best_hyperparameters(1)[0]

# NOTE(review): 'batch_size' is only retrievable here if it was actually
# registered as a hyperparameter during the search — confirm, otherwise
# .get('batch_size') fails.
best_model.fit(x_train, y_train, epochs=100, batch_size=best_hyperparameters.get('batch_size'), validation_split=0.2)

y_pred = best_model.predict(x_test)
将预测值和真实值还原成原始范围
# NOTE(review): `y_scaler` must be the scaler fitted on the target column
# earlier in the pipeline — it is not defined in this snippet; verify.
y_pred = y_scaler.inverse_transform(y_pred)
y_test = y_scaler.inverse_transform(y_test)

计算模型的MSE和MAE
# NOTE(review): mean_absolute_error is not imported in this snippet
# (sklearn.metrics) — confirm the import exists upstream.
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)

print('测试集MSE: %.3f' % mse)
print('测试集MAE: %.3f' % mae)

将预测结果和真实结果转化成DataFrame格式,并加上对应的日期
# Wrap predictions and ground truth in date-indexed DataFrames so both can
# be plotted against the same axis. `test_dates` is assumed to hold the
# dates of the test rows — not shown in this snippet; verify.
pred_results = pd.DataFrame(y_pred, columns=['T_pred'])
pred_results['date'] = test_dates
pred_results.set_index('date', inplace=True)

true_results = pd.DataFrame(y_test, columns=['T_true'])
true_results['date'] = test_dates
true_results.set_index('date', inplace=True)

可视化预测结果和真实结果
import matplotlib.pyplot as plt

# Plot predicted vs. true temperature over the test period.
plt.plot(pred_results.index, pred_results['T_pred'], label='Predictions')
plt.plot(true_results.index, true_results['T_true'], label='True values')
plt.xlabel('Date')
plt.ylabel('Temperature (℃)')
plt.title('LSTM Multivariate Time Series Prediction')
plt.legend()
plt.show()

参考GPT和自己的思路,下面是一个LSTM多变量时间预测模型的示例代码,可以参考:

# 导入必要的库
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

# Read the data (placeholder path — substitute the real CSV file).
data = pd.read_csv("your_file_path.csv")
# Perform any necessary preprocessing here
# ...

# Parameters
lookback = 30  # use the previous 30 samples to predict the next one
n_features = 2  # two input features: T and S
batch_size = 32
epochs = 50

# Chronological 80/20 train/test split (no shuffling for time series).
train_size = int(len(data) * 0.8)
train_data = data.iloc[:train_size,:]
test_data = data.iloc[train_size:,:]

# Scale the two feature columns to [0, 1]. Fit on the training rows only,
# then apply the same transform to the test rows — this avoids leaking
# test-set statistics into the scaler.
scaler = MinMaxScaler()
train_data_scaled = scaler.fit_transform(train_data[['T', 'S']])
test_data_scaled = scaler.transform(test_data[['T', 'S']])

# 将数据转化为适合LSTM的格式
def create_dataset(dataset, lookback):
    """Turn a (n_samples, n_features) array into rolling supervised pairs.

    Each sample is a window of `lookback` consecutive rows; its target is
    the first feature of the row immediately after the window.

    Returns:
        (X, Y): X of shape (n_windows, lookback, n_features), Y of shape
        (n_windows,).
    """
    n_windows = len(dataset) - lookback
    windows = [dataset[start:start + lookback, :] for start in range(n_windows)]
    targets = [dataset[start + lookback, 0] for start in range(n_windows)]
    return np.array(windows), np.array(targets)

train_X, train_Y = create_dataset(train_data_scaled, lookback)
test_X, test_Y = create_dataset(test_data_scaled, lookback)

# Define the model: one LSTM layer plus a linear output head.
model = Sequential()
model.add(LSTM(64, input_shape=(lookback, n_features)))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam')

# Train the model.
history = model.fit(train_X, train_Y, batch_size=batch_size, epochs=epochs, validation_split=0.2)

# Evaluate on scaled data (loss values are in scaled units).
train_loss = model.evaluate(train_X, train_Y, batch_size=batch_size, verbose=0)
test_loss = model.evaluate(test_X, test_Y, batch_size=batch_size, verbose=0)
print("Train loss:", train_loss)
print("Test loss:", test_loss)

# Predict over the whole test period: prepend the last `lookback` training
# rows so even the first test row has a full input window — this yields
# exactly one prediction per test row.
test_data_scaled_with_lookback = np.concatenate((train_data_scaled[-lookback:, :], test_data_scaled))
test_X_with_lookback, test_Y_with_lookback = create_dataset(test_data_scaled_with_lookback, lookback)
predictions = model.predict(test_X_with_lookback)

# BUG FIX: the scaler was fitted on 2 columns ('T', 'S'), so it cannot be
# applied to the 1-column predictions; the original concatenated a
# (lookback, 2) slab with an (n, 1) prediction array, which raises a shape
# error (and would also make the plotted series longer than the date axis).
# Pad the predictions back to full width, invert, and keep the T column.
pad = np.zeros((len(predictions), train_data_scaled.shape[1]))
pad[:, 0] = np.asarray(predictions).ravel()
predictions_rescaled = scaler.inverse_transform(pad)[:, 0]

# Plot true vs. predicted T; lengths now match (one prediction per test row).
plt.plot(test_data['date'], test_data['T'], label="True T")
plt.plot(test_data['date'], predictions_rescaled, label="Predicted T")
plt.legend()
plt.show()


不知道你这个问题是否已经解决, 如果还没有解决的话:

如果你已经解决了该问题, 非常希望你能够分享一下解决方案, 写成博客, 将相关链接放在评论区, 以帮助更多的人 ^-^