python编写的优化卷积神经网络程序最后输出不了。

如题,最后我想输出参数,结果报错:NameError: name 'epochs' is not defined

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Conv1D, Flatten, MaxPooling1D
from keras.optimizers import adam_v2
from bayes_opt import BayesianOptimization


def cnn_model(num_filters, kernel_size, pool_size, dense_layer_size, epochs, batch_size):
    """Build and train a 1-D CNN on data loaded from '1.csv'.

    All hyperparameters arrive as floats from BayesianOptimization and are
    cast to int where the Keras APIs require integers.

    Returns:
        The negated minimum validation MSE, so that the optimizer
        (which maximizes) effectively minimizes the validation loss.
    """
    # Load the data, clean it and select the feature columns
    data = pd.read_csv('1.csv', encoding='gb18030')
    features = ['Nb%', 'Ti%', 'AL%', 'V%', 'Cr%', 'Mo%', 'C%', 'Mn%', 'P%', 'Ni%', 'Cu%', 'Si%', 'S%', '奥氏体化温度0℃',
                '油冷时间0min', '保温时间0min', '上下窜动时间0min', '回火0退火温度0℃', '保温时间0min']
    X = data[features]
    y = data['硬度']
    X = np.nan_to_num(X)  # replace NaN with finite numbers before scaling
    y = np.nan_to_num(y)

    scaler = MinMaxScaler()  # scale features to [0, 1]
    X = scaler.fit_transform(X)
    pca = PCA(n_components=5)  # keep 5 principal components
    X_pca = pca.fit_transform(X)

    # Train/test split
    X_train, X_test, y_train, y_test = train_test_split(X_pca, y, test_size=0.2, random_state=400)

    # Reshape to 3-D: Conv1D expects (samples, timesteps, channels)
    X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
    X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))

    # Build the convolutional network
    model = Sequential()
    model.add(Conv1D(filters=int(num_filters), kernel_size=int(kernel_size), activation='relu',
                     input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(MaxPooling1D(pool_size=int(pool_size)))
    model.add(Flatten())
    model.add(Dense(int(dense_layer_size), activation='relu'))
    model.add(Dense(1))

    # Compile the model.
    # BUG FIX: the original called Adam(), but only adam_v2 is imported
    # (from keras.optimizers import adam_v2), so Adam was an undefined
    # name and raised NameError at runtime.
    model.compile(optimizer=adam_v2.Adam(), loss='mse')

    # Train the model
    history = model.fit(X_train, y_train, epochs=int(epochs), batch_size=int(batch_size),
                        validation_data=(X_test, y_test), verbose=0)

    # Return the (negated) minimum validation loss seen during training
    return -np.min(history.history['val_loss'])


from bayes_opt import BayesianOptimization
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import cross_val_score

# Load the Boston housing dataset used by the random-forest demo below.
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2 — this will raise ImportError on modern scikit-learn; migrate to
# another regression dataset (e.g. fetch_california_housing).
boston = load_boston()
X = boston.data   # feature matrix, consumed by rf_cv via module scope
y = boston.target # regression target, consumed by rf_cv via module scope


# 定义随机森林模型
def rf_cv(n_estimators, min_samples_split, max_features):
    """Cross-validation objective for Bayesian optimization.

    Builds a RandomForestRegressor from the (float-valued) hyperparameters
    supplied by the optimizer and scores it with 10-fold cross-validation
    on the module-level X, y. Returns the mean negative MSE, so a larger
    value means a better model.
    """
    model = RandomForestRegressor(
        n_estimators=int(n_estimators),
        min_samples_split=int(min_samples_split),
        max_features=max_features,
        random_state=2,
    )
    scores = cross_val_score(model, X, y, scoring='neg_mean_squared_error', cv=10)
    return scores.mean()


# Define the hyperparameter search space for rf_cv
pbounds = {
    'n_estimators': (50, 200),
    'min_samples_split': (2, 25),
    'max_features': (0.1, 1.0)
}

# Initialize the Bayesian optimizer
optimizer = BayesianOptimization(
    f=rf_cv,
    pbounds=pbounds,
    random_state=1,
)

# Run the optimization
optimizer.maximize(init_points=5, n_iter=20)

# Convert the best result to a DataFrame
df = pd.DataFrame.from_dict(optimizer.max, orient='index').T

# Write the results to an Excel file
df.to_excel('optimizer_results.xlsx', index=False)

# Print the best parameters and the corresponding cross-validation score
print(optimizer.max)

# BUG FIX: the original lines printed `epochs`, `batch_size`, `Dense`,
# `kernel_size` and even `print(filters=int)`. None of those names exist
# at module level (they are parameters of cnn_model), so the script died
# with NameError (and print() does not accept a `filters` keyword).
# The tuned values actually live in optimizer.max['params'] — print those.
best_params = optimizer.max['params']
for param_name, param_value in best_params.items():
    print(f'{param_name}: {param_value}')

很明显,最后我想输出的几个量正是我在程序中优化的参数,但运行时却提示这些名称没有定义,不知道如何解决。

引用chatgpt部分指引作答:
运行结果如下:所有参数的输出都有了。你直接导出到 Excel 就行了,导出到 Excel 的代码之前已经给你了。

img

修改后的代码如下:

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Conv1D, Flatten, MaxPooling1D
from keras.optimizers import adam_v2
from bayes_opt import BayesianOptimization
#from keras.optimizers import adam

 
 
def cnn_model(num_filters, kernel_size, pool_size, dense_layer_size, epochs, batch_size):
    """Build and train a 1-D CNN on data from '1.csv'.

    Hyperparameters arrive as floats from BayesianOptimization and are
    cast to int where the Keras APIs require integers. Returns the negated
    test-set MSE so that maximizing the objective minimizes the error.
    """
    print(f"num_filters: {num_filters}, kernel_size: {kernel_size}, pool_size: {pool_size}, dense_layer_size: {dense_layer_size}, epochs: {epochs}, batch_size: {batch_size}")

    # Load the data, clean it and select the feature columns
    data = pd.read_csv('1.csv', encoding='gb18030')
    features = ['Nb%', 'Ti%', 'AL%', 'V%', 'Cr%', 'Mo%', 'C%', 'Mn%', 'P%', 'Ni%', 'Cu%', 'Si%', 'S%', '奥氏体化温度0℃',
                '油冷时间0min', '保温时间0min', '上下窜动时间0min', '回火0退火温度0℃', '保温时间0min']
    X = data[features]
    y = data['硬度']
    X = np.nan_to_num(X)  # replace NaN with finite numbers before scaling
    y = np.nan_to_num(y)

    scaler = MinMaxScaler()  # scale features to [0, 1]
    X = scaler.fit_transform(X)
    pca = PCA(n_components=5)  # keep 5 principal components
    X_pca = pca.fit_transform(X)

    # Train/test split
    X_train, X_test, y_train, y_test = train_test_split(X_pca, y, test_size=0.2, random_state=400)

    # Reshape to 3-D: Conv1D expects (samples, timesteps, channels)
    X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
    X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))

    # Build the convolutional network
    model = Sequential()
    model.add(Conv1D(filters=int(num_filters), kernel_size=int(kernel_size), activation='relu',
                     input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(MaxPooling1D(pool_size=int(pool_size)))
    model.add(Conv1D(filters=int(num_filters), kernel_size=1, padding='same', activation='relu'))
    model.add(Flatten())
    model.add(Dense(int(dense_layer_size), activation='relu'))
    model.add(Dense(1))

    # Compile the model
    model.compile(optimizer=adam_v2.Adam(), loss='mse')

    # Train the model (no validation split here; evaluation is done below)
    model.fit(X_train, y_train, epochs=int(epochs), batch_size=int(batch_size), verbose=0)

    # Evaluate on the held-out test set
    loss = model.evaluate(X_test, y_test, verbose=0)

    return -loss  # negate: the optimizer maximizes, we want minimal MSE
# Hyperparameter search space handed to the Bayesian optimizer; the dict
# keys must match cnn_model's parameter names exactly.
pbounds = {
    'num_filters': (16, 64),
    'kernel_size': (3, 5),
    'pool_size': (2, 3),
    'dense_layer_size': (16, 32),
    'epochs': (25, 50),
    'batch_size': (16, 32),
}

# Create the optimizer and search for the best hyperparameter combination
optimizer = BayesianOptimization(f=cnn_model, pbounds=pbounds, verbose=2)
optimizer.maximize()

# Report the best combination and its (un-negated) mean squared error
print('最佳超参数组合:', optimizer.max)
print('最小均方误差:', -optimizer.max['target'])

@中本王
引用继续完成:

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Conv1D, Flatten, MaxPooling1D
from keras.optimizers import adam_v2
from bayes_opt import BayesianOptimization

def cnn_model(num_filters, kernel_size, pool_size, dense_layer_size, epochs, batch_size):
    """Train a 1-D CNN on '1.csv' and return the negated minimum
    validation MSE, so BayesianOptimization (a maximizer) minimizes it.

    Hyperparameters come in as floats from the optimizer and are cast to
    int where the Keras APIs require integers.
    """
    # Load the raw data and pick the input features / target column
    raw = pd.read_csv('1.csv', encoding='gb18030')
    feature_cols = ['Nb%', 'Ti%', 'AL%', 'V%', 'Cr%', 'Mo%', 'C%', 'Mn%', 'P%', 'Ni%', 'Cu%', 'Si%', 'S%', '奥氏体化温度0℃',
                    '油冷时间0min', '保温时间0min', '上下窜动时间0min', '回火0退火温度0℃', '保温时间0min']
    inputs = np.nan_to_num(raw[feature_cols])   # NaN -> finite values
    target = np.nan_to_num(raw['硬度'])

    # Scale features to [0, 1], then project onto 5 principal components
    inputs = MinMaxScaler().fit_transform(inputs)
    reduced = PCA(n_components=5).fit_transform(inputs)

    # Hold out 20% of the samples for validation
    X_train, X_test, y_train, y_test = train_test_split(
        reduced, target, test_size=0.2, random_state=400)

    # Conv1D expects (samples, timesteps, channels): add a channel axis
    X_train = X_train[..., np.newaxis]
    X_test = X_test[..., np.newaxis]

    # Assemble the convolutional network
    net = Sequential()
    net.add(Conv1D(filters=int(num_filters), kernel_size=int(kernel_size),
                   activation='relu',
                   input_shape=(X_train.shape[1], X_train.shape[2])))
    net.add(MaxPooling1D(pool_size=int(pool_size)))
    net.add(Flatten())
    net.add(Dense(int(dense_layer_size), activation='relu'))
    net.add(Dense(1))
    net.compile(optimizer=adam_v2.Adam(), loss='mse')

    # Fit and track validation loss each epoch
    history = net.fit(X_train, y_train, epochs=int(epochs),
                      batch_size=int(batch_size),
                      validation_data=(X_test, y_test), verbose=0)

    # Best (lowest) validation loss over all epochs, negated for maximization
    return -np.min(history.history['val_loss'])

from bayes_opt import BayesianOptimization

# Load the dataset. NOTE(review): cnn_model re-reads '1.csv' internally,
# so this module-level load is not actually used by the optimizer.
data = pd.read_csv('1.csv', encoding='gb18030')

# Hyperparameter search space; keys must match cnn_model's parameters
pbounds = {
    'num_filters': (16, 64),
    'kernel_size': (3, 5),
    'pool_size': (2, 4),
    'dense_layer_size': (16, 64),
    'epochs': (10, 50),
    'batch_size': (16, 64)
}

# Initialize the Bayesian optimizer
optimizer = BayesianOptimization(
    f=cnn_model,
    pbounds=pbounds,
    random_state=1,
)

# Run the optimization
optimizer.maximize(init_points=5, n_iter=20)

# Convert the best result to a DataFrame
df = pd.DataFrame.from_dict(optimizer.max, orient='index').T

# Write the results to an Excel file
df.to_excel('optimizer_results.xlsx', index=False)

# Print the best parameters and the corresponding score
print(optimizer.max)

# Extract the tuned epochs, batch_size, etc. from the optimizer result.
best_params = optimizer.max['params']
# BUG FIX: the original default_params only contained 'learning_rate',
# 'batch_size' and 'epochs', so default_params['dense_layer_size'],
# ['num_filters'], ['kernel_size'] and ['pool_size'] raised KeyError.
# Provide a fallback for every tuned parameter.
default_params = {'learning_rate': 0.01, 'batch_size': 32, 'epochs': 10,
                  'dense_layer_size': 32, 'num_filters': 32,
                  'kernel_size': 3, 'pool_size': 2}
epochs = best_params.get('epochs', default_params['epochs'])
batch_size = best_params.get('batch_size', default_params['batch_size'])
dense_layer_size = best_params.get('dense_layer_size', default_params['dense_layer_size'])
num_filters = best_params.get('num_filters', default_params['num_filters'])
kernel_size = best_params.get('kernel_size', default_params['kernel_size'])
pool_size = best_params.get('pool_size', default_params['pool_size'])
print(f'epochs: {epochs}, batch_size: {batch_size}, dense_layer_size: {dense_layer_size}, num_filters: {num_filters}, kernel_size: {kernel_size}, pool_size: {pool_size}')

结合ChatGPT和自己的理解:
在程序的最后几行,您使用了一些变量,但这些变量并没有在代码中定义过。这些变量是在您定义 cnn_model 函数中出现的变量,其中您使用了这些变量作为优化的参数。
如果您想输出这些变量的值,可以将这些变量在 cnn_model 函数外进行定义,在函数内进行调用和修改,然后再在代码的最后几行输出这些变量的值。
例如,您可以在代码的最开始部分进行如下的变量定义:
num_filters = 16
kernel_size = 3
pool_size = 2
dense_layer_size = 8
epochs = 10
batch_size = 32
然后在调用 cnn_model 函数时,将这些变量作为参数传递进去:
result = cnn_model(num_filters, kernel_size, pool_size, dense_layer_size, epochs, batch_size)
最后,您可以在代码的最后几行输出这些变量的值:
print(num_filters)
print(kernel_size)
print(pool_size)
print(dense_layer_size)
print(epochs)
print(batch_size)
这样就可以输出这些变量的值了。

引用chatGPT作答,在您的代码中,epochs和batch_size是cnn_model函数的参数,它们只在cnn_model函数内部使用,并没有定义在函数外部。在您的代码中,您尝试打印epochs和batch_size,但它们没有被定义,所以会报错。您可以尝试在cnn_model函数的最后一行打印epochs和batch_size,或者将这些参数传递到函数外部,然后打印它们。例如,您可以在cnn_model函数的最后一行添加以下代码:

print("epochs: ", epochs)
print("batch_size: ", batch_size)

这将打印出您在调用cnn_model函数时传递的epochs和batch_size参数的值。如果您想将这些参数保存到Excel文件中,可以将它们添加到df数据框中,例如:

df = pd.DataFrame.from_dict(optimizer.max, orient='index').T
df['epochs'] = epochs
df['batch_size'] = batch_size
df.to_excel('optimizer_results.xlsx', index=False)

这将在df数据框中添加名为epochs和batch_size的列,并将它们的值设置为相应的函数参数值。然后,您可以将整个数据框写入Excel文件中。