CNN 代码报错,不知道怎么解决,请求帮助。
这是报错信息:

(此处为报错信息截图,图片未能显示)

下面是我的代码:

from keras.models import Model
from keras.optimizers import Adam
from keras.layers import Conv2D, Input, MaxPooling2D, Dropout, concatenate, UpSampling2D
import numpy as np
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from PIL import Image
import matplotlib.pyplot as plt

import os

from tensorflow.python.keras import models, layers
from tensorflow.python.keras.callbacks import EarlyStopping

from tensorflow.python.keras.optimizers import gradient_descent_v2

def generateds(path, txt):
    """Load images and integer labels listed in a label file.

    Each line of *txt* is "<image filename> <integer label>" separated by
    whitespace. Every image is resized to 64x64, converted to RGB and
    scaled to [0, 1].

    Args:
        path: directory prefix prepended verbatim to each image filename.
        txt: path to the label file.

    Returns:
        Tuple (x, y_): float32 array of shape (N, 64, 64, 3) and an
        int64 array of class indices.
    """
    # Context manager guarantees the file is closed even if reading raises.
    with open(txt, 'r') as f:
        contents = f.readlines()
    x, y_ = [], []
    for content in contents:
        value = content.split()  # value[0] = filename, value[1] = label
        img_path = path + value[0]
        img = Image.open(img_path)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10;
        # use Image.LANCZOS there (same filter, new name).
        img = img.resize((64, 64), Image.ANTIALIAS)
        img = np.array(img.convert('RGB'))
        img = img / 255.  # normalize pixel values to [0, 1]
        x.append(img)
        y_.append(value[1])
        print('loading : ' + content)  # progress indicator

    x = np.array(x, dtype='float32')
    y_ = np.array(y_).astype(np.int64)  # integer class indices, not one-hot
    return x, y_

train_path = r'D:/ningyupeng/pythonProject/data_cell_images/'  # training image directory
train_txt = r'D:/ningyupeng/pythonProject/data_cell_images/train.txt'  # training label file
x_train_savepath = r'D:/ningyupeng/pythonProject/data_cell_images/mnist_x_train.npy'
y_train_savepath = r'D:/ningyupeng/pythonProject/data_cell_images/mnist_y_train.npy'

test_path = r'D:/ningyupeng/pythonProject/data_cell_images/'  # test image directory
test_txt = r'D:/ningyupeng/pythonProject/data_cell_images/test.txt'  # test label file
x_test_savepath = r'D:/ningyupeng/pythonProject/data_cell_images/mnist_x_test.npy'
y_test_savepath = r'D:/ningyupeng/pythonProject/data_cell_images/mnist_y_test.npy'

x_train, y_train = generateds(train_path, train_txt)
x_test, y_test = generateds(test_path, test_txt)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# BUG FIX: generateds() already divides every pixel by 255, so dividing by
# 255 again here squashed all inputs into [0, 1/255] and crippled training.
# The second normalization has been removed.
print("x_train.shape", x_train.shape)
x_train = x_train.reshape(x_train.shape[0], 64, 64, 3)
x_test = x_test.reshape(x_test.shape[0], 64, 64, 3)
print("x_train.shape", x_train.shape)

##train = train[0]
# 定义u-Net网络模型
# def CNN():
# Build the CNN: four conv stages (each followed by max pooling and batch
# normalization), then a small fully-connected head ending in a 2-way softmax.
model = models.Sequential([
    # Stage 1: Input + Conv + ReLU + Max Pooling
    layers.Conv2D(32, (5, 5), activation='relu', padding='same',
                  input_shape=x_train.shape[1:]),
    layers.MaxPool2D(strides=4),
    layers.BatchNormalization(),

    # Stage 2: Conv + ReLU + Max Pooling
    layers.Conv2D(64, (5, 5), padding='same', activation='relu'),
    layers.MaxPool2D(strides=2),
    layers.BatchNormalization(),

    # Stage 3: Conv + ReLU + Max Pooling
    layers.Conv2D(128, (3, 3), padding='same', activation='relu'),
    layers.MaxPool2D(strides=2),
    layers.BatchNormalization(),

    # Stage 4: dilated Conv, then plain Conv + ReLU + Max Pooling
    layers.Conv2D(256, (3, 3), dilation_rate=(2, 2), padding='same',
                  activation='relu'),
    layers.Conv2D(256, (3, 3), activation='relu'),
    layers.MaxPool2D(strides=2),
    layers.BatchNormalization(),

    # Fully connected head
    layers.Flatten(),
    layers.Dense(300, activation='relu'),
    layers.Dense(100, activation='relu'),

    # Output: two-class softmax
    layers.Dense(2, activation='softmax'),
])

model.summary()


# --- Training ---
# Save the best weights (lowest training loss) after each epoch.
model_checkpoint = ModelCheckpoint('./CNN_Skin_test.ckpt', monitor='loss', verbose=1,
                                   save_weights_only=True, save_best_only=True)

# BUG FIX: y_train / y_test are int64 class indices (0/1) produced by
# generateds(), NOT one-hot vectors, while the model ends in a 2-unit
# softmax. 'categorical_crossentropy' expects one-hot targets and fails
# with a shape mismatch at fit() time; 'sparse_categorical_crossentropy'
# is the correct loss for integer labels.
# (The unused SGD optimizer object previously built here has been removed;
# 'adam' is the optimizer actually used.)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

es = EarlyStopping(monitor='val_accuracy', mode='max', patience=3, verbose=1)

# BUG FIX: model_checkpoint was created but never passed to fit(), so no
# weights were ever saved; it is now included in the callbacks list.
history = model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1,
                    validation_data=(x_test, y_test), validation_freq=1,
                    shuffle=True, callbacks=[es, model_checkpoint])

# Plot accuracy over epochs.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='upper left')
plt.show()

# Plot loss over epochs.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['loss', 'val_loss'], loc='upper left')
plt.show()


因为 TensorFlow 2.6.0 及以上版本的 tensorflow.python.keras.layers 才提供 BatchNormalization(),所以请将 TensorFlow 升级到 2.6.0 或更高版本。

不知道你这个问题是否已经解决。如果还没有解决,可以先尝试上面的建议(升级 TensorFlow 版本)。

如果你已经解决了该问题,非常希望你能够分享一下解决方案,写成博客并将相关链接放在评论区,以帮助更多的人 ^-^