Keras binary-classification model predicts 1 for every sample

I wrote the following code to implement binary classification:

import numpy as np
import os,cv2,random
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping

from tensorflow.keras import layers,Sequential,losses,optimizers,callbacks

train_data=[]   # dataset list: holds the image data
train_label=[]  # label list: holds the label values

def load_data(data_dst,label_dst,path,lab):  # data-loading function
    for filename in os.listdir(path):
        filename = path+'\\'+filename
        img = cv2.imread(filename)
        img = cv2.resize(img,(64,64))  # resize the image to 64*64
        data_dst.append(img)
        label_dst.append(lab)
# preprocessing function: augments the data in the dataset
def preprocess(data_dst,label_dst,path,labels):
    for filename in os.listdir(path):  # load each image
        filename = path+'\\'+filename
        img = cv2.imread(filename)
        img = cv2.resize(img,(64,64))
        # randomly augment images; roughly 2/5 of the total get augmented (4 of the 10 random values trigger an op)
        x=random.randint(1,10)
        if x==1:  # rotate the image by 180 degrees
            img = tf.image.rot90(img,2)
        elif x==2:  # random horizontal flip
            img = tf.image.random_flip_left_right(img)
        elif x==3:  # random vertical flip
            img = tf.image.random_flip_up_down(img)
        elif x==4:  # random crop: upscale to 70*70, then crop back to 64*64
            img = tf.image.resize(img,(70,70))  # note: returns a float32 tensor
            img = tf.image.random_crop(img,[64,64,3])
        data_dst.append(img)
        label_dst.append(labels)
# load the training data
load_data(train_data,train_label,r'C:\Users\AS\PycharmProjects\pythonProject4\face\cza',0)
load_data(train_data,train_label,r'C:\Users\AS\PycharmProjects\pythonProject4\face\zlp',1)
preprocess(train_data,train_label,r'C:\Users\AS\PycharmProjects\pythonProject4\face\cza',0)
preprocess(train_data,train_label,r'C:\Users\AS\PycharmProjects\pythonProject4\face\zlp',1)
# convert the data and label lists to numpy arrays
train_data = np.array(train_data)
train_label = np.array(train_label)

# normalize pixel values to [0, 1]
train_data = train_data.astype('float32')/255.0
print(train_data.shape)
network = Sequential([  # wrap the layers in a Sequential container
    # block 1
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu,input_shape=(64,64,3),kernel_initializer='random_uniform'),
    # two 3*3 convolutions with 64 output channels each, ReLU activation
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.BatchNormalization(),  # batch normalization
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding="same"),  # pooling layer, halves the spatial size
    layers.Dropout(rate=0.5),  # dropout to reduce overfitting, drop rate 0.5
    # block 2
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding="same"),
    layers.Dropout(rate=0.5),
    # block 3
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding="same"),
    layers.Dropout(rate=0.5),
    # block 4
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding="same"),
    layers.Dropout(rate=0.5),
    # block 5
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding="same"),
    layers.Dropout(rate=0.4),
    # fully connected head
    layers.Flatten(),  # flatten layer
    layers.Dense(512, activation='relu'),
    layers.Dropout(rate=0.5),
    layers.Dense(128, activation='relu'),
    layers.Dropout(rate=0.5),
    layers.Dense(64, activation='relu'),
    layers.Dropout(rate=0.5),
    layers.Dense(16, activation='relu'),
    layers.Dropout(rate=0.5),
    layers.Dense(2, activation=None),  # two-class logits output
])
network.build(input_shape=(None,64,64,3))  # batch dimension comes first; redundant here since input_shape is already set on the first Conv2D
network.summary()
network.compile(optimizer=optimizers.Adam(learning_rate=0.01,decay=0.01/50),
                loss=losses.SparseCategoricalCrossentropy(from_logits=True),
                metrics=['accuracy','mae'])
history = network.fit(train_data,train_label,batch_size=32,epochs=50,verbose=1,
                      callbacks=[EarlyStopping(monitor='val_accuracy',min_delta=0.0001,patience=3)],  # was 'var_accuracy' (typo)
                      validation_split=0.1,shuffle=True,validation_freq=2)
network.save("train_model.h5")
print("Successfully saving total model.")
del network
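
To see whether training converged or simply collapsed onto one class, the history object returned by fit can be inspected right after the training script (a minimal sketch; it assumes history is still in scope and uses only the metrics configured above):

# Sketch: print the last few recorded metrics.
# val_accuracy stuck near 0.5 while accuracy keeps climbing usually means
# overfitting or a model that predicts a single class.
print("train accuracy:", history.history['accuracy'][-5:])
print("train loss:    ", history.history['loss'][-5:])
print("val accuracy:  ", history.history.get('val_accuracy', [])[-5:])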

The predictions are all 1. I have already ruled out problems with the weights, dataset imbalance, and file loading.
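
For reference, the balance and input checks mentioned above can be done with a couple of lines against the arrays built in the training script (a sketch; the raw outputs before argmax are already printed by the prediction script below):

# Sketch: both classes should appear in similar numbers, and pixels should be in [0, 1].
print("train label counts:", np.bincount(train_label))
print("pixel value range:", train_data.min(), train_data.max())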

import numpy as np
import os,cv2,random
import tensorflow as tf
import tensorflow.keras as k
test_data = []
test_label = []
def load_data(data_dst,label_dst,path,lab):  # data-loading function
    for filename in os.listdir(path):
        filename = path+'\\'+filename
        img = cv2.imread(filename)
        img = cv2.resize(img,(64,64))  # resize the image to 64*64
        data_dst.append(img)
        label_dst.append(lab)
def preprocess(data_dst,label_dst,path,labels):  # augmentation function (defined here but not applied to the test set)
    for filename in os.listdir(path):  # load each image
        filename = path+'\\'+filename
        img = cv2.imread(filename)
        img = cv2.resize(img,(64,64))
        # randomly augment images; roughly 2/5 of the total get augmented
        x=random.randint(1,10)
        if x==1:  # rotate the image by 180 degrees
            img = tf.image.rot90(img,2)
        elif x==2:  # random horizontal flip
            img = tf.image.random_flip_left_right(img)
        elif x==3:  # random vertical flip
            img = tf.image.random_flip_up_down(img)
        elif x==4:  # random crop: upscale to 70*70, then crop back to 64*64
            img = tf.image.resize(img,(70,70))
            img = tf.image.random_crop(img,[64,64,3])
        data_dst.append(img)
        label_dst.append(labels)
load_data(test_data,test_label,r'C:\Users\AS\PycharmProjects\pythonProject4\test\cza',0)
load_data(test_data,test_label,r'C:\Users\AS\PycharmProjects\pythonProject4\test\zlp',1)
test_data = np.array(test_data)

test_label = np.array(test_label)
test_data = test_data.astype('float32')/255
print(test_data.shape)
network = k.models.load_model('train_model.h5')  # same file name as the one saved by the training script
print(test_label)
pre = network.predict(test_data)
print(pre)
pre = np.argmax(pre,axis=1)
print(pre)
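
To quantify the "all 1" behaviour, the argmax output can be compared against the ground-truth labels using the arrays already defined above (a minimal sketch):

# Sketch: per-class prediction counts and overall accuracy.
print("predicted class counts:", np.bincount(pre, minlength=2))
print("true class counts:     ", np.bincount(test_label, minlength=2))
print("test accuracy:", np.mean(pre == test_label))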
