Passing a 4D array to model.compile

I get the following error message during training:

main.py
train_history=model.fit_generator(myGene,steps_per_epoch=200,epochs=20,callbacks=[model_checkpoint])

error message
Exception occurred: ValueError
Found a sample_weight array with shape (1, 512, 3072, 1). In order to use timestep-wise sample weighting, you should pass a 2D sample_weight array.
  File "C:\Labbb\unet_mao0\main.py", line 88, in <module>
    train_history=model.fit_generator(myGene,steps_per_epoch=200,epochs=20,callbacks=[model_checkpoint])
ValueError: Found a sample_weight array with shape (1, 512, 3072, 1). In order to use timestep-wise sample weighting, you should pass a 2D sample_weight array.

What I'm passing in is a 4D array, but the framework expects 1D, or 2D when sample_weight_mode='temporal'.

model.py
model.compile(sample_weight_mode='temporal', optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
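For context, the error message itself states the contract: with sample_weight_mode='temporal', Keras wants a 2D sample_weight array of shape (batch, timesteps). A minimal sketch of collapsing a 4D per-pixel weight map into that shape (the array name is illustrative, not from the code above):

import numpy as np

# hypothetical per-pixel weight map with the shape from the ValueError
weight_map = np.ones((1, 512, 3072, 1), dtype=np.float32)

# collapse (batch, H, W, 1) -> (batch, H*W): one weight per "timestep",
# which is the 2D shape that sample_weight_mode='temporal' accepts
weights_2d = weight_map.reshape(weight_map.shape[0], -1)
print(weights_2d.shape)  # (1, 1572864)

Note that this only fixes the weight array; the model's targets then have to be flattened the same way for the shapes to line up.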

I tried to fix it on the sample_weight array side, but after the change the framework instead complained that my input should be 4D. I suspect the yield is affecting the model input?

original data.py
        y_binary = to_categorical(y_int)  # Convert integer labels to binary vectors
        yield (img_batch, y_binary, mask_batch)
modified data.py
        y_binary = to_categorical(y_int)  
        y_binary = y_binary.reshape(y_binary.shape[:2])
        yield (y_binary)
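        # yielding a bare array makes Keras treat it as the model *input*,
        # which is why the next error complains about input_1's dimensions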
error message
Exception occurred: ValueError
Error when checking input: expected input_1 to have 4 dimensions, but got array with shape (512, 3072)
  File "C:\Labbb\unet_mao0\main.py", line 88, in <module>
    train_history=model.fit_generator(myGene,steps_per_epoch=200,epochs=20,callbacks=[model_checkpoint])
ValueError: Error when checking input: expected input_1 to have 4 dimensions, but got array with shape (512, 3072)

How should I modify the code so that this 4D array is accepted by the loss function, or alternatively reshape the U-Net output to 1D/2D so that it matches what the loss function expects?

Full code below:

main.py
from model import *
from data import *

import tensorflow._api.v2.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
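# let GPU memory allocation grow on demand instead of reserving it all up front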
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)



def show_train_history(train_history, train,name):
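    # plot a single metric from the training History and save the figure as <name>.png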
    plt.plot(train_history.history[train])
    plt.title(name)
    plt.ylabel('train')
    plt.xlabel('Epoch')
    plt.legend(['train'], loc='center right')
    #plt.show()
    plt.savefig(name+".png")
    plt.close()


acc_name ="Accuracy"
loss_name="Loss"


data_gen_args = dict(rotation_range=0.2,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    shear_range=0.05,
                    zoom_range=0.05,
                    horizontal_flip=True,
                    fill_mode='nearest')
# list of training data folders
folders = [

    'data/syu/train1day/auto_correlation_reverse',
    'data/syu/train1day/energy',
    'data/syu/train1day/entropy_reverse',
    'data/syu/train1day/homogeneity',
    'data/syu/train1day/temprature',
    'data/syu/train1day/clahe',   
    #'data/syu/train2day/auto_correlation_reverse',
    #'data/syu/train2day/energy',
    #'data/syu/train2day/entropy_reverse',
    #'data/syu/train2day/homogeneity',
    #'data/syu/train2day/temprature',
    #'data/syu/train2day/clahe',
    #'data/syu/train3day/auto_correlation_reverse',
    #'data/syu/train3day/energy',
    #'data/syu/train3day/entropy_reverse',
    #'data/syu/train3day/homogeneity',
    #'data/syu/train3day/temprature',
    #'data/syu/train3day/clahe',
    #'data/syu/train4day/auto_correlation_reverse',
    #'data/syu/train4day/energy',
    #'data/syu/train4day/entropy_reverse',
    #'data/syu/train4day/homogeneity',
    #'data/syu/train4day/temprature',
    #'data/syu/train4day/clahe',
    #'data/syu/train5day/auto_correlation_reverse',
    #'data/syu/train5day/energy',
    #'data/syu/train5day/entropy_reverse',
    #'data/syu/train5day/homogeneity',
    #'data/syu/train5day/temprature',
    #'data/syu/train5day/clahe',
    #'data/syu/train6day/auto_correlation_reverse',
    #'data/syu/train6day/energy',
    #'data/syu/train6day/entropy_reverse',
    #'data/syu/train6day/homogeneity',
    #'data/syu/train6day/temprature',
    #'data/syu/train6day/clahe',
    #'data/syu/train7day/auto_correlation_reverse',
    #'data/syu/train7day/energy',
    #'data/syu/train7day/entropy_reverse',
    #'data/syu/train7day/homogeneity',
    #'data/syu/train7day/temprature',
    #'data/syu/train7day/clahe',
]

myGene = trainGenerator(1, folders, 'image', 'label',data_gen_args,save_to_dir = None)
#this line is also needed when predicting
model = unet(input_size=(512, 512, 8))

model_checkpoint = ModelCheckpoint('weight/unet_8channels.hdf5', monitor='loss',verbose=1, save_best_only=True)
train_history=model.fit_generator(myGene,steps_per_epoch=200,epochs=20,callbacks=[model_checkpoint])

#load_weights loads trained model parameters
#model.load_weights('unet_membrane.hdf5')

#plot the Accuracy and Loss curves from training
acc_name ="Accuracy"
loss_name="Loss"
show_train_history(train_history, 'accuracy',acc_name)
show_train_history(train_history, 'loss',loss_name)


#testGene = testGenerator("data/membrane/test/image")
#results = model.predict_generator(testGene,116,verbose=1)
#saveResult("data/membrane/test/pred",results)

#prediction: arg 1 is the folder of images to predict, arg 2 is the output folder, arg 3 is the model, arg 4 is the number of images to predict
#predict_RGBcolor_img("data/membrane/test/image","data/membrane/test/pred",model,117)
data.py
from __future__ import print_function
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np 
import os
import glob
import skimage.io as io
import skimage.transform as trans
import cv2
from matplotlib import pyplot as plt
from skimage import img_as_ubyte
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]

COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
                          Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])



def normalized_the_gray_value(img):
    gray_max=img.max()
    gray_min=img.min()
    
    normalized_img= ( (img-gray_min) / (gray_max-gray_min) )*255
    
    return normalized_img
    

#  convert the image to YUV and normalize the Y value
def RGB_Y_normalized(img,gamma_value):
    #print(img_out)
    #print(gamma_value)
    YUV = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    
    Y, U, V = cv2.split(YUV)
    #print(y)
    y_max=Y.max()
    y_min=Y.min()
    #print(y_min)
    #print(y_max)
    temp_y=  ( ( ( Y-y_min)  / ( y_max - y_min ) )  **gamma_value )*255 
    
    # Y normalized and return
    #YCrCb_array = np.zeros((img.shape[0], img.shape[1], 3), "uint8")
    #YUV_array = np.zeros((img.shape[0], img.shape[1], 3))
    YUV_array = np.zeros((img.shape[0], img.shape[1], 3), "uint8")
    YUV_array[:, :, 0], YUV_array[:, :, 1], YUV_array[:, :, 2] = temp_y, YUV[:,:,1], YUV[:,:,2]
    
    #print(YCrCb_array[:, :, 0])
    
    
    # convert the YUV array back to a BGR image array
    #print(np.shape(YUV_array))
    final_rgb=cv2.cvtColor(YUV_array, cv2.COLOR_YUV2BGR)
    
    
    
    
    return final_rgb
    
    

#  convert the image to YCbCr and normalize the Y value
def YCbCr_normalized(img,gamma_value):
    #print(img_out)
    #print(gamma_value)
    YCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    
    Y, Cr, Cb = cv2.split(YCrCb)
    #print(y)
    y_max=Y.max()
    y_min=Y.min()
    #print(y_min)
    #print(y_max)
    temp_y=  ( ( ( Y-y_min)  / ( y_max - y_min ) )  **gamma_value )*255 
    
    # Y normalized and return
    #YCrCb_array = np.zeros((img.shape[0], img.shape[1], 3), "uint8")
    YCrCb_array = np.zeros((img.shape[0], img.shape[1], 3))
    YCrCb_array[:, :, 0], YCrCb_array[:, :, 1], YCrCb_array[:, :, 2] = temp_y, YCrCb[:,:,1], YCrCb[:,:,2]
    
    #print(YCrCb_array[:, :, 0])
    return YCrCb_array


def adjustData(img,mask,flag_multi_class,num_class):
    if(flag_multi_class):
        img = img / 255
        mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]
        new_mask = np.zeros(mask.shape + (num_class,))
        for i in range(num_class):
            #for one pixel in the image, find the class in mask and convert it into one-hot vector
            #index = np.where(mask == i)
            #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
            #new_mask[index_mask] = 1
            new_mask[mask == i,i] = 1
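        # the reshape below flattens a 4D mask batch: (batch, H, W, num_class) -> (batch, H*W, num_class);
        # the else branch of the ternary is unreachable here since flag_multi_class is true in this block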
        new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))
        mask = new_mask
    elif(np.max(img) > 1):
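        # binary case: scale the image to [0, 1] and threshold the mask to {0, 1}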
        img = img / 255
        mask = mask /255
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
    return (img,mask)



def trainGenerator(batch_size, train_paths, image_folder, mask_folder, aug_dict,image_color_mode="rgb", mask_color_mode="grayscale", 
                   image_save_prefix="image", mask_save_prefix="mask", 
                   flag_multi_class=False, num_class=6, save_to_dir="data_train/test/", target_size=(512, 512), seed=1):
    '''
    generates image and mask batches at the same time;
    uses the same seed for image_datagen and mask_datagen so both receive identical augmentations;
    if you want to visualize the generator output, set save_to_dir = "your path"
    '''

    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generators = []
    mask_generators = []
    for train_path in train_paths:
        
        image_color_mode = "rgb" if os.path.basename(train_path) == "clahe" else "grayscale"
        
        image_generator = image_datagen.flow_from_directory(
            train_path,
            classes=['image1','image2','image3','image4','image6','image7'],
            class_mode=None,
            color_mode=image_color_mode,
            target_size=target_size,
            batch_size=batch_size,
            save_to_dir=save_to_dir,
            save_prefix=image_save_prefix,
            seed=seed
        )
        mask_generator = mask_datagen.flow_from_directory(
            train_path,
            classes=['label1','label2','label3','label4','label6','label7'],
            class_mode=None,
            color_mode=mask_color_mode,
            target_size=target_size,
            batch_size=batch_size,
            save_to_dir=save_to_dir,
            save_prefix=mask_save_prefix,
            seed=seed
        )
        image_generators.append(image_generator)
        mask_generators.append(mask_generator)

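    # zip the per-folder generators: each step yields (img_1, ..., img_N, mask_1, ..., mask_N)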
    train_generators = zip(*image_generators, *mask_generators)
    for data in train_generators:
        images = []
        masks = []
        for i in range(len(train_paths)):
            img, mask = data[i], data[i + len(train_paths)]
            img, mask = adjustData(img, mask, flag_multi_class, num_class)
            images.append(img)
            masks.append(mask)
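        # images stack along the channel axis: five grayscale + 3-channel clahe = 8 channels
        # masks stack along the width axis: 6 x 512 = 3072, the width seen in the ValueError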
        img_batch = np.concatenate(images, axis=3)
        mask_batch = np.concatenate(masks, axis=2)
        y_int = np.argmax(mask_batch, axis=3)  # Assuming mask_batch has shape (batch_size, height, width, num_classes)
        y_binary = to_categorical(y_int)  # Convert integer labels to binary vectors
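        # NOTE: Keras treats a 3-tuple from a generator as (inputs, targets, sample_weights),
        # so mask_batch, shape (1, 512, 3072, 1), becomes the sample_weight array from the error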
        yield (img_batch, y_binary, mask_batch)
       #yield (img_batch, mask_batch)
    save_to_dir = "C:/Labbb"  # 可视化结果的保存目录


def testGenerator(test_path,num_image = 30,target_size = (512,512),flag_multi_class = True,as_gray = True):
    for i in range(num_image):
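        # note: as_gray=False is hard-coded below, so the as_gray parameter has no effect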
        img = io.imread(os.path.join(test_path,"%d.png"%i),as_gray = False)
        img = img / 255
        img = trans.resize(img,target_size)
        #img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img
        img = np.reshape(img,(1,)+img.shape)
        yield img


def geneTrainNpy(image_path,mask_path,gamma_value,flag_multi_class = True,num_class = 6,image_prefix = "image",mask_prefix = "mask",image_as_gray = True,mask_as_gray = True):
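    # build (image_arr, mask_arr) NumPy arrays from two folders of PNGs,
    # applying Y-channel gamma normalization and resizing everything to 512x512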
    
    # image_name_arr = glob.glob(os.path.join(image_path,"%s*.png"%image_prefix))
    image_name_arr = glob.glob(os.path.join(image_path,"*.png"))

    # mask name array ----  get the mask path
    mask_name_arr = glob.glob(os.path.join(mask_path,"*.png"))
    
    
    target_size = (512,512)
    image_arr = []
    mask_arr = []
    for index,item in enumerate(image_name_arr):
        #img = io.imread(item,as_gray = image_as_gray)
        img = cv2.imdecode(np.fromfile(item, dtype=np.uint8), 1)

        #plt.subplot(1, 2, 1)
        #plt.title('img')
        #plt.imshow(img)
        
        # Return the Y-normalized RGB channel array
        img =RGB_Y_normalized(img,gamma_value)
        
        #print("take RGB to YCbCr " ,end=" ")
        # resize the image
        img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
        #img = trans.resize(img,target_size)
        
        
        
        #img = normalized_the_gray_value(img)
        
        #plt.subplot(1, 2, 2)
        #plt.title('img')
        #plt.imshow(img)
        
        #plt.imshow(img,cmap='gray')
        
        
        img = np.reshape(img,img.shape + (1,)) if image_as_gray else img
        #mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)
        
        mask = cv2.imdecode(np.fromfile(mask_name_arr[index], dtype=np.uint8), 0)
        #mask = io.imread(item,as_gray = mask_as_gray)
        
        
        
        
        
        # resize the mask
        mask = cv2.resize(mask, (512, 512), interpolation=cv2.INTER_LINEAR)
        #mask = trans.resize(mask,target_size)
        
        #plt.subplot(1, 2, 2)
        #plt.title('mask')
        #plt.imshow(mask,cmap='gray')
        
        mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask
        
       
        #plt.show()
        
        #print(index ,item , mask_name_arr[index],img.shape)
        
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        image_arr.append(img)
        mask_arr.append(mask)
    image_arr = np.array(image_arr)
    mask_arr = np.array(mask_arr)
    return image_arr,mask_arr


'''def labelVisualize(num_class,color_dict,img):
    img = img[:,:,0] if len(img.shape) == 3 else img
    img_out = np.zeros(img.shape + (3,))
    for i in range(num_class):
        img_out[img == i,:] = color_dict[i]
    return img_out / 255'''



'''def saveResult(save_path,npyfile,flag_multi_class = False,num_class = 2):
    for i,item in enumerate(npyfile):
        img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]
        io.imsave(os.path.join(save_path,"%d_predict.jpg"%i),img)'''

def predict_RGBcolor_img(test_path,save_path,model,test_pic_num):
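    # predict every image in test_path, threshold the sigmoid output at 0.5,
    # and save the result to save_path as a binary mask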
    target_size = (512,512)
    files_name=os.listdir(test_path)
    for i in range(len(files_name)):
        img = io.imread(os.path.join(test_path,"%s"%files_name[i]))
        #print(files_name)
       # io.imsave(os.path.join(save_path,"%s"%files_name[i]),img_as_ubyte(img))
        img = img / 255
        img = trans.resize(img,target_size)
        img = np.reshape(img,(1,)+img.shape)
        results = model.predict(img)
        a=results[0][:,:,0]
        '''for i in range(len(a)):
            for j in range(len(a[i])):
                if a[i][j]>0.5:
                    a[i][j]=255
                else:
                    a[i][j]=0'''
        a[a<=0.5]=0
        a[a>0.5]=255
        a=a.astype(np.uint8)
        #print(a.shape)
        #a[a<0.5]=0
        #a[a>=0.5]=255
        #plt.imshow(results[0][:,:,0],cmap="gray")
        #plt.show()
        io.imsave(os.path.join(save_path,"%s"%files_name[i]),img_as_ubyte(a))
model.py
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras import backend as keras


#chatgpt: 8-channel version
def unet(pretrained_weights=None, input_size=(512, 512, 8)):
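    # standard U-Net encoder-decoder; input_size=(512, 512, 8) matches the
    # eight feature channels stacked by trainGenerator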
    #sample_weight_array_2d = np.reshape(sample_weight_array, (512, 3072))
    inputs = Input(input_size)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)

    
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    #conv10 = Conv2D(2, 1, activation='sigmoid')(conv9)
    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])  # use categorical cross-entropy as the loss function
    #model.compile(sample_weight_mode='temporal',optimizer=Adam(lr=1e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])  # sparse categorical cross-entropy variant
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model

Segmentation networks like this generally require input in NCHW or NHWC format. In TensorFlow this is configurable, and the default should be the latter, i.e. [batch_size, height, width, channels].
So you need to convert your images into this format during preprocessing before they can be fed into Conv2D.
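A minimal sketch of that axis reorder, assuming a NumPy batch that arrived channels-first (names are illustrative):

import numpy as np

# hypothetical channels-first batch: (batch, channels, height, width)
batch_nchw = np.zeros((1, 8, 512, 512), dtype=np.float32)

# move the channel axis to the end to get NHWC,
# the default layout Conv2D expects in TensorFlow/Keras
batch_nhwc = np.transpose(batch_nchw, (0, 2, 3, 1))
print(batch_nhwc.shape)  # (1, 512, 512, 8)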