在训练时我发现所有的资料都被分在同一类 要怎么把他们分成七类 (train1day train2day.....) (in main.py)
classes problem
Found 33 images belonging to 1 classes.
Found 38 images belonging to 1 classes.
Found 98 images belonging to 1 classes.
然后训练到一半出现了以下错误讯息,要怎么解决?我把代码丢进 ChatGPT,也在网上查到了类似问题的解法,但按照建议添加了以下 GPU 相关的代码、并调整了 batch size 之后,问题仍然没有解决。
(These codes were found from ChatGPT and other people's inquiries.)
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
error message
NotFoundError
No algorithm worked!
[[node model/conv2d/Relu (defined at C:\Labbb\unet_mao0\main.py:87) ]] [Op:__inference_train_function_2655]
Function call stack:
train_function
File "C:\Labbb\unet_mao0\main.py", line 87, in <module>
main.py
folders = [
'data/syu/train1day/auto_correlation1_reverse',
'data/syu/train1day/energy1',
'data/syu/train1day/entropy1_reverse',
'data/syu/train1day/homogeneity1',
'data/syu/train1day/temprature',
'data/syu/train1day/clahe1',
train2day train3day....
'data/syu/train7day/auto_correlation7_reverse',
'data/syu/train7day/energy7',
'data/syu/train7day/entropy7_reverse',
'data/syu/train7day/homogeneity7',
'data/syu/train7day/temprature',
'data/syu/train7day/clahe7'
]
myGene = trainGenerator(4, folders,'image','label',data_gen_args,save_to_dir = None)
model = unet(input_size=(512, 512, 8))
model_checkpoint = ModelCheckpoint('weight/unet_8channels.hdf5', monitor='loss',verbose=1, save_best_only=True)
train_history=model.fit_generator(myGene,steps_per_epoch=200,epochs=20,callbacks=[model_checkpoint])  # ← main.py 第 87 行(错误讯息指向这一行)
以下是完整代码
main.py
from model import *
from data import *
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# TF1-compat GPU setup: allow_growth makes TensorFlow allocate GPU memory
# on demand instead of grabbing it all up front. This is the standard
# mitigation attempted for cuDNN "No algorithm worked" errors, which are
# usually caused by running out of GPU memory (or by a layer/input mismatch).
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
def show_train_history(train_history, train, name):
    """Plot one metric from a Keras History object and save it as <name>.png.

    Args:
        train_history: the History returned by model.fit / fit_generator.
        train: key into train_history.history (e.g. 'loss', 'accuracy').
        name: plot title, also used as the output file name (without .png).
    """
    metric_values = train_history.history[train]
    plt.plot(metric_values)
    plt.title(name)
    plt.ylabel('train')
    plt.xlabel('Epoch')
    plt.legend(['train'], loc='center right')
    # plt.show()  -- disabled: figures are written to disk instead
    plt.savefig(name + ".png")
    plt.close()
# Plot titles / output file names for the training-history figures.
acc_name ="Accuracy"
loss_name="Loss"

# Augmentation settings shared by the image and mask generators; the same
# seed inside trainGenerator keeps the two streams geometrically aligned.
data_gen_args = dict(rotation_range=0.2,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    shear_range=0.05,
                    zoom_range=0.05,
                    horizontal_flip=True,
                    fill_mode='nearest')
# Training data paths: 7 "day" sets x 6 feature maps each (42 folders).
# NOTE(review): trainGenerator stacks EVERY folder on the channel axis —
# len(folders) * 4 channels (default color_mode="rgba") = 168 channels here,
# while unet(input_size=(512, 512, 8)) below expects 8. A channel mismatch
# like this is a plausible real cause of the cuDNN "No algorithm worked"
# crash at model/conv2d/Relu — confirm which folders belong to one sample.
# NOTE(review): the "Found N images belonging to 1 classes" messages are
# normal: each flow_from_directory is given classes=['image'] (one
# sub-folder), so one "class" per flow is expected for segmentation data;
# it does not mean the 7 day-sets were merged into a single category.
# NOTE(review): 'temprature' matches the on-disk folder spelling, presumably;
# do not "fix" the string without renaming the folders.
folders = [
    'data/syu/train1day/auto_correlation1_reverse',
    'data/syu/train1day/energy1',
    'data/syu/train1day/entropy1_reverse',
    'data/syu/train1day/homogeneity1',
    'data/syu/train1day/temprature',
    'data/syu/train1day/clahe1',
    'data/syu/train2day/auto_correlation2_reverse',
    'data/syu/train2day/energy2',
    'data/syu/train2day/entropy2_reverse',
    'data/syu/train2day/homogeneity2',
    'data/syu/train2day/temprature',
    'data/syu/train2day/clahe2',
    'data/syu/train3day/auto_correlation3_reverse',
    'data/syu/train3day/energy3',
    'data/syu/train3day/entropy3_reverse',
    'data/syu/train3day/homogeneity3',
    'data/syu/train3day/temprature',
    'data/syu/train3day/clahe3',
    'data/syu/train4day/auto_correlation4_reverse',
    'data/syu/train4day/energy4',
    'data/syu/train4day/entropy4_reverse',
    'data/syu/train4day/homogeneity4',
    'data/syu/train4day/temprature',
    'data/syu/train4day/clahe4',
    'data/syu/train5day/auto_correlation5_reverse',
    'data/syu/train5day/energy5',
    'data/syu/train5day/entropy5_reverse',
    'data/syu/train5day/homogeneity5',
    'data/syu/train5day/temprature',
    'data/syu/train5day/clahe5',
    'data/syu/train6day/auto_correlation6_reverse',
    'data/syu/train6day/energy6',
    'data/syu/train6day/entropy6_reverse',
    'data/syu/train6day/homogeneity6',
    'data/syu/train6day/temprature',
    'data/syu/train6day/clahe6',
    'data/syu/train7day/auto_correlation7_reverse',
    'data/syu/train7day/energy7',
    'data/syu/train7day/entropy7_reverse',
    'data/syu/train7day/homogeneity7',
    'data/syu/train7day/temprature',
    'data/syu/train7day/clahe7'
]
myGene = trainGenerator(4, folders,'image','label',data_gen_args,save_to_dir = None)
# This line is also needed when only running prediction.
model = unet(input_size=(512, 512, 8))
#model = unet()
#model_checkpoint = ModelCheckpoint('unet_membrane_610_2.hdf5', monitor='loss',verbose=1, save_best_only=True)
###model_checkpoint = ModelCheckpoint('weight/unet_merge6.hdf5', monitor='loss',verbose=1, save_best_only=True)
# Keep only the best weights (lowest training loss) seen so far.
model_checkpoint = ModelCheckpoint('weight/unet_8channels.hdf5', monitor='loss',verbose=1, save_best_only=True)
train_history=model.fit_generator(myGene,steps_per_epoch=200,epochs=20,callbacks=[model_checkpoint])
# load_weights: load previously trained model parameters.
#model.load_weights('unet_membrane.hdf5')
# Plot the Accuracy and Loss curves from the training run.
acc_name ="Accuracy"
loss_name="Loss"
# NOTE(review): the history key '1accuracy' looks odd — Keras usually logs
# 'accuracy'; confirm against train_history.history.keys().
show_train_history(train_history, '1accuracy',acc_name)
show_train_history(train_history, 'loss',loss_name)
#testGene = testGenerator("data/membrane/test/image")
#results = model.predict_generator(testGene,116,verbose=1)
#saveResult("data/membrane/test/pred",results)
# Prediction helper: args are (input dir, output dir, model, image count).
#predict_RGBcolor_img("data/membrane/test/image","data/membrane/test/pred",model,117)
model.py
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras import backend as keras
#chatgpt 八通道
def unet(pretrained_weights=None, input_size=(512, 512, 8)):
    """Build and compile a standard U-Net for binary segmentation.

    Args:
        pretrained_weights: optional path to an HDF5 weights file to load.
        input_size: (height, width, channels) of the input tensor. The
            channel count is arbitrary; 8 is used for the stacked feature
            maps in this project.

    Returns:
        A compiled tf.keras Model whose output is a (H, W, 1) sigmoid
        probability map.
    """
    inputs = Input(input_size)
    # ----- Encoder: four down-sampling stages of (conv, conv, pool) -----
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # ----- Bottleneck -----
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)
    # ----- Decoder: up-sample and concatenate the matching encoder stage -----
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # Single-channel sigmoid output: per-pixel foreground probability.
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    model = Model(inputs=inputs, outputs=conv10)
    # BUG FIX: with a one-channel sigmoid output the loss must be
    # binary_crossentropy. 'categorical_crossentropy' over a single channel
    # degenerates (false positives are never penalized), so the network
    # cannot learn a meaningful segmentation.
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
data.py
from __future__ import print_function
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import skimage.io as io
import skimage.transform as trans
import cv2
from matplotlib import pyplot as plt
from skimage import img_as_ubyte
import tensorflow as tf
# CamVid-style RGB palette, indexed by class id. Used by the (currently
# commented-out) labelVisualize helper to colorize multi-class prediction
# maps; kept here so COLOR_DICT stays importable.
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]
COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
                       Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
def normalized_the_gray_value(img):
    """Linearly rescale an image's gray values to the range [0, 255].

    Args:
        img: numpy array of pixel values (any numeric dtype).

    Returns:
        Float array of the same shape with min mapped to 0 and max to 255.
        A constant image is returned as all zeros instead of dividing by zero.
    """
    gray_max = img.max()
    gray_min = img.min()
    span = gray_max - gray_min
    # BUG FIX: the original divided by (max - min) unconditionally, which
    # fails (or produces inf/nan) for a constant image.
    if span == 0:
        return np.zeros(img.shape, dtype=float)
    normalized_img = ((img - gray_min) / span) * 255
    return normalized_img
# change the img to YUV and normalized the Y value
def RGB_Y_normalized(img, gamma_value):
    """Gamma-normalize the luma (Y) channel of a BGR image.

    Converts to YUV, stretches Y to [0, 1], applies the gamma exponent,
    rescales to [0, 255], then converts back to BGR.

    Args:
        img: BGR uint8 image (OpenCV convention).
        gamma_value: exponent applied to the normalized Y channel.

    Returns:
        BGR uint8 image with the normalized luma.
    """
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    Y, U, V = cv2.split(yuv)
    lo = Y.min()
    hi = Y.max()
    # Stretch, gamma-correct, and rescale the luma channel.
    norm_y = (((Y - lo) / (hi - lo)) ** gamma_value) * 255
    # Reassemble as uint8 YUV; assigning the float luma truncates toward zero.
    out = np.zeros((img.shape[0], img.shape[1], 3), "uint8")
    out[:, :, 0] = norm_y
    out[:, :, 1] = yuv[:, :, 1]
    out[:, :, 2] = yuv[:, :, 2]
    return cv2.cvtColor(out, cv2.COLOR_YUV2BGR)
# change the img to YCbCr and normalized the Y value
def YCbCr_normalized(img, gamma_value):
    """Gamma-normalize the Y channel of a BGR image in YCrCb space.

    Args:
        img: BGR uint8 image (OpenCV convention).
        gamma_value: exponent applied to the normalized Y channel.

    Returns:
        Float (H, W, 3) YCrCb array — NOT converted back to BGR and not
        uint8, unlike RGB_Y_normalized.
    """
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    Y, Cr, Cb = cv2.split(ycrcb)
    lo = Y.min()
    hi = Y.max()
    # Stretch to [0, 1], gamma-correct, rescale to [0, 255].
    norm_y = (((Y - lo) / (hi - lo)) ** gamma_value) * 255
    out = np.zeros((img.shape[0], img.shape[1], 3))
    out[:, :, 0] = norm_y
    out[:, :, 1] = ycrcb[:, :, 1]
    out[:, :, 2] = ycrcb[:, :, 2]
    return out
def adjustData(img,mask,flag_multi_class,num_class):
    """Normalize an image batch and convert its mask into training form.

    With flag_multi_class=True: scales img to [0, 1], one-hot encodes the
    mask's first channel into num_class channels, then flattens two axes.
    Otherwise: scales both to [0, 1] and binarizes the mask at 0.5.
    """
    if(flag_multi_class):
        img = img / 255
        # Keep only the first channel; assumes it holds integer class ids
        # in 0..num_class-1 — TODO confirm masks are not raw 0..255 values.
        mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]
        new_mask = np.zeros(mask.shape + (num_class,))
        for i in range(num_class):
            #for one pixel in the image, find the class in mask and convert it into one-hot vector
            #index = np.where(mask == i)
            #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
            #new_mask[index_mask] = 1
            new_mask[mask == i,i] = 1
        # NOTE(review): this merges two axes, yielding (batch, H*W, num_class)
        # for 4-D input, while the model outputs (batch, H, W, 1) — confirm
        # this shape is actually what the loss expects.
        new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))
        mask = new_mask
    elif(np.max(img) > 1):
        # Binary path: rescale to [0, 1] and threshold the mask at 0.5.
        img = img / 255
        mask = mask /255
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
    return (img,mask)
def trainGenerator(batch_size, train_paths, image_folder, mask_folder, aug_dict, image_color_mode="rgba",
                   mask_color_mode="grayscale", image_save_prefix="image", mask_save_prefix="mask",
                   flag_multi_class=True, num_class = 6, save_to_dir="data_train/test/", target_size=(512, 512), seed=1):
    '''
    can generate image and mask at the same time
    use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
    if you want to visualize the results of generator, set save_to_dir = "your path"

    NOTE(review): each train_path is given classes=[image_folder], so Keras
    prints "Found N images belonging to 1 classes" per folder — that message
    is normal here and does NOT mean the 7 day-sets were merged into one
    category; this generator feeds a segmentation model, not a classifier.

    NOTE(review): the yielded image batch concatenates ALL folders on the
    channel axis: len(train_paths) * channels_per_image (4 for "rgba").
    With 42 folders that is 168 channels, which does not match
    unet(input_size=(512, 512, 8)) — verify the intended folder-to-sample
    grouping; such a mismatch can surface as cuDNN "No algorithm worked".
    '''
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generators = []
    mask_generators = []
    for train_path in train_paths:
        # One paired (image, mask) flow per folder; the shared seed keeps the
        # random augmentations of images and masks in sync.
        image_generator = image_datagen.flow_from_directory(
            train_path,
            classes=[image_folder],
            class_mode=None,
            color_mode=image_color_mode,
            target_size=target_size,
            batch_size=batch_size,
            save_to_dir=save_to_dir,
            save_prefix=image_save_prefix,
            seed=seed
        )
        mask_generator = mask_datagen.flow_from_directory(
            train_path,
            classes=[mask_folder],
            class_mode=None,
            color_mode=mask_color_mode,
            target_size=target_size,
            batch_size=batch_size,
            save_to_dir=save_to_dir,
            save_prefix=mask_save_prefix,
            seed=seed
        )
        image_generators.append(image_generator)
        mask_generators.append(mask_generator)
    # Each zipped tuple holds the image batches of all folders first, then
    # the matching mask batches (offset by len(train_paths)).
    train_generators = zip(*image_generators, *mask_generators)
    for data in train_generators:
        images = []
        masks = []
        for i in range(len(train_paths)):
            img, mask = data[i], data[i + len(train_paths)]
            img, mask = adjustData(img, mask, flag_multi_class, num_class)
            images.append(img)
            masks.append(mask)
        # Stack every folder's channels into one input tensor.
        img_batch = np.concatenate(images, axis=3)
        #mask_batch = np.concatenate(masks, axis=3)
        # NOTE(review): masks are concatenated on axis=2 (a flattened axis
        # after adjustData) — confirm this matches what the loss expects.
        mask_batch = np.concatenate(masks, axis=2)
        yield (img_batch, mask_batch)
    # NOTE(review): unreachable — the generator loop above never terminates.
    save_to_dir = "C:/Labbb" # set to a path to visualize generator output
def testGenerator(test_path, num_image=30, target_size=(512, 512), flag_multi_class=True, as_gray=True):
    """Yield normalized, resized test images named 0.png .. num_image-1.png.

    Each yielded array has a leading batch axis of size 1.

    NOTE(review): `flag_multi_class` and `as_gray` are accepted but unused —
    images are always read in color (as_gray=False).
    """
    for idx in range(num_image):
        frame = io.imread(os.path.join(test_path, "%d.png" % idx), as_gray=False)
        frame = frame / 255
        frame = trans.resize(frame, target_size)
        # Add the batch dimension expected by model.predict.
        yield np.reshape(frame, (1,) + frame.shape)
def geneTrainNpy(image_path,mask_path,gamma_value,flag_multi_class = True,num_class = 6,image_prefix = "image",mask_prefix = "mask",image_as_gray = True,mask_as_gray = True):
    """Load all PNGs from image_path/mask_path into two numpy arrays.

    Each image is luma-normalized (RGB_Y_normalized with gamma_value),
    resized to 512x512, and run through adjustData with its mask.

    NOTE(review): images and masks are paired by position in two separate
    glob() results — glob order is not guaranteed sorted, so pairing relies
    on the OS returning both directories in the same order; verify.
    """
    # image_name_arr = glob.glob(os.path.join(image_path,"%s*.png"%image_prefix))
    image_name_arr = glob.glob(os.path.join(image_path,"*.png"))
    # mask name array ---- get the mask path
    mask_name_arr = glob.glob(os.path.join(mask_path,"*.png"))
    target_size = (512,512)
    image_arr = []
    mask_arr = []
    for index,item in enumerate(image_name_arr):
        #img = io.imread(item,as_gray = image_as_gray)
        # imdecode via np.fromfile handles non-ASCII file paths on Windows.
        img = cv2.imdecode(np.fromfile(item, dtype=np.uint8), 1)
        #plt.subplot(1, 2, 1)
        #plt.title('img')
        #plt.imshow(img)
        # Return the Y-normalized BGR image.
        img =RGB_Y_normalized(img,gamma_value)
        #print("take RGB to YCbCr " ,end=" ")
        # resize the image
        img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
        #img = trans.resize(img,target_size)
        #img = normalized_the_gray_value(img)
        #plt.subplot(1, 2, 2)
        #plt.title('img')
        #plt.imshow(img)
        #plt.imshow(img,cmap='gray')
        img = np.reshape(img,img.shape + (1,)) if image_as_gray else img
        #mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)
        # Read the positionally-matching mask as grayscale (flag 0).
        mask = cv2.imdecode(np.fromfile(mask_name_arr[index], dtype=np.uint8), 0)
        #mask = io.imread(item,as_gray = mask_as_gray)
        # resize the mask
        mask = cv2.resize(mask, (512, 512), interpolation=cv2.INTER_LINEAR)
        #mask = trans.resize(mask,target_size)
        #plt.subplot(1, 2, 2)
        #plt.title('mask')
        #plt.imshow(mask,cmap='gray')
        mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask
        #plt.show()
        #print(index ,item , mask_name_arr[index],img.shape)
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        image_arr.append(img)
        mask_arr.append(mask)
    image_arr = np.array(image_arr)
    mask_arr = np.array(mask_arr)
    return image_arr,mask_arr
'''def labelVisualize(num_class,color_dict,img):
img = img[:,:,0] if len(img.shape) == 3 else img
img_out = np.zeros(img.shape + (3,))
for i in range(num_class):
img_out[img == i,:] = color_dict[i]
return img_out / 255'''
'''def saveResult(save_path,npyfile,flag_multi_class = False,num_class = 2):
for i,item in enumerate(npyfile):
img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]
io.imsave(os.path.join(save_path,"%d_predict.jpg"%i),img)'''
def predict_RGBcolor_img(test_path, save_path, model, test_pic_num):
    """Predict binary masks for images in test_path and save them to save_path.

    Args:
        test_path: directory containing the input images.
        save_path: directory where the 0/255 mask images are written,
            reusing each input file's name.
        model: a trained Keras model whose output is a (H, W, 1) sigmoid map.
        test_pic_num: maximum number of images to predict.

    BUG FIX: `test_pic_num` was previously ignored (the loop always covered
    every file in test_path); it now caps the number of predictions, as the
    call-site comment in main.py describes.
    """
    target_size = (512, 512)
    files_name = os.listdir(test_path)
    for i in range(min(test_pic_num, len(files_name))):
        img = io.imread(os.path.join(test_path, "%s" % files_name[i]))
        img = img / 255
        img = trans.resize(img, target_size)
        # Add the batch dimension expected by model.predict.
        img = np.reshape(img, (1,) + img.shape)
        results = model.predict(img)
        a = results[0][:, :, 0]
        # Threshold the probability map at 0.5 into a 0/255 binary mask.
        a[a <= 0.5] = 0
        a[a > 0.5] = 255
        a = a.astype(np.uint8)
        io.imsave(os.path.join(save_path, "%s" % files_name[i]), img_as_ubyte(a))
不知道你这个问题是否已经解决。如果还没有,我可以尝试帮您解决。首先,关于如何将所有资料分成七类,您可以采用以下步骤:
下面是代码示例:
import csv
import pandas as pd
# 七个类别名称
classes = ['class1', 'class2', 'class3', 'class4', 'class5', 'class6', 'class7']
# 读取CSV文件
class_list = [[] for _ in classes]  # 每个类别对应一个列表(原示例漏掉了这一行初始化)
with open('data.csv') as csv_file:
    csv_reader = csv.reader(csv_file)
    # 注意:不要把同一个文件句柄同时交给 csv.reader 和 pandas,
    # 否则第二次读取时文件指针已到末尾;如需 pandas 请另行读取:
    # df = pd.read_csv('data.csv')
    # 遍历所有数据,将其分配到相应的类别列表
    for row in csv_reader:
        label = row[0]  # 数据标签
        idx = classes.index(label)  # 获取标签在列表中的位置
        class_list[idx].append(row)  # 将数据添加到相应的类别列表中
# 输出每个类别的数据长度
for i in range(len(classes)):
print(f"Number of samples for {classes[i]}: {len(class_list[i])}")
关于在训练中出现“no algorithm worked”错误,可能有多种原因。以下是一些您可以尝试的解决方法:
(注意:下面的 torch.cuda.is_available()、torch.optim 等都是 PyTorch 的 API,而你的项目使用的是 TensorFlow/Keras。对应的 GPU 检查应为 tf.config.list_physical_devices('GPU'),学习率调度可用 keras.callbacks.ReduceLROnPlateau。)
torch.cuda.is_available()
函数检查GPU是否可用。import torch.optim as optim
optimizer = optim.Adam(model.parameters(), lr=0.0001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True)
如果以上方法均无效,请在提供的代码之上添加错误处理代码,以了解哪一行代码导致了错误。
希望这些建议能够帮助您解决问题。