import tensorflow as tf
import matplotlib.pyplot as plt
import pathlib
import random
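# df below is assumed to be a pandas DataFrame with 'FileName' and 'label' columns, loaded earlier (not shown in this snippet)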
all_image_path=list('C:/Users/admin/Desktop/onlypic/'+df['FileName'].astype(str)+'.png')
all_image_label=list(df['label'])
image_count = len(all_image_path)
def load_preprosess_image(image_path, image_label):
    # read the raw image file
    img_raw = tf.io.read_file(image_path)
    # img_raw = tf.gfile.FastGFile(image_path, 'rb').read()
    # decode the image
    img_tensor = tf.image.decode_jpeg(img_raw, channels=3)
    # resize every image to the same size
    img_tensor = tf.image.resize(img_tensor, [256, 256])
    # convert the data type
    img_tensor = tf.cast(img_tensor, tf.float32)
    # normalize to [0, 1]
    img = img_tensor / 255
    # reshape each scalar label into a length-1 vector
    label = tf.reshape(image_label, [1])
    return img, label
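# build a (path, label) dataset and map the preprocessing function over every element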
path_ds = tf.data.Dataset.from_tensor_slices((all_image_path,all_image_label))
dataset = path_ds.map(load_preprosess_image)
# split the data into training and test sets
# number of test samples (rounded down)
test_count = int(image_count*0.2)
# number of training samples
train_count = image_count - test_count
# training dataset
train_dataset = dataset.skip(test_count)
# test dataset
test_dataset = dataset.take(test_count)
# a larger batch size can be used on a more capable machine
batch_size = 16
# repeat, shuffle, and batch the training dataset
train_dataset = train_dataset.repeat().shuffle(buffer_size=train_count).batch(batch_size)
# the test dataset needs no extra processing
test_dataset = test_dataset.batch(batch_size)
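# pre-trained VGG16 convolutional base (ImageNet weights), without the classification head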
covn_base = tf.keras.applications.VGG16(weights='imagenet',include_top=False)
model = tf.keras.Sequential()
model.add(covn_base)
# global average pooling to reduce the feature dimensions
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(512,activation='relu'))
model.add(tf.keras.layers.Dense(1,activation='sigmoid'))
# train the model
# keep the pre-trained network weights frozen
covn_base.trainable = False
# no custom training loop; everything uses the defaults
# configure the optimizer and loss function, and report accuracy
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['acc'])
# number of batches per epoch and per validation pass
steps_per_epoch = train_count//batch_size
validation_steps = test_count//batch_size
# record the training history
history = model.fit(train_dataset,epochs=10,steps_per_epoch=steps_per_epoch,validation_data=test_dataset,validation_steps=validation_steps)
# use the recorded history to plot the curves
plt.subplot(211)
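# training vs. validation accuracy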
plt.plot(history.epoch,history.history['acc'])
plt.plot(history.epoch,history.history['val_acc'])
plt.subplot(212)
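# training vs. validation loss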
plt.plot(history.epoch,history.history.get('loss'))
plt.plot(history.epoch,history.history.get('val_loss'))
plt.show()
# if the test accuracy is good, save the model for later use
model.save('destinguish_Cat_Ddo.h5')
The code comes from https://blog.csdn.net/weixin_48395629/article/details/107948076
My image set consists of PNG images, but it always throws an error, and the same thing happens with other models as well. I'd like to know roughly what causes this and how to fix it.
UnicodeDecodeError Traceback (most recent call last)
<ipython-input-254-abce13a0b2bc> in <module>()
18
19 # record the training history
---> 20 history = model.fit(train_dataset,epochs=10,steps_per_epoch=steps_per_epoch,validation_data=test_dataset,validation_steps=validation_steps)
21
22 # use the recorded history to plot the curves
~\Anaconda31\lib\site-packages\tensorflow\python\keras\engine\training_v1.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
806 max_queue_size=max_queue_size,
807 workers=workers,
--> 808 use_multiprocessing=use_multiprocessing)
809
810 def evaluate(self,
~\Anaconda31\lib\site-packages\tensorflow\python\keras\engine\training_arrays_v1.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
662 validation_steps=validation_steps,
663 validation_freq=validation_freq,
--> 664 steps_name='steps_per_epoch')
665
666 def evaluate(self,
~\Anaconda31\lib\site-packages\tensorflow\python\keras\engine\training_arrays_v1.py in model_iteration(model, inputs, targets, sample_weights, batch_size, epochs, verbose, callbacks, val_inputs, val_targets, val_sample_weights, shuffle, initial_epoch, steps_per_epoch, validation_steps, validation_freq, mode, validation_in_fit, prepared_feed_values_from_dataset, steps_name, **kwargs)
292 else:
293 actual_inputs = ins()
--> 294 batch_outs = f(actual_inputs)
295 except errors.OutOfRangeError:
296 if is_dataset:
~\Anaconda31\lib\site-packages\tensorflow\python\keras\backend.py in __call__(self, inputs)
3955
3956 fetched = self._callable_fn(*array_vals,
-> 3957 run_metadata=self.run_metadata)
3958 self._call_fetch_callbacks(fetched[-len(self._fetches):])
3959 output_structure = nest.pack_sequence_as(
~\Anaconda31\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
1480 ret = tf_session.TF_SessionRunCallable(self._session._session,
1481 self._handle, args,
-> 1482 run_metadata_ptr)
1483 if run_metadata:
1484 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd5 in position 105: invalid continuation byte
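For reference, since the failure only surfaces inside model.fit, one way to narrow it down might be to read and decode every file eagerly, outside the input pipeline, and print the first path that fails. The snippet below is only a diagnostic sketch, assuming TF 2.x eager execution and the all_image_path list built above; it tries tf.image.decode_png instead of decode_jpeg purely because the images are described as PNG.

import tensorflow as tf

# hypothetical check: read and decode each file one by one so the failing path becomes visible
for p in all_image_path:
    try:
        raw = tf.io.read_file(p)
        tf.image.decode_png(raw, channels=3)  # the files are PNG, so try decode_png here
    except Exception as e:
        print('failed on:', p)
        print(e)
        break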
Hello, did you ever manage to solve this?