An error occurs while training a CNN for license plate recognition.

The error message is as follows:

During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  File "C:\Users\10488\Desktop\data\train.py", line 74, in <module>
    summary_str = sess.run(summary_op, feed_dict)

  File "D:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 967, in run
    result = self._run(None, fetches, feed_dict, options_ptr,

  File "D:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1190, in _run
    results = self._do_run(handle, final_targets, final_fetches,

  File "D:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1368, in _do_run
    return self._do_call(_run_fn, feeds, fetches, targets, options,

  File "D:\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1394, in _do_call
    raise type(e)(node_def, op, message)

InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_5' with dtype float
     [[node Placeholder_5 (defined at C:\Users\10488\Desktop\data\train.py:30) ]]

The code where the error occurs:

with tf.compat.v1.Session() as sess:
    sess.run(init_op)
    
    train_writer = tf.compat.v1.summary.FileWriter(logs_path, sess.graph)
    saver = tf.compat.v1.train.Saver()

    start_time1 = time.time()
    for step in range(epoch):
        # Generate a batch of license plate images and their labels
        img_batch, lbl_batch = get_batch()

        start_time2 = time.time()
        time_str = datetime.datetime.now().isoformat()

        feed_dict = {image_holder:img_batch, label_holder:lbl_batch, keep_prob:0.6}
        _1, _2, _3, _4, _5, _6, _7, ls1, ls2, ls3, ls4, ls5, ls6, ls7, acc = sess.run(
            [train_op1, train_op2, train_op3, train_op4, train_op5, train_op6, train_op7, 
             loss1, loss2, loss3, loss4, loss5, loss6, loss7, accuracy], feed_dict)
        summary_str = sess.run(summary_op, feed_dict)
        train_writer.add_summary(summary_str,step)
        duration = time.time() - start_time2
        loss_total = ls1 + ls2 + ls3 + ls4 + ls5 + ls6 + ls7
        if step % 10 == 0:
            sec_per_batch = float(duration)
            print('%s: Step %d, loss_total = %.2f, acc = %.2f%%, sec/batch = %.2f' %
                (time_str, step, loss_total, acc * 100, sec_per_batch))
        if step % 5000 == 0 or (step + 1) == epoch:
            checkpoint_path = os.path.join(model_path,'./model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
    end_time = time.time()
    print("Training over. It costs {:.2f} minutes".format((end_time - start_time1) / 60))

summary_str = sess.run(summary_op, feed_dict)

The feed_dict passed to this sess.run call is missing a value for the keep_prob placeholder; add it, e.g. keep_prob: 0.5:

summary_str = sess.run(summary_op, feed_dict={keep_prob: 0.5})
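
Since summary_op is a merge of every collected summary (including the image summary built on image_holder), another way to sidestep the unfed-placeholder error is to fetch it in the same sess.run call as the training ops, reusing the full feed_dict. The following is only a minimal sketch of that change, assuming the variable names from train.py (the other train/loss ops are elided for brevity):

# Hedged sketch: evaluate the merged summary together with the training ops,
# so summary_op sees the same full feed_dict (image, label, keep_prob)
# and no placeholder is left unfed.
feed_dict = {image_holder: img_batch, label_holder: lbl_batch, keep_prob: 0.6}
fetches = [train_op1, loss1, accuracy, summary_op]  # extend with the remaining ops as in train.py
_, ls1, acc, summary_str = sess.run(fetches, feed_dict=feed_dict)
train_writer.add_summary(summary_str, step)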

 

import os
import time
import datetime
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from input_data import OCRIter
import model

os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'

img_h = 72
img_w = 272
num_label = 7
batch_size = 32
epoch = 10000
learning_rate = 0.0001

logs_path = 'logs\\1005'
model_path = 'saved_model\\1005'

image_holder = tf.compat.v1.placeholder(tf.float32, [batch_size, img_h, img_w, 3])
label_holder = tf.compat.v1.placeholder(tf.int32, [batch_size, 7])
keep_prob = tf.compat.v1.placeholder(tf.float32)


def get_batch():
    data_batch = OCRIter(batch_size, img_h, img_w)
    image_batch, label_batch = data_batch.iter()
    return np.array(image_batch), np.array(label_batch)


logit1, logit2, logit3, logit4, logit5, logit6, logit7 = model.cnn_inference(
    image_holder, keep_prob)

loss1, loss2, loss3, loss4, loss5, loss6, loss7 = model.calc_loss(
    logit1, logit2, logit3, logit4, logit5, logit6, logit7, label_holder)

train_op1, train_op2, train_op3, train_op4, train_op5, train_op6, train_op7 = model.train_step(
    loss1, loss2, loss3, loss4, loss5, loss6, loss7, learning_rate)

accuracy = model.pred_model(logit1, logit2, logit3, logit4, logit5, logit6, logit7, label_holder)

input_image=tf.compat.v1.summary.image('input', image_holder)

summary_op = tf.compat.v1.summary.merge(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SUMMARIES))

init_op = tf.compat.v1.global_variables_initializer()

with tf.compat.v1.Session() as sess:
    sess.run(init_op)
    
    train_writer = tf.compat.v1.summary.FileWriter(logs_path, sess.graph)
    saver = tf.compat.v1.train.Saver()

    start_time1 = time.time()
    for step in range(epoch):
        # Generate a batch of license plate images and their labels
        img_batch, lbl_batch = get_batch()

        start_time2 = time.time()
        time_str = datetime.datetime.now().isoformat()

        feed_dict = {image_holder:img_batch, label_holder:lbl_batch, keep_prob:0.6}
        _1, _2, _3, _4, _5, _6, _7, ls1, ls2, ls3, ls4, ls5, ls6, ls7, acc = sess.run(
            [train_op1, train_op2, train_op3, train_op4, train_op5, train_op6, train_op7, 
             loss1, loss2, loss3, loss4, loss5, loss6, loss7, accuracy], feed_dict)
        summary_str = sess.run(summary_op, feed_dict)
        train_writer.add_summary(summary_str,step)
        duration = time.time() - start_time2
        loss_total = ls1 + ls2 + ls3 + ls4 + ls5 + ls6 + ls7
        if step % 10 == 0:
            sec_per_batch = float(duration)
            print('%s: Step %d, loss_total = %.2f, acc = %.2f%%, sec/batch = %.2f' %
                (time_str, step, loss_total, acc * 100, sec_per_batch))
        if step % 5000 == 0 or (step + 1) == epoch:
            checkpoint_path = os.path.join(model_path,'./model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
    end_time = time.time()
    print("Training over. It costs {:.2f} minutes".format((end_time - start_time1) / 60))

This is the complete code of train.py.
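
The error names 'Placeholder_5', yet train.py only defines three placeholders (image_holder, label_holder, keep_prob), which suggests the graph holds more placeholders than expected, for example nodes left over from an earlier run in the same interpreter or notebook session, or extra placeholders created inside model.py. A hedged diagnostic sketch to list every placeholder actually present in the default graph (run it right after the model is built, just before the Session block):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Diagnostic sketch: print every Placeholder op in the default graph,
# to see which tensor 'Placeholder_5' actually refers to.
for op in tf.get_default_graph().get_operations():
    if op.type == 'Placeholder':
        tensor = op.outputs[0]
        print(op.name, tensor.dtype, tensor.shape)

If stale placeholders show up, calling tf.compat.v1.reset_default_graph() before rebuilding the model (or restarting the interpreter) clears them so the feed_dict keys match the placeholders the graph actually uses.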
