TensorFlow 2.0 pitfall, please help: shape=(None, 139, 139, 3) cannot be interpreted as a Tensor

Running the code below fails with an error like: a tensor with shape=(None, 139, 139, 3) cannot be interpreted as a Tensor.
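
In TF1-style code this error almost always means the fetched or fed tensor belongs to a different graph than the one the Session is running, for example because tf.reset_default_graph() executed after the tensor was created (easy to trigger by re-running notebook cells out of order). A minimal reproduction, assuming that is the cause here:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=(None, 139, 139, 3))
tf.reset_default_graph()        # x now belongs to the discarded graph

with tf.Session() as sess:      # this session uses the new, empty default graph
    # TypeError: Fetch argument ... cannot be interpreted as a Tensor.
    # (Tensor ... is not an element of this graph.)
    sess.run(x, feed_dict={x: np.zeros((1, 139, 139, 3), np.float32)})

If that matches, make sure tf.reset_default_graph() runs before dataset() and the placeholders are built, and restart the kernel instead of re-running individual cells.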

import tensorflow.compat.v1 as tf
import sys
import glob

tf.disable_eager_execution()  # run the TF1-style graph code below
tf.reset_default_graph()      # reset BEFORE any ops are created
epochs = 15
batch_size = 32
total_sum = 0        # training samples consumed so far
epoch = 0
random_ratio = 0.05  # fraction of each class held out for testing

filenames_train = ['face_train_{}.tfrecords'.format(i) for i in range(5)]
filenames_test = ['face_test_{}.tfrecords'.format(i) for i in range(5)]

test_num = sum(int(len(glob.glob('./face/{}_face/*.png'.format(i))) * random_ratio) for i in range(5))
train_num = sum(len(glob.glob('./face/{}_face/*.png'.format(i))) for i in range(5)) - test_num
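
# dataset() is not defined in this snippet; a minimal sketch of what it
# presumably does is below. The feature keys ('image', 'label') and the
# raw-uint8 image encoding are assumptions about how the TFRecords were written.
def dataset(filenames, batch_size, epochs):
    def _parse(example_proto):
        features = {
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        }
        parsed = tf.parse_single_example(example_proto, features)
        image = tf.reshape(tf.decode_raw(parsed['image'], tf.uint8), (139, 139, 3))
        image = tf.cast(image, tf.float32) / 255.0    # scale to [0, 1]
        label = tf.one_hot(parsed['label'], depth=5)  # 5 face classes
        return image, label

    ds = tf.data.TFRecordDataset(filenames)
    ds = ds.map(_parse).shuffle(1000).repeat(epochs).batch(batch_size)
    return ds.make_one_shot_iterator().get_next()     # (images, labels) tensors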

next_element_train = dataset(filenames_train, batch_size=batch_size, epochs=epochs)
next_element_test = dataset(filenames_test, batch_size=batch_size, epochs=1)
print(next_element_train[1])

input_data = tf.placeholder(tf.float32, shape=(None, 139, 139, 3))
input_label = tf.placeholder(tf.float32, shape=(None, 5))

# Spatial size after each layer: 139 -> 137 -> 68 -> 34 -> 17 -> 8 -> 4 -> 1
hidden = tf.keras.layers.Conv2D(filters=16, kernel_size=3, strides=1, padding='valid', activation='relu')(input_data)  # 139 -> 137
hidden = tf.keras.layers.MaxPool2D(pool_size=2)(hidden)  # 137 -> 68
hidden = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='same', activation='relu')(hidden)  # 68 -> 34
hidden = tf.keras.layers.MaxPool2D(pool_size=2)(hidden)  # 34 -> 17
hidden = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=2, padding='valid', activation='relu')(hidden)  # 17 -> 8
hidden = tf.keras.layers.MaxPool2D(pool_size=2)(hidden)  # 8 -> 4
hidden = tf.keras.layers.Conv2D(filters=128, kernel_size=3, strides=2, padding='valid', activation='relu')(hidden)  # 4 -> 1
hidden = tf.keras.layers.Flatten()(hidden)  # 1x1x128 -> 128

output = tf.keras.layers.Dense(5, activation='softmax')(hidden)

# loss function
loss = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(input_label, output))
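
# A numerically more stable alternative (sketch): drop the softmax from the
# Dense layer and use the fused cross-entropy op on the raw logits instead,
# e.g. with a hypothetical `logits = tf.keras.layers.Dense(5)(hidden)`:
#   loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
#       labels=input_label, logits=logits))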

# optimizer
opt = tf.train.AdamOptimizer()
train_op = opt.minimize(loss)

# test evaluation: mean categorical accuracy over a batch
acc = tf.reduce_mean(tf.keras.metrics.categorical_accuracy(input_label, output))
#correct_pred = tf.equal(tf.argmax(input_label,axis=1),tf.argmax(output,axis=1))
#acc = tf.reduce_mean(tf.cast(correct_pred,tf.float32))

tf.add_to_collection('my_op',input_data)
tf.add_to_collection('my_op',output)
tf.add_to_collection('my_op',loss)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)

    while epoch < epochs:
        # Fetch images and labels in a single run() call so the pair stays
        # aligned; two separate run() calls would each advance the iterator.
        data, label = sess.run([next_element_train[0], next_element_train[1]])

        total_sum += batch_size
        _, loss_val = sess.run([train_op, loss],
                               feed_dict={input_data: data, input_label: label})
        if total_sum % 20 == 0:
            sys.stdout.write('epoch:{},index:{}/{},loss:{:.4f}\r'.format(
                epoch, total_sum % train_num, train_num, loss_val))
            sys.stdout.flush()
        if total_sum // train_num > epoch:
            epoch = total_sum // train_num
            loss_val = sess.run(loss, feed_dict={input_data: data, input_label: label})

            # The test iterator is built for a single epoch, so rebuild it
            # each time an epoch finishes.
            next_element_test = dataset(filenames_test, batch_size=batch_size, epochs=1)
            total_test_sum = 0
            acc_test_lst = []
            while total_test_sum < test_num:
                total_test_sum += batch_size
                test_data, test_label = sess.run([next_element_test[0], next_element_test[1]])
                acc_test = sess.run(acc, feed_dict={input_data: test_data, input_label: test_label})
                acc_test_lst.append(acc_test)
            acc_val = sum(acc_test_lst) / len(acc_test_lst)
            saver.save(sess, save_path="./model/my_model.ckpt")
            print('epoch:{},train_loss:{:.4f},test_acc:{:.4f}'.format(epoch, loss_val, acc_val))
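
Since input_data, output, and loss were added to the 'my_op' collection before saving, the checkpoint can later be reloaded without rebuilding the graph by hand. A minimal sketch, assuming the same checkpoint path:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
tf.reset_default_graph()

saver = tf.train.import_meta_graph('./model/my_model.ckpt.meta')
with tf.Session() as sess:
    saver.restore(sess, './model/my_model.ckpt')
    input_data, output, loss = tf.get_collection('my_op')
    # predictions = sess.run(output, feed_dict={input_data: some_images})
    # (some_images is hypothetical: any batch of shape (N, 139, 139, 3))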