TypeError: Failed to convert object of type <class 'tensorflow.python.keras.engine.sequential.Sequential'> to Tensor. Contents: <tensorflow.python.keras.engine.sequential.Sequential object at 0x0000011BE6CC07B8>. Consider casting elements to a supported type.
```python
import warnings
warnings.filterwarnings('ignore')
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dropout, Flatten, Dense,BatchNormalization,LeakyReLU,Reshape,Conv2DTranspose
from tensorflow.keras import backend as K
from keras.optimizers import SGD
from keras.models import load_model
import matplotlib.pyplot as plt
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
K.image_data_format() == 'channels_first'
from keras.utils import np_utils
def test_model_generator(input_shape=None):
model = Sequential()
model.add(Dense(19*19*32, use_bias=False, input_shape=(100,)))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Reshape((19, 19, 32))) # 8*8*256
model.add(Conv2DTranspose(32, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(LeakyReLU()) # 8*8*128
model.add(Conv2DTranspose(16, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(LeakyReLU()) # 16*16*128
model.add(Conv2DTranspose(15, (1, 1), strides=(1, 1), padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(LeakyReLU()) # 32*32*32
model.add(Conv2DTranspose(15, (1, 1), strides=(1, 1), padding='same', use_bias=False, activation='tanh'))
# 64*64*3
return model
def test_model_discriminator(C1=None):
model = Sequential()
model.add(Conv2D(32, (5, 5), strides=(2, 2), padding='same',
input_shape=[19, 19, 15]))
model.add(LeakyReLU())
model.add(Dropout(0.3)) # 32*32*32
model.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.3)) # 16*16*64
model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(BatchNormalization())
model.add(LeakyReLU())
# model.add(layers.Dropout(0.3)) # 8*8*128
model.add(Conv2D(256, (5, 5), strides=(2, 2), padding='same'))
model.add(BatchNormalization())
model.add(LeakyReLU()) # 4*4*256
model.add(Flatten())
model.add(Dense(512))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dense(1))
return model
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def dis_loss(real_output,fake_output):
return tf.reduce_mean(fake_output)-tf.reduce_mean(real_output)
def gen_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output),fake_output)
@tf.function
def train_step(images):
noise=tf.random.normal([BATCH_SIZE,noise_dim])
with tf.GradientTape() as gen_tape,tf.GradientTape() as dis_tape:
generated_images=test_model_generator(noise)
real_output=test_model_discriminator(images)
fake_output=test_model_discriminator(generated_images)
print(generated_images)
print(real_output)
print(fake_output)
discriminator_loss=dis_loss(real_output,fake_output)
generator_loss=gen_loss(fake_output)
dis_grads=dis_tape.gradient(discriminator_loss,discriminator.trainable_variables)
gen_gards=gen_tape.gradient(generator_loss,generator.trainable_variables)
discriminator_optimizer.apply_gradients(zip(dis_grads,discriminator.trainable_variables))
generator_optimizer.apply_gradients(zip(gen_gards,generator.trainable_variables))
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4, 4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow((predictions[i, :, :, 0] + 1)/2, cmap='gray')
plt.axis('off')
plt.savefig('./image/image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
def train(dataset,epochs):
for epoch in range(epochs):
for image_batch in dataset:
train_step(image_batch)
print('.', end='')
print()
generate_and_save_images(test_model_generator,
epoch + 1,
seed)
generate_and_save_images(test_model_generator,
epochs,
seed)
if __name__ == '__main__':
windowSize = 19
numPCAcomponents = 15
testRatio = 0.30
BATCH_SIZE = 64
EPOCHS = 1000
noise_dim = 100
num_examples_to_generate = 16
X_train = np.load("G:/code/HSI-SVM-master/data/XtrainWindowSize"
+ str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
y_train = np.load("G:/code/HSI-SVM-master/data/ytrainWindowSize"
+ str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
X_test = np.load("G:/code/HSI-SVM-master/data/XtestWindowSize"
+ str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
y_test = np.load("G:/code/HSI-SVM-master/data/ytestWindowSize"
+ str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
# 转化为(num,channels,height,width)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2], X_train.shape[3]))
X_train=tf.cast(X_train,dtype=tf.float32)
X_train=(X_train/127.5)-1
# print(X_train)
# print(X_train.shape)
# print(X_train.dtype)
train_datasets=tf.data.Dataset.from_tensor_slices(X_train).shuffle(1000).batch(BATCH_SIZE)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2], X_test.shape[3]))
X_test=tf.cast(X_test,dtype=tf.float32)
# 转化标签
y_train = np_utils.to_categorical(y_train)
print(y_train.shape)
y_test = np_utils.to_categorical(y_test)
seed = tf.random.normal([num_examples_to_generate, noise_dim])
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
generator = test_model_generator()
discriminator = test_model_discriminator()
# 定义输入的形状
input_shape = X_train[0].shape
print(input_shape)
C1 = 3 * numPCAcomponents
train(train_datasets, EPOCHS)
I don't know whether you have solved this problem yet — if not, here is a suggestion: