关于#ACGAN#的问题,如何解决?(语言-python)


from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential, layers
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.utils import to_categorical
import glob
import os
import PIL
import time

from IPython import display

def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 3)

    return model

def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(3, (5, 5), strides=(2, 2), padding='same',
                                     input_shape=[28, 28, 3]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model

generator = make_generator_model()
discriminator = make_discriminator_model()
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

checkpoint_dir = 'F:/111biyesheji/DeepLearning/DeepLearning-GANs/training_checkpoints-GAN'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)

checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()

noi = tf.random.normal([1, 100])
sample = checkpoint.generator(noi, training=False)
plt.imshow(sample[0, :, :, 0], cmap='gray')
# plt.show()

disc = checkpoint.discriminator
disc.summary()

val = disc(sample)
print (val)

new_model = Sequential()

for i in range(len(disc.layers) - 1):
    new_model.add(disc.layers[i])

# Freeze the layers
for layer in new_model.layers:
    layer.trainable = False

new_model.add(Dense(10, activation='softmax'))
new_model.summary()

new_model.layers[-1].weights

train_img = np.load('F:/111biyesheji/DeepLearning/DeepLearning-GANs/NUM/train-f.npy')
train_lb = np.load('F:/111biyesheji/DeepLearning/DeepLearning-GANs/NUM/train-t.npy')
test_img = np.load('F:/111biyesheji/DeepLearning/DeepLearning-GANs/NUM/test-f.npy')
test_lb = np.load('F:/111biyesheji/DeepLearning/DeepLearning-GANs/NUM/test-t.npy')

print(train_img.shape, train_lb.shape, test_img.shape, test_lb.shape, '\n')

# set random seed
np.random.seed(42)
trainsize = 1000  # number of training images we use, selected randomly
randtrain = np.random.choice(train_img.shape[0], trainsize, replace=True)
X_train, y_train = train_img[randtrain], train_lb[randtrain]

np.random.seed(10)
testsize = 1000  # number of testing images we use, selected randomly
randtest = np.random.choice(test_img.shape[0], testsize, replace=True)
X_test, y_test = test_img[randtest], test_lb[randtest]

print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

train_images = X_train.reshape((trainsize, 28, 28, 3))
train_images = (train_images.astype('float32') - 127.5) / 127.5

test_images = X_test.reshape((testsize, 28, 28, 3))
test_images = (test_images.astype('float32') - 127.5) / 127.5

train_labels = to_categorical(y_train)
test_labels = to_categorical(y_test)

new_model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

history = new_model.fit(train_images, train_labels, batch_size=100, epochs=15,
                        validation_data=(test_images, test_labels))

plt.figure()
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.3, 1])
plt.legend(loc='lower right')
plt.show()
C:\Acacondon\install\envs\tf2.3\python.exe F:\111biyesheji\DeepLearning\DeepLearning-GANs\debug22.py 
2023-03-18 02:07:39.331593: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN)to use the following CPU instructions in performance-critical operations:  AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-03-18 02:07:39.338684: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x230452c7ca0 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2023-03-18 02:07:39.338931: I tensorflow/compiler/xla/service/service.cc:176]   StreamExecutor device (0): Host, Default Version
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 14, 14, 3)         228       
_________________________________________________________________
leaky_re_lu_3 (LeakyReLU)    (None, 14, 14, 3)         0         
_________________________________________________________________
dropout (Dropout)            (None, 14, 14, 3)         0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 7, 7, 128)         9728      
_________________________________________________________________
leaky_re_lu_4 (LeakyReLU)    (None, 7, 7, 128)         0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 7, 7, 128)         0         
_________________________________________________________________
flatten (Flatten)            (None, 6272)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 6273      
=================================================================
Total params: 16,229
Trainable params: 16,229
Non-trainable params: 0
_________________________________________________________________
tf.Tensor([[0.23519428]], shape=(1, 1), dtype=float32)
Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 14, 14, 3)         228       
_________________________________________________________________
leaky_re_lu_3 (LeakyReLU)    (None, 14, 14, 3)         0         
_________________________________________________________________
dropout (Dropout)            (None, 14, 14, 3)         0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 7, 7, 128)         9728      
_________________________________________________________________
leaky_re_lu_4 (LeakyReLU)    (None, 7, 7, 128)         0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 7, 7, 128)         0         
_________________________________________________________________
flatten (Flatten)            (None, 6272)              0         
_________________________________________________________________
dense_2 (Dense)              (None, 10)                62730     
=================================================================
Total params: 72,686
Trainable params: 62,730
Non-trainable params: 9,956
_________________________________________________________________
(3568, 28, 28, 3) (3568, 2) (1185, 28, 28, 3) (1185, 2) 

(1000, 28, 28, 3) (1000, 2) (1000, 28, 28, 3) (1000, 2)
Epoch 1/15
Traceback (most recent call last):
  File "F:\111biyesheji\DeepLearning\DeepLearning-GANs\debug22.py", line 127, in <module>
    validation_data=(test_images, test_labels))
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
    return method(self, *args, **kwargs)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1098, in fit
    tmp_logs = train_function(iterator)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\eager\def_function.py", line 780, in __call__
    result = self._call(*args, **kwds)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\eager\def_function.py", line 823, in _call
    self._initialize(args, kwds, add_initializers_to=initializers)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\eager\def_function.py", line 697, in _initialize
    *args, **kwds))
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\eager\function.py", line 2855, in _get_concrete_function_internal_garbage_collected
    graph_function, _, _ = self._maybe_define_function(args, kwargs)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\eager\function.py", line 3213, in _maybe_define_function
    graph_function = self._create_graph_function(args, kwargs)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\eager\function.py", line 3075, in _create_graph_function
    capture_by_value=self._capture_by_value),
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\framework\func_graph.py", line 986, in func_graph_from_py_func
    func_outputs = python_func(*func_args, **func_kwargs)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\eager\def_function.py", line 600, in wrapped_fn
    return weak_wrapped_fn().__wrapped__(*args, **kwds)
  File "C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\framework\func_graph.py", line 973, in wrapper
    raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:

    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function  *
        return step_function(self, iterator)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
        return fn(*args, **kwargs)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step  **
        outputs = model.train_step(data)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\engine\training.py:749 train_step
        y, y_pred, sample_weight, regularization_losses=self.losses)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\engine\compile_utils.py:204 __call__
        loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\losses.py:149 __call__
        losses = ag_call(y_true, y_pred)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\losses.py:253 call  **
        return ag_fn(y_true, y_pred, **self._fn_kwargs)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
        return target(*args, **kwargs)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\losses.py:1535 categorical_crossentropy
        return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
        return target(*args, **kwargs)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\keras\backend.py:4687 categorical_crossentropy
        target.shape.assert_is_compatible_with(output.shape)
    C:\Acacondon\install\envs\tf2.3\lib\site-packages\tensorflow\python\framework\tensor_shape.py:1134 assert_is_compatible_with
        raise ValueError("Shapes %s and %s are incompatible" % (self, other))

    ValueError: Shapes (100, 2, 3563) and (100, 10) are incompatible


进程已结束,退出代码1



在做ACGAN的图像分类,不是很理解,好像是分类器的问题。恳请详细解答

Shapes (100, 2, 3563) and (100, 10) are incompatible
数据维度不匹配

该回答引用GPTᴼᴾᴱᴺᴬᴵ
这里的问题是如何解决ACGAN?它需要的是一个条件生成对抗网络,可以在生成的图像中添加标签信息。您的代码示例中似乎没有实现ACGAN。以下是一个示例ACGAN代码的伪代码:

# 生成器
def make_generator_model():
    # 添加输入的标签
    label = layers.Input(shape=(1,), dtype='int32')
    emb = layers.Embedding(num_classes, latent_size)(label)

    # 添加噪声向量
    noise = layers.Input(shape=(latent_size,))

    # 将标签和噪声向量连接起来
    model_input = layers.multiply([noise, emb])

    # 添加生成器网络层
    # ...

    # 输出生成的图像
    return Model([noise, label], generated_image)

# 判别器
def make_discriminator_model():
    # 添加输入的标签
    label = layers.Input(shape=(1,), dtype='int32')
    emb = layers.Embedding(num_classes, image_size * image_size)(label)
    emb = layers.Reshape((image_size, image_size, 1))(emb)

    # 添加图像输入
    image = layers.Input(shape=(image_size, image_size, channels))

    # 将标签和图像连接起来
    model_input = layers.concatenate([image, emb])

    # 添加判别器网络层
    # ...

    # 输出判别结果
    return Model([image, label], validity)

# 定义模型
generator = make_generator_model()
discriminator = make_discriminator_model()

# 编译判别器
discriminator.compile(optimizer=optimizer,
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

# 定义合并模型,用于训练生成器
z = layers.Input(shape=(latent_size,))
label = layers.Input(shape=(1,))
img = generator([z, label])
valid = discriminator([img, label])

combined = Model([z, label], valid)

combined.compile(optimizer=optimizer,
                 loss='binary_crossentropy')

您需要将此代码集成到您的代码中,并进行相应的修改以适应您的数据集和其他参数。