如何把全连接层加到卷积层之后,使用TensorFlow

本人尝试改变一张文章源代码的网络结构,原文是使用四个卷积层进行降噪,我想要把最后一个卷积层变成全连接层,但是我改后似乎输入流只经过了最后一层全连接层,并且输出结果损失函数也不是很好,实在是找不到问题所在,不知道怎么更改,希望各位同僚看一看,感谢万分
这是我的代码:


 def build_network(self,
                      built_for_training=False):  # build a network similar with SRCNN, a fully-convolutional netowrk for channel noise estimation
        # built_for_train: denote whether the network is build for training or test. If for test, xavier initialization is not needed since the model will be loaded later.

        x_in = tf.placeholder(tf.float32, [None, self.net_config.feature_length])  # input data # 576
        x_in_reshape = tf.reshape(x_in, (-1, self.net_config.feature_length, 1, 1))  # HY:576
        layer_output = {}

        for layer in range(self.net_config.total_layers):  # construct layers 4
            # 给每层命名
            self.conv_filter_name[layer] = format("conv_layer%d" % (layer))
            self.bias_name[layer] = format("b%d" % (layer))
            self.bia_name[layer] = format("b_01%d" % (layer))
            self.weight_name[layer] = format("w_01%d" % (layer))

            # 给每层设置输入输出维度
            if layer == 0:
                x_input = x_in_reshape
                in_channels = 1
            else:
                x_input = layer_output[layer - 1]
                # in_channels表示每一层的卷积核数量
                in_channels = self.net_config.feature_map_nums[layer - 1]  # np.array([64,32,16,1])
            out_channels = self.net_config.feature_map_nums[layer]  # np.array([64,32,16,1])

            shape = [self.net_config.filter_sizes[layer], 1, in_channels, out_channels]
            if built_for_training:
                # xavier initialization for training                                          # np.array([9,3,3,15]),另一维度为1
                # get_variable 获取已经存在的名为name的变量
                with tf.variable_scope("layer_%d" % layer):
                    self.conv_filter[layer] = tf.get_variable("w", shape,
                                                              tf.float32, tf.contrib.layers.xavier_initializer())
                    self.bias[layer] = tf.get_variable("b", shape[-1],
                                                       tf.float32, tf.contrib.layers.xavier_initializer())
                    self.bia[layer] = tf.get_variable("b_01", 576,
                                                       tf.float32, tf.contrib.layers.xavier_initializer())
                    self.weight[layer] = tf.get_variable("w_01", [576, 576],
                                                              tf.float32, tf.contrib.layers.xavier_initializer())
                # Variable 创建变量,定义初始值
                self.best_conv_filter[layer] = tf.Variable(
                    tf.ones([self.net_config.filter_sizes[layer], 1, in_channels, out_channels], tf.float32),
                    dtype=tf.float32)
                self.best_bias[layer] = tf.Variable(tf.ones([out_channels], tf.float32), dtype=tf.float32)
                self.best_bia[layer] = tf.Variable(tf.ones(576, tf.float32), dtype=tf.float32)
                self.best_weight[layer] = tf.Variable(tf.ones([576, 576], tf.float32), dtype=tf.float32)

                self.assign_best_conv_filter[layer] = self.best_conv_filter[layer].assign(self.conv_filter[layer])
                self.assign_best_bias[layer] = self.best_bias[layer].assign(self.bias[layer])
                self.assign_best_bia[layer] = self.best_bia[layer].assign(self.bia[layer])
                self.assign_best_weight[layer] = self.best_weight[layer].assign(self.weight[layer])
            else:
                # just build tensors for testing and their values will be loaded later.
                self.conv_filter[layer] = tf.Variable(
                    tf.random_normal([self.net_config.filter_sizes[layer], 1, in_channels, out_channels], 0, 1,
                                     tf.float32), dtype=tf.float32,
                    name=self.conv_filter_name[layer])
                self.weight[layer] = tf.Variable(tf.random_normal([576,576], 0, 1,
                                     tf.float32), dtype=tf.float32,
                    name=self.weight_name[layer])
                self.bias[layer] = tf.Variable(tf.random_normal([out_channels], 0, 1, tf.float32), dtype=tf.float32,
                                               name=self.bias_name[layer])
                self.bia[layer] = tf.Variable(tf.random_normal([576], 0, 1, tf.float32), dtype=tf.float32,
                                               name=self.bia_name[layer])
#      层定义
            if layer == self.net_config.total_layers - 1:
                # 原代码
                # layer_output[layer] = tf.nn.conv2d(x_input, self.conv_filter[layer], [1, 1, 1, 1], 'SAME') + self.bias[
                #     layer]
                x_in = tf.reshape(x_in, [-1, 576])
                layer_output[layer] = tf.nn.relu(tf.matmul(x_in, self.weight[layer]) + self.bia[layer])

            else:
                # Activation Function
                layer_output[layer] = tf.nn.relu(
                    tf.nn.conv2d(x_input, self.conv_filter[layer], [1, 1, 1, 1], 'SAME') + self.bias[layer])
              



        y_out = layer_output[self.net_config.total_layers - 1]
        y_out = tf.reshape(y_out, (-1, self.net_config.label_length))  # +x_in# resnet ?,576


        return x_in, y_out
# 存储训练    
def save_network_temporarily(self, sess_in):
        for layer in range(self.net_config.save_layers):
            sess_in.run(self.assign_best_conv_filter[layer])
            sess_in.run(self.assign_best_bias[layer])
            sess_in.run(self.assign_best_weight[layer])
            sess_in.run(self.assign_best_bia[layer])