The loss won't go down after switching to functional-style modeling

Observed behavior and background

In TensorFlow, why does the loss stop going down after I change the model from the class (subclassing) style to the functional style?

Relevant code

This is the class-based (subclassed) model, a ResNet-50:

import tensorflow as tf
from tensorflow.keras import layers, Model, Sequential

# build_block and NonLocalBlock are project-specific helpers defined elsewhere in my code.


class ResNet(Model):
    def get_config(self):
        config = {
            "block_layers": self.block_layers,
            "base_filters": self.base_filters,
            "block_type": self.block_type
        }
        base_config = super(ResNet, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def __init__(self, block_layers, name="resnet", base_filters=64, block_type='deep'):
        """
        pa
        :param block_layers:
        :param base_filters:
        :param block_type: deep or simple
        """
        super(ResNet, self).__init__(name=name)
        self.block_layers = block_layers
        self.base_filters = base_filters
        self.block_type = block_type
        self.feature = Sequential([
            layers.Conv2D(filters=base_filters, kernel_size=7, strides=2),
            layers.BatchNormalization(),
            layers.ReLU(),
            layers.MaxPool2D(pool_size=3, strides=2),
        ])
        nlb_dims = base_filters * 4
        if block_type == "deep":
            nlb_dims = base_filters * 16
        self.block1 = build_block(base_filters, block_layers[0], init_stride=1, block_type=block_type,
                                  is_downsample=True, name="res_block_1")
        self.block2 = build_block(base_filters * 2, block_layers[1], block_type=block_type, name='res_block_2')
        self.block3 = build_block(base_filters * 4, block_layers[2], block_type=block_type, name='res_block_3')
        # non-local block
        self.nlb = NonLocalBlock(input_dims=nlb_dims)
        self.block4 = build_block(base_filters * 8, block_layers[3], block_type=block_type, name='res_block_4')

    def call(self, inputs, training=None, mask=None):
        x = inputs
        x = self.feature(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.nlb(x)
        x = self.block4(x)
        return x
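
For reference, a minimal usage sketch of how the subclassed model can be built and inspected (this is an assumption on my part, not the original training code): run a dummy batch through it so the weights are created, then print a summary. The (128, 128, 2) input shape is taken from the functional version below, and build_block / NonLocalBlock are assumed to be in scope.

model = ResNet(block_layers=[3, 4, 6, 3], base_filters=64, block_type='deep')
_ = model(tf.zeros((1, 128, 128, 2)))  # dummy forward pass creates the weights
model.summary()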

This is the functional-style model:

def resnet(base_filters=64, block_layers=(3, 4, 6, 3), block_type="deep", se=True, name='resnet', inputs=None):
    if inputs is None:
        inputs = layers.Input(shape=(128, 128, 2), dtype=tf.float32)
    nlb_dims = base_filters * 4
    if block_type == "deep":
        nlb_dims = base_filters * 16

    x = layers.Conv2D(filters=base_filters, kernel_size=7, strides=2)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)
    x = build_block(base_filters, block_layers[0], init_stride=1, block_type=block_type, se=se, is_downsample=True,
                    name="res_block_1")(x)
    x = build_block(base_filters * 2, block_layers[1], block_type=block_type, se=se, name='res_block_2')(x)
    x = build_block(base_filters * 4, block_layers[2], block_type=block_type, se=se, name='res_block_3')(x)
    x = NonLocalBlock(input_dims=nlb_dims)(x)
    x = build_block(base_filters * 8, block_layers[3], block_type=block_type, se=se, name='res_block_4')(x)

    return Model(inputs, x, name=name)
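
Since both snippets are meant to build the same network, a quick sanity check is to construct both and compare their parameter counts (a sketch, assuming both definitions above and the helper functions are in scope; note that the class version does not pass se to build_block, so the counts may legitimately differ):

subclassed = ResNet(block_layers=[3, 4, 6, 3], base_filters=64, block_type='deep')
_ = subclassed(tf.zeros((1, 128, 128, 2)))  # forward pass to create the weights
functional = resnet(base_filters=64, block_layers=(3, 4, 6, 3), block_type='deep')

print(subclassed.count_params())  # should match functional.count_params() if the architectures are identical
print(functional.count_params())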
Result and error output

With the previous (subclassed) model the loss goes down quickly, but with this one it goes down very slowly.

Because of this line:

base_config = super(ResNet, self).get_config()
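
For context, that line follows the usual pattern for making a custom model serializable. A minimal sketch of the conventional form, equivalent in effect to the dict-merging version above:

    def get_config(self):
        # start from the parent config, then add this model's constructor arguments
        config = super(ResNet, self).get_config()
        config.update({
            "block_layers": self.block_layers,
            "base_filters": self.base_filters,
            "block_type": self.block_type,
        })
        return config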