Using a function as the network's loss function (it runs fine when I feed it two numpy arrays directly), I get the error "Cannot convert a symbolic Tensor to a numpy array".

Problem description and background

I get errors while writing an MS-SSIM loss function in TensorFlow. The ms-ssim function runs fine when called as an ordinary function, but when I use it as a loss function, change its inputs to y_true and y_pred, and call model.compile, it always fails with either "'Tensor' object has no attribute 'size'" or "Cannot convert a symbolic Tensor to a numpy array".
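For context, a minimal sketch of the underlying issue (the two toy losses below are hypothetical and only written to reproduce the symptom): when Keras traces a loss during model.compile/model.fit, y_true and y_pred are symbolic tensors, and any numpy/scipy call that tries to turn them into arrays fails, while pure tf ops keep working.

import numpy as np
import tensorflow as tf

def numpy_mse(y_true, y_pred):
    # np.mean tries to convert the symbolic y_true/y_pred to numpy arrays,
    # which raises "Cannot convert a symbolic Tensor to a numpy array"
    # when Keras traces this loss inside model.fit.
    return np.mean((y_true - y_pred) ** 2)

def tf_mse(y_true, y_pred):
    # tf.reduce_mean stays symbolic, so this version compiles and trains.
    return tf.reduce_mean(tf.square(y_true - y_pred))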

Relevant code (please do not paste screenshots)

## Define the MS-SSIM loss function
import numpy as np
from scipy import signal
from scipy.ndimage.filters import convolve
import tensorflow as tf

def _FSpecialGauss(size, sigma):
    """Function to mimic the 'fspecial' gaussian MATLAB function."""
    radius = size // 2
    offset = 0.0
    start, stop = -radius, radius + 1
    if size % 2 == 0:
        offset = 0.5
        stop -= 1
    x, y = np.mgrid[offset + start:stop, offset + start:stop]
    assert len(x) == size
    g = np.exp(-((x**2 + y**2) / (2.0 * sigma**2)))
    return g / g.sum()

def _SSIMForMultiScale(img1, img2, max_val=1, filter_size=1, filter_sigma=1.5, k1=0.01, k2=0.03):
    """Return the Structural Similarity Map between `img1` and `img2`.

    Arguments:
      img1: Numpy array holding the first RGB image batch.
      img2: Numpy array holding the second RGB image batch.
      max_val: the dynamic range of the images (i.e., the difference between the
        maximum and the minimum allowed values).
      filter_size: Size of blur kernel to use (will be reduced for small images).
      filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
        for small images).
      k1: Constant used to maintain stability in the SSIM calculation (0.01 in
        the original paper).
      k2: Constant used to maintain stability in the SSIM calculation (0.03 in
        the original paper).

    Returns:
      Pair containing the mean SSIM and contrast sensitivity between `img1` and
      `img2`.

    Raises:
      RuntimeError: If input images don't have the same shape or don't have four
        dimensions: [batch_size, height, width, depth].
    """
    #if img1.shape != img2.shape:
    #    raise RuntimeError('Input images must have the same shape (%s vs. %s).',
    #                       img1.shape, img2.shape)
    if len(img1.shape.as_list()) != 4:
        raise RuntimeError('Input images must have four dimensions, not %d',
                           len(img1.shape.as_list()))

    #img1 = img1.astype(np.float64)
    img1 = img1
    #img2 = img2.astype(np.float64)
    img2 = img2
    _, height, width, _ = img1.shape

    # Filter size can't be larger than height or width of images.
    size = filter_size  # None can't be compared with filter_size, but we know our height is 256, so the minimum is filter_size
    #size = min(filter_size, height, width)

    # Scale down sigma if a smaller filter size is used.
    sigma = size * filter_sigma / filter_size if filter_size else 0

    #sess = tf.compat.v1.Session()
    #img1 = img1.eval(session=sess)
    #img2 = img2.eval(session=sess)

    if filter_size:
        window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
        #mu1 = signal.fftconvolve(img1, window, mode='valid')
        mu1 = tf.nn.conv2d(img1, window, strides=1, padding="VALID")
        #mu2 = signal.fftconvolve(img2, window, mode='valid')
        mu2 = tf.nn.conv2d(img2, window, strides=1, padding="VALID")
        #sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
        sigma11 = tf.nn.conv2d(img1 * img1, window, strides=1, padding="VALID")
        #sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
        sigma22 = tf.nn.conv2d(img2 * img2, window, strides=1, padding="VALID")
        #sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
        sigma12 = tf.nn.conv2d(img1 * img2, window, strides=1, padding="VALID")
    else:
        # Empty blur kernel so no need to convolve.
        mu1, mu2 = img1, img2
        sigma11 = img1 * img1
        sigma22 = img2 * img2
        sigma12 = img1 * img2

    mu11 = mu1 * mu1
    mu22 = mu2 * mu2
    mu12 = mu1 * mu2
    sigma11 -= mu11
    sigma22 -= mu22
    sigma12 -= mu12

    # Calculate intermediate values used by both ssim and cs_map.
    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    v1 = 2.0 * sigma12 + c2
    v2 = sigma11 + sigma22 + c2
    #ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
    ssim = tf.reduce_mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
    #cs = np.mean(v1 / v2)
    cs = tf.reduce_mean(v1 / v2)
    return ssim, cs

def MultiScaleSSIM(img1, img2, max_val=1, filter_size=1, filter_sigma=1.5,
                   k1=0.01, k2=0.03, weights=None):  # filter_size was originally 11
    """Return the MS-SSIM score between `img1` and `img2`.

    Arguments:
      img1: Numpy array holding the first RGB image batch.
      img2: Numpy array holding the second RGB image batch.
      max_val: the dynamic range of the images (i.e., the difference between the
        maximum and the minimum allowed values).
      filter_size: Size of blur kernel to use (will be reduced for small images).
      filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
        for small images).
      k1: Constant used to maintain stability in the SSIM calculation (0.01 in
        the original paper).
      k2: Constant used to maintain stability in the SSIM calculation (0.03 in
        the original paper).
      weights: List of weights for each level from the original paper.

    Returns:
      MS-SSIM score between `img1` and `img2`.

    Raises:
      RuntimeError: If input images don't have the same shape or don't have four
        dimensions: [batch_size, height, width, depth].
    """
    #if img1.shape != img2.shape:
    #    raise RuntimeError('Input images must have the same shape (%s vs. %s).',
    #                       img1.shape, img2.shape)
    #if img1.ndim != 4:
    #    raise RuntimeError('Input images must have four dimensions, not %d',
    #                       img1.shape)
    if len(img1.shape.as_list()) != 4:
        raise RuntimeError('Input images must have four dimensions, not %d',
                           len(img1.shape.as_list()))

    # Note: the default weights don't sum to 1.0 but match the paper / MATLAB code.
    weights = np.array(weights if weights else
                       [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
    levels = weights.size
    downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
    #im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
    im1, im2 = [x for x in [img1, img2]]
    mssim = np.array([])
    mcs = np.array([])
    for _ in range(levels):
        ssim, cs = _SSIMForMultiScale(
            im1, im2, max_val=max_val, filter_size=filter_size,
            filter_sigma=filter_sigma, k1=k1, k2=k2)
        mssim = tf.stack(mssim, ssim)
        mcs = tf.stack(mcs, cs)
        filtered = [convolve(im, downsample_filter, mode='reflect')
                    for im in [im1, im2]]
        im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
    return (np.prod(mcs[0:levels-1] ** weights[0:levels-1]) *
            (mssim[levels-1] ** weights[levels-1]))

Output and error messages

AttributeError: 'Tensor' object has no attribute 'size'
Cannot convert a symbolic Tensor to a numpy array

My approach and what I have tried

I think it is because y_true and y_pred have no concrete shape before the network is trained and their dimensions are None (many of the comparisons fail because None cannot be compared with an int), and I do not know how to change the code. Should I replace the original numpy additions, subtractions, multiplications, divisions and convolutions with tf operations?
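If it helps, here is a rough sketch of that direction, not a drop-in replacement: it assumes `_SSIMForMultiScale` above already uses only tf ops internally and that its Gaussian window has the same dtype as the images, keeps the per-level results in plain Python lists, and replaces the scipy.ndimage.convolve + stride-2 slicing downsampling with 2x2 average pooling. The names and defaults simply mirror the code above.

def ms_ssim_tf(y_true, y_pred, max_val=1.0, filter_size=1, filter_sigma=1.5,
               k1=0.01, k2=0.03, weights=None):
    weights = tf.constant(weights if weights else
                          [0.0448, 0.2856, 0.3001, 0.2363, 0.1333],
                          dtype=y_pred.dtype)
    levels = int(weights.shape[0])
    im1, im2 = y_true, y_pred
    mssim, mcs = [], []                        # Python lists instead of np.array([])
    for _ in range(levels):
        ssim, cs = _SSIMForMultiScale(
            im1, im2, max_val=max_val, filter_size=filter_size,
            filter_sigma=filter_sigma, k1=k1, k2=k2)
        mssim.append(ssim)
        mcs.append(cs)
        # 2x2 mean pooling replaces scipy.ndimage.convolve + x[:, ::2, ::2, :]
        im1 = tf.nn.avg_pool2d(im1, ksize=2, strides=2, padding="SAME")
        im2 = tf.nn.avg_pool2d(im2, ksize=2, strides=2, padding="SAME")
    mssim = tf.stack(mssim)                    # tf.stack expects a list of tensors
    mcs = tf.stack(mcs)
    return (tf.reduce_prod(mcs[:levels - 1] ** weights[:levels - 1]) *
            mssim[levels - 1] ** weights[levels - 1])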

Desired result

I want to use MS-SSIM as the loss function of my neural network, so that the model can be trained with model.fit.
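For what it's worth, if the goal is simply a working MS-SSIM loss for model.compile/model.fit, one option (assuming TensorFlow 2.x, images scaled to [0, 1], and with `model` standing for the Keras model mentioned above) is to wrap the built-in tf.image.ssim_multiscale instead of a hand-written implementation:

import tensorflow as tf

def ms_ssim_loss(y_true, y_pred):
    # tf.image.ssim_multiscale returns one MS-SSIM score per image in the batch;
    # minimizing 1 - MS-SSIM maximizes the similarity.
    return 1.0 - tf.reduce_mean(tf.image.ssim_multiscale(y_true, y_pred, max_val=1.0))

model.compile(optimizer='adam', loss=ms_ssim_loss)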