改写别人项目,但是结果图片不尽人意,结果如下:
def computeKernel(mat, nRowSamples, nColSamples, hx, hy):
    """Build the sampled kernel matrices used by the Nystrom approximation.

    Samples pixel locations from ``mat``, fills Ka (sample-vs-sample) and
    Kab (sample-vs-remaining) with exponentiated negative weighted
    distances, and records the permutation that places sampled pixels
    first in flat-index order.

    Parameters
    ----------
    mat : ndarray
        2-D image channel.
    nRowSamples, nColSamples : int
        Samples taken along rows / columns; each must not exceed the
        corresponding image dimension.
    hx, hy : float
        Spatial and photometric kernel bandwidths.

    Returns
    -------
    tuple
        ``(P, Ka, Kab)`` where ``P`` maps row k of the stacked
        eigenvector matrix to a flat pixel index.
    """
    rows, cols = mat.shape[0], mat.shape[1]
    if nRowSamples > rows or nColSamples > cols:
        raise ValueError("Number of samples per row and col must be <= that of image.")

    selected, rest = samplePixels(rows, cols, nRowSamples, nColSamples)
    nSamples = len(selected)
    nPixels = mat.size

    Ka = np.zeros((nSamples, nSamples))
    Kab = np.zeros((nSamples, nPixels - nSamples))

    # Inverse squared bandwidths weight the photometric / spatial terms.
    wPhoto = 1.0 / (hy * hy)
    wSpace = 1.0 / (hx * hx)

    for i, p1 in enumerate(selected):
        # Sample-vs-sample: fill the upper triangle and mirror it, since
        # the distance is symmetric.
        for j in range(i, nSamples):
            d = negativeWeightedDistance(mat, p1, selected[j], wSpace, wPhoto)
            Ka[i, j] = d
            Ka[j, i] = d
        # Sample-vs-remaining pixels.
        for j, p2 in enumerate(rest):
            Kab[i, j] = negativeWeightedDistance(mat, p1, p2, wSpace, wPhoto)

    Ka = np.exp(Ka)
    Kab = np.exp(Kab)

    # Sanity check: Ka should be symmetric by construction.
    if (Ka.transpose() == Ka).all():
        print("Ka is symmetric")
    else:
        print("Ka is NOT symmetric")

    # Permutation: sampled pixels first, remaining pixels after, both as
    # flat (row-major) indices.
    P = np.zeros((nPixels,), dtype=np.int32)
    for i, p in enumerate(selected):
        P[i] = to1DIndex(p[0], p[1], cols)
    for j, p in enumerate(rest):
        P[j + nSamples] = to1DIndex(p[0], p[1], cols)

    return P, Ka, Kab
def eigen2opencv(v, nrows, ncols):
    """Reshape a flat eigenvector into an ``(nrows, ncols)`` image array.

    Returns an owned copy so later in-place edits cannot alias ``v``.
    """
    return np.array(v).reshape((nrows, ncols)).copy()
def rescaleForVisualization(mat):
    """Linearly stretch ``mat`` to the [0, 255] range as uint8 for display.

    Uses numpy's min/max instead of ``cv2.minMaxLoc`` (which only accepts
    single-channel mats), and guards against a constant image, where the
    original ``(max - min)`` denominator was zero and produced NaNs.

    Parameters
    ----------
    mat : array_like
        Numeric array of any shape.

    Returns
    -------
    ndarray
        uint8 array of the same shape; a constant input maps to all zeros.
    """
    mat = np.float32(mat)
    minVal = float(np.min(mat))
    maxVal = float(np.max(mat))
    span = maxVal - minVal
    if span == 0.0:
        # Constant image: nothing to stretch, avoid dividing by zero.
        return np.zeros(mat.shape, dtype=np.uint8)
    rescaledMat = (mat - minVal) / span * 255
    return rescaledMat.astype(np.uint8)
def trainFilter(self, channel, nRowSamples, nColSamples, hx, hy, nSinkhornIter, nEigenVectors):
    """Train the learned filter on one image channel.

    Pipeline: sampled kernel -> Nystrom eigendecomposition -> Sinkhorn
    normalization -> orthogonalization, then reorder the eigenvector rows
    via the permutation ``P`` and display the first few eigenvectors as
    images for visual inspection.

    Returns
    -------
    tuple
        ``(eigvecs, eigvals)`` with eigenvector rows reordered by ``P``.
    """
    print("Computing kernel")
    P, Ka, Kab = computeKernel(channel, nRowSamples, nColSamples, hx, hy)
    print("Nystrom approximation")
    eigvals, phi = nystromApproximation(Ka, Kab)
    print("Sinkhorn")
    Wa, Wab = sinkhorn(phi, eigvals, nSinkhornIter)
    print("Orthogonalize")
    eigvecs, eigvals = orthogonalize(Wa, Wab, nEigenVectors, EPS)
    # Reorder rows by the permutation from computeKernel.
    # NOTE(review): np.take GATHERS (out[k] = eigvecs[P[k]]); if P[k] is
    # the flat pixel index of sample row k, un-permuting needs a SCATTER
    # (out[P[k]] = eigvecs[k], e.g. via argsort(P)). Confirm the intended
    # direction — a wrong direction here would scramble the output images.
    eigvecs2 = np.take(eigvecs, P, axis=0)
    # Show the leading eigenvectors as images for a quick sanity check.
    for i in range(min(nEigenVectors, 5)):
        v = eigvecs2[:, i]
        print("Eigvec ", i, " eigval: ", eigvals[i], " minCoeff: ", np.min(v), " maxCoeff: ", np.max(v))
        m = eigen2opencv(v, channel.shape[0], channel.shape[1])
        m = rescaleForVisualization(m)
        m = cv2.convertScaleAbs(m)
        cv2.imshow("image" + str(i), m)
    return eigvecs2, eigvals
图像的手绘效果:
注:将第6行的打开图片改为你的原图名称,并放在同一文件夹中。
from PIL import Image
import numpy as np

# Light-source direction in spherical coordinates.
vec_el = np.pi / 2.2   # elevation angle of the light source, radians
vec_az = np.pi / 4.    # azimuth angle of the light source, radians
depth = 10.            # relief strength (0-100)

# Load the source image as a single grayscale channel of floats.
gray = np.asarray(Image.open('fcity.jpg').convert('L')).astype('float')

# Per-axis intensity gradients, scaled down by the relief depth.
# NOTE(review): np.gradient returns (d/d_row, d/d_col); the first component
# is treated as the x-gradient here — confirm that matches the intended axes.
grad_x, grad_y = np.gradient(gray)
grad_x = grad_x * depth / 100.
grad_y = grad_y * depth / 100.

# Unit vector pointing toward the light source.
dx = np.cos(vec_el) * np.cos(vec_az)  # light contribution along x
dy = np.cos(vec_el) * np.sin(vec_az)  # light contribution along y
dz = np.sin(vec_el)                   # light contribution along z

# Normalize the per-pixel surface normal (grad_x, grad_y, 1).
norm = np.sqrt(grad_x ** 2 + grad_y ** 2 + 1.)
uni_x = grad_x / norm
uni_y = grad_y / norm
uni_z = 1. / norm

# Lambertian shading: dot product of surface normal and light direction,
# scaled to 8-bit range and clipped.
shaded = 255 * (dx * uni_x + dy * uni_y + dz * uni_z)
shaded = shaded.clip(0, 255)

# Rebuild and save the shaded image.
Image.fromarray(shaded.astype('uint8')).save('fcityHandDraw.jpg')
或可下载资源:https://download.csdn.net/download/leyang0910/87699125
可以直接运行程序,图片我放在了文件夹里了。
如对您有帮助,请采纳此回答。谢谢!
该回答通过自己的思路并参考 GPT(OpenAI)搜索得到,具体内容如下:
根据提供的代码,初步判断问题可能与 eigen2opencv 和 rescaleForVisualization 函数有关。
eigen2opencv 函数似乎将输入向量 v 重塑为形状为 (nrows, ncols) 的矩阵,但它返回重塑后矩阵的副本。不清楚为什么这里需要复制,但这可能会在重塑过程中引入一些错误。
rescaleForVisualization 函数用于将滤波器的输出重新缩放到 [0, 255] 的范围以进行可视化。但是,不清楚此函数是否正确应用或是否在重新缩放过程中引入了一些错误。
您可以尝试修改这些函数或使用不同的可视化技术来查看是否可以解决问题。此外,您可以通过打印中间结果并检查它们来尝试调试代码,以确定问题可能发生的位置。
如果以上回答对您有所帮助,点击一下采纳该答案~谢谢