Large error when solving pose with PnP

When solving the pose with PnP, the error is very large. There are two images: the depth map and pose of the first image are known, and I want to estimate the pose of the second (query) image. Through feature matching I get the 3D positions of the matched points, then solve PnP on the second image to obtain its pose.
The pose solved for the second image differs greatly from the ground truth.
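For context, the chain of transforms the code below is trying to implement looks roughly like this (a minimal sketch with dummy values, assuming pose1 is the camera-to-world matrix of the first frame and the depth is already metric; the back-projection that pixel2cam performs is written out in plain numpy here):

import numpy as np

# Minimal sketch of the transform chain (dummy values; replace with real data).
K = np.array([[525.0, 0, 319.5],
              [0, 525.0, 239.5],
              [0, 0, 1.0]])                      # example intrinsics
pose1 = np.eye(4)                                # assumed camera-to-world pose of image 1
u, v, d = 100.0, 200.0, 1.5                      # a matched pixel in image 1 and its metric depth

ray = np.linalg.inv(K) @ np.array([u, v, 1.0])   # normalized camera ray of the pixel
X_cam1 = d * ray                                 # 3D point in camera-1 coordinates
X_world = pose1[:3, :3] @ X_cam1 + pose1[:3, 3]  # valid only if pose1 is camera-to-world (T_wc)

# solvePnP(X_world, pixel in image 2, K) returns rvec/tvec of the world-to-camera
# transform T_cw of the query image, so the pose to compare with the ground truth is
# its inverse: pose2 = inv(T_cw). If pose1 is actually world-to-camera, the chain above
# must use inv(pose1) instead, which is a common source of large errors like this.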
import numpy as np
import cv2 as cv

# pixel2cam and RTmix are the asker's own helpers (not shown in the question).
class NewPoseEstimation1():
    def __init__(self, scene, im1, depthim1, queryim, K, pose1):
        self.im1 = im1            # reference image (depth and pose known)
        self.depthim1 = depthim1  # depth map of im1
        self.queryim = queryim    # query image whose pose we want
        self.K = K                # camera intrinsics
        self.pose1 = pose1        # pose of im1 (assumed camera-to-world)
        self.scene = scene

    def CalC(self):
        # Pad a 3x4 pose to a full 4x4 homogeneous matrix
        if self.pose1.shape[0] == 3:
            zero = np.zeros([1, 4])
            self.pose1 = np.append(self.pose1, zero, axis=0)
            self.pose1[3, 3] = 1
        pts_3d = []
        pts_2d = []
        key_points_1, key_points_2, matches = TransM(self.im1, self.queryim, self.K).find_feature_matches()
        for m in matches:
            # Depth of the matched keypoint in the reference image (note the (row, col) indexing)
            d = self.depthim1[int(key_points_1[m.queryIdx].pt[1]), int(key_points_1[m.queryIdx].pt[0])]
            if d == 0:
                continue
            dd = d  # apply the dataset's depth scale factor here if the map is not metric
            # Back-project the pixel into the first camera's frame
            p1 = pixel2cam(key_points_1[m.queryIdx].pt, self.K)
            p3d = np.array([p1[0, 0] * dd, p1[1, 0] * dd, dd])
            if self.scene == 'room':
                # Lift the point from camera-1 coordinates to world coordinates
                p3d = self.pose1[:3, :4] @ np.append(p3d, [1])
            pts_3d.append(p3d)
            pts_2d.append(key_points_2[m.trainIdx].pt)

        pts_3d = np.array(pts_3d, dtype=np.float64)
        pts_2d = np.array(pts_2d, dtype=np.float64)

        # PnP: 3D points against their pixel locations in the query image
        _, r, t = cv.solvePnP(pts_3d, pts_2d, self.K, None, flags=cv.SOLVEPNP_EPNP)
        # _, r, t, _ = cv.solvePnPRansac(pts_3d, pts_2d, self.K, None, flags=cv.SOLVEPNP_EPNP)
        R = cv.Rodrigues(r)[0]
        print('PNP R = \n', R, '\nt = \n', t)
        Trans1q = RTmix(R, t).Trans()      # world-to-camera transform of the query image
        querpose = np.linalg.inv(Trans1q)  # camera-to-world pose to compare with the ground truth
        return querpose
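One quick sanity check is whether the PnP result at least explains its own input: reproject pts_3d with the returned r, t and look at the pixel residuals. A minimal sketch (the helper name reprojection_error is mine, not part of the original code):

import numpy as np
import cv2 as cv

def reprojection_error(pts_3d, pts_2d, K, rvec, tvec):
    """Mean pixel residual of a PnP solution (debugging aid only)."""
    proj, _ = cv.projectPoints(pts_3d, rvec, tvec, K, np.zeros((5, 1)))
    return np.linalg.norm(proj.reshape(-1, 2) - pts_2d, axis=1).mean()

# e.g. right after cv.solvePnP in CalC:
#   print('mean reprojection error (px):', reprojection_error(pts_3d, pts_2d, self.K, r, t))

Large residuals point to bad correspondences or depth values (for example a missing depth scale factor), while small residuals combined with a wrong final pose point to a coordinate-convention problem with pose1 or the final inversion.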
class TransM():
    def __init__(self, im1, im2, K):
        self.im1 = im1
        self.im2 = im2
        self.K = K

    # ORB feature extraction and brute-force matching
    def find_feature_matches(self):
        orb = cv.ORB_create()
        # Detect keypoints and compute ORB descriptors on both images
        kp1, des1 = orb.detectAndCompute(self.im1, None)
        kp2, des2 = orb.detectAndCompute(self.im2, None)
        bf = cv.BFMatcher(cv.NORM_HAMMING)  # Hamming distance for binary descriptors
        matches = bf.match(des1, des2)
        # Coarse filtering: keep matches close (in descriptor distance) to the best one
        min_distance = min(m.distance for m in matches)
        good_match = [m for m in matches if m.distance <= max(3 * min_distance, 30)]
        print('number of matches:', len(good_match))
        outimage = cv.drawMatches(self.im1, kp1, self.im2, kp2, good_match, outImg=None)
        cv.imwrite("match1.png", outimage)
        return kp1, kp2, good_match
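Large PnP errors with ORB + BFMatcher are often driven by outlier matches. A common alternative to the min-distance filter above is Lowe's ratio test on knnMatch results, combined with the cv.solvePnPRansac call already commented out in CalC; a sketch under those assumptions:

import cv2 as cv

def ratio_test_matches(des1, des2, ratio=0.75):
    """Lowe's ratio test on binary descriptors (alternative to the min-distance filter above)."""
    bf = cv.BFMatcher(cv.NORM_HAMMING)
    knn = bf.knnMatch(des1, des2, k=2)
    return [pair[0] for pair in knn
            if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance]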

Is there any expert who can take a look at the code? I can raise the bounty 10x, please DM me.

I recommend an excellent blogger and one of his posts for reference and discussion, 【关于OpenCV的那些事——相机姿态更新】 (on camera pose update with OpenCV), link: https://blog.csdn.net/aptx704610875/article/details/48915149
(He has published several posts on pose estimation, the principle of the POSIT algorithm for 3D pose estimation with OpenCV, 3D poses, and more; they go into depth and are very professional.)

Also worth a look: https://blog.csdn.net/weixin_41010198/article/details/116028666

Principles, advantages, and disadvantages of pose estimation with epipolar geometry, PnP, and ICP:
https://blog.csdn.net/zxnzjccmily/article/details/124196877