使用的官方示例:https://github.com/stereolabs/zed-examples/blob/master/other/multi%20camera/python/multi_camera.py
能做到复数相机同时显示二维彩色图像,想实现同时获取三维点云,不知道如何实现,使用的相机型号是zed2,希望对该相机有使用经验的能帮忙指导一下
import signal
import sys
import threading
import time

import cv2
import numpy as np

import pyzed.sl as sl
zed_list = []
left_list = []
depth_list = []
timestamp_list = []
thread_list = []
stop_signal = False
def signal_handler(signal, frame):
global stop_signal
stop_signal=True
time.sleep(0.5)
exit()
def grab_run(index):
global stop_signal
global zed_list
global timestamp_list
global left_list
global depth_list
runtime = sl.RuntimeParameters()
while not stop_signal:
err = zed_list[index].grab(runtime)
if err == sl.ERROR_CODE.SUCCESS:
zed_list[index].retrieve_image(left_list[index], sl.VIEW.LEFT)
zed_list[index].retrieve_measure(depth_list[index], sl.MEASURE.DEPTH)
timestamp_list[index] = zed_list[index].get_timestamp(sl.TIME_REFERENCE.CURRENT).data_ns
time.sleep(0.001) #1ms
zed_list[index].close()
def main():
global stop_signal
global zed_list
global left_list
global depth_list
global timestamp_list
global thread_list
signal.signal(signal.SIGINT, signal_handler)
print("Running...")
init = sl.InitParameters()
init.camera_resolution = sl.RESOLUTION.HD720
init.camera_fps = 30 # The framerate is lowered to avoid any USB3 bandwidth issues
#List and open cameras
name_list = []
last_ts_list = []
cameras = sl.Camera.get_device_list()
index = 0
for cam in cameras:
init.set_from_serial_number(cam.serial_number)
name_list.append("ZED {}".format(cam.serial_number))
print("Opening {}".format(name_list[index]))
zed_list.append(sl.Camera())
left_list.append(sl.Mat())
depth_list.append(sl.Mat())
timestamp_list.append(0)
last_ts_list.append(0)
status = zed_list[index].open(init)
if status != sl.ERROR_CODE.SUCCESS:
print(repr(status))
zed_list[index].close()
index = index +1
#Start camera threads
for index in range(0, len(zed_list)):
if zed_list[index].is_opened():
thread_list.append(threading.Thread(target=grab_run, args=(index,)))
thread_list[index].start()
#Display camera images
key = ''
while key != 113: # for 'q' key
for index in range(0, len(zed_list)):
if zed_list[index].is_opened():
if (timestamp_list[index] > last_ts_list[index]):
cv2.imshow(name_list[index], left_list[index].get_data())
x = round(depth_list[index].get_width() / 2)
y = round(depth_list[index].get_height() / 2)
err, depth_value = depth_list[index].get_value(x, y)
if np.isfinite(depth_value):
print("{} depth at center: {}MM".format(name_list[index], round(depth_value)))
last_ts_list[index] = timestamp_list[index]
key = cv2.waitKey(10)
cv2.destroyAllWindows()
#Stop the threads
stop_signal = True
for index in range(0, len(thread_list)):
thread_list[index].join()
print("\nFINISH")
if __name__ == "__main__":
main()
获取点云像素值的方法有很多种,这取决于您想要如何获取:可以直接在ZED ROS封装器节点中访问ZED SDK,也可以在外部ROS节点中订阅点云;还取决于您只需要点的XYZ坐标,还是同时需要RGB值。
参考
https://pastebin.com/i71RQcU2
https://github.com/stereolabs/zed-pcl
https://answers.ros.org/question/191265/pointcloud2-access-data/
#include <iostream>
#include <stdlib.h>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
//#include <math.h>
//#include <gl/glut.h>
#include <gl/freeglut.h>
using namespace std;
float imgdata[480][640]; //注意维度,第一维表示高度,第二维表示宽度
int w=0;
int h=0;
float scalar=50;//scalar of converting pixel color to float coordinates
#define pi 3.1415926
bool mouseisdown=false;
bool loopr=false;
int mx,my;
int ry=10;
int rx=10;
void timer(int p)
{
ry-=5;
//marks the current window as needing to be redisplayed.
glutPostRedisplay();
if (loopr)
glutTimerFunc(200,timer,0);
}
void mouse(int button, int state, int x, int y)
{
if(button == GLUT_LEFT_BUTTON)
{
if(state == GLUT_DOWN)
{
mouseisdown=true;
loopr=false;
}
else
{
mouseisdown=false;
}
}
if (button== GLUT_RIGHT_BUTTON)
if(state == GLUT_DOWN)
{
loopr=true;
glutTimerFunc(200,timer,0);
}
}
void motion(int x, int y)
{
if(mouseisdown==true)
{
ry+=x-mx;
rx+=y-my;
mx=x;
my=y;
glutPostRedisplay();
}
}
void special(int key, int x, int y)
{
switch(key)
{
case GLUT_KEY_LEFT:
ry-=5;
glutPostRedisplay();
break;
case GLUT_KEY_RIGHT:
ry+=5;
glutPostRedisplay();
break;
case GLUT_KEY_UP:
rx+=5;
glutPostRedisplay();
break;
case GLUT_KEY_DOWN:
rx-=5;
glutPostRedisplay();
break;
}
}
void renderScene(void) {
glClear (GL_COLOR_BUFFER_BIT);
glLoadIdentity();// Reset the coordinate system before modifying
//gluLookAt (0.0, 0.0, 7.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0);
gluLookAt (0.0, 0.0, 7.0, -0.5, 0.5, 0.0, 0.0, 1.0, 0.0);
//该函数定义一个视图矩阵,并与当前矩阵相乘。
//第一组eyex, eyey,eyez 相机(观察者眼睛)在世界坐标的位置
//第二组centerx,centery,centerz 相机镜头对准的物体在世界坐标的位置(观察者观察的方向)
//第三组upx,upy,upz 相机向上的方向在世界坐标中的方向
//http://www.cnblogs.com/chengmin/archive/2011/09/12/2174004.html
//to invert the image
glRotatef(ry,0,1,0);
glRotatef(rx-180,1,0,0);
float imageCenterX=w*.5;
float imageCenterY=h*.5;
float x,y,z;
glPointSize(1.0);
glBegin(GL_POINTS);//GL_POINTS
for (int i=0;i<h;i++){
for (int j=0;j<w;j++){
// color interpolation
glColor3f(imgdata[i][j]/255, 1-imgdata[i][j]/255, 1-imgdata[i][j]/255);//red,green,blue
x=((float)j-imageCenterX)/scalar;
y=((float)i-imageCenterY)/scalar;
z=(255-imgdata[i][j])/scalar;
glVertex3f(x,y,z);
}
}
glEnd();
glFlush();
}
void reshape (int w, int h) {
glViewport (0, 0, (GLsizei)w, (GLsizei)h);//set viewpoint
glMatrixMode (GL_PROJECTION);//specify which matrix is the current matrix
glLoadIdentity ();//replace the current matrix with the identity matrix
gluPerspective (60, (GLfloat)w / (GLfloat)h, 1.0, 100.0);
glMatrixMode (GL_MODELVIEW);
}
void displayDisparity(IplImage* disparity){
//double xyscale=100;
int j=0;
int i=0;
CvScalar s;
//accessing the image pixels
for (i=0;i<h;i++){
for (j=0;j<w;j++){
s=cvGet2D(disparity,i,j); //获取disparity图像中坐标为(i,j)的像素点的值
imgdata[i][j]=s.val[0]; //s.val[0] 代表src图像BGR中的B通道的值
//cout << imgdata[i][j]<<endl;
}
}
}
int main(int argc, char *argv[])
{
cout << "OpenCV and OpenGL works together!"<<endl;
//char* filename=argv[1];
//IplImage* imgGrey = cvLoadImage(filename,0);
//IplImage* imgGrey = cvLoadImage("fusimg.png",1); //第二个参数:1,显示RGB;0,显示gray;缺省为1
IplImage* imgGrey = cvLoadImage("22.png",1);
if (imgGrey==NULL){
cout << "No valid image input."<<endl;
return 1;
}
w=imgGrey->width;
h=imgGrey->height;
displayDisparity(imgGrey);
cvNamedWindow("3D disparity image", CV_WINDOW_AUTOSIZE );
cvShowImage( "3D disparity image", imgGrey );
//------------------OpenGL-------------------------
glutInit(&argc, argv);//initialize the GLUT library
glutInitDisplayMode(GLUT_DEPTH | GLUT_SINGLE | GLUT_RGBA); //设置初始化显示模式
//GLUT_RGBA颜色模式;GLUT_SINGLE单缓存模式,GLUT_DEPTH深度缓存模式
//单缓冲,实际上就是将所有的绘图指令在窗口上执行,就是直接在窗口上绘图,这样的绘图效率是比较慢的,如果使用单缓冲,而电脑比较慢,你会看到屏幕的闪烁。
//双缓冲,实际上的绘图指令是在一个缓冲区完成,这里的绘图非常的快,在绘图指令完成之后,再通过交换指令把完成的图形立即显示在屏幕上,这就避免了出现绘图的不完整,同时效率很高。
//一般用OpenGL绘图都是用双缓冲,单缓冲一般只用于显示单独的一副非动态的图像。
//深度缓冲:http://zh.wikipedia.org/wiki/%E6%B7%B1%E5%BA%A6%E7%BC%93%E5%86%B2
glutInitWindowPosition(600,200); //设置初始化窗口位置
glutInitWindowSize(640,480); //设置初始化窗口大小
glutCreateWindow("3D disparity image"); //创建窗口
glutDisplayFunc(renderScene);
//glutDisplayFunc函数用于注册一个绘图函数, 这样操作系统在必要时刻就会对窗体进行重新绘制操作。类似于windows程序设计中处理WM_PAINT消息。
glutReshapeFunc (reshape);
//glutReshapeFunc了解:http://zhidao.baidu.com/link?url=K_GPs7mXbe6H1M_OA-YibtC7-1GNqv8efOKC_jMDxiK7kRSHt_zBzH5eIQaao0tK9RQGegwX2r_CH2N5nSsufK
glutMouseFunc(mouse);
glutMotionFunc(motion);
glutSpecialFunc(special);
glutMainLoop(); //glutMainLoop进入GLUT事件处理循环,让所有的与“事件”有关的函数调用无限循环。
//glutMainLoopEvent();
//cvWaitKey(0);
//release opencv stuff.
//system("pause");
cvReleaseImage(&imgGrey);
cvDestroyWindow("3D disparity image");
return 0;
}
不知道对你是否有帮助: https://blog.csdn.net/RNG_uzi_/article/details/102653790
看一下我这篇。朋友,你想问的是两个相机同时分别保存三维点云吧?
https://blog.csdn.net/weixin_45358184/article/details/111645985
望采纳谢谢啦
可以参考一下
using System.Collections;
using System.Collections.Generic;
using sl;
using UnityEngine;
/// <summary>
/// 获得ZED相机的点云
/// </summary>
public class MyPointCloudFromZED : MonoBehaviour
{
[Header("点云数据获取")] public string Header;
/// <summary>
/// 点云数据矩阵
/// </summary>
private sl.ZEDMat myPointCloud; //定义点云矩阵
/// <summary>
/// 图像数据矩阵
/// </summary>
private sl.ZEDMat myRgbImage; //定义图像矩阵
/// <summary>
/// Instance of the ZEDManager interface
/// ZEDManager界面的实例,后面应该是要将外部的ZedManager赋值给下面这个变量才对
/// </summary>
public ZEDManager zedManager = null;
/// <summary>
/// 相机的分辨率
/// </summary>
private sl.Resolution resolution;
/// <summary>
/// zed Camera controller by zedManager
/// zedManager的zed相机控制器
/// </summary>
private sl.ZEDCamera zed = null;
/// <summary>
/// 是否更新数据
/// </summary>
public bool update = true; //暂时没用上
/// <summary>
/// 前一次update的状态
/// </summary>
private bool previousUpdate = true; //暂时没用上
/// <summary>
/// 某个值
/// </summary>
private float4 onePointCloud; //定义某个接收单个点数据的变量
void Start()
{
if (zedManager == null) //如果还没有实例化
{
zedManager = FindObjectOfType<ZEDManager>(); //找到ZEDManager组件
if (ZEDManager.GetInstances().Count > 1 //如果这个组件的实例化不只一个,我的情况一般只有一个
) //We chose a ZED arbitrarily, but there are multiple cams present. Warn the user.
我们任意选择一个ZED,但是有多个相机存在。所以需要警告用户。
{
Debug.Log("Warning: " + gameObject.name +
"'s zedManager was not specified, so the first available ZEDManager instance was " +
"assigned. However, there are multiple ZEDManager's in the scene. It's recommended to specify which ZEDManager you want to " +
"use to display a point cloud.");
}
}
if (zedManager != null) //实例化后的
zed = zedManager.zedCamera; //相机设备赋值
myPointCloud = new ZEDMat(); //实例化矩阵,内部获得指针,
myRgbImage = new ZEDMat(); //实例化矩阵,内部获得指针
onePointCloud = new float4();
resolution = new sl.Resolution((uint) zed.ImageWidth, (uint) zed.ImageHeight); //设置图像分辨率
myPointCloud.Create(resolution, ZEDMat.MAT_TYPE.MAT_32F_C4); //分配指定的大小和类型的数据内存
myRgbImage.Create(resolution, ZEDMat.MAT_TYPE.MAT_8U_C3); //分配指定的大小和类型的数据内存
}
// Update is called once per frame
void Update()
{
//Don't do anything unless the ZED has been initialized.
// 除非ZED已初始化,否则请勿执行任何操作。
if (zed.IsCameraReady)
{
ERROR_CODE getPointCloudResult = zed.RetrieveMeasure(myPointCloud, MEASURE.XYZ); //返回注册点云数据的结果
if (getPointCloudResult == ERROR_CODE.SUCCESS) //如果获得矩阵数据成功的话
{
myPointCloud.GetValue(400, 400, out float4 point, ZEDMat.MEM.MEM_CPU); // 从矩阵中取出某个像素点的点云数据, 这边指定从cpu内存中获得
onePointCloud = point;
//输出点云数据值
print("x:"+onePointCloud.r.ToString()+"y:"+onePointCloud.g.ToString()+"z:"+onePointCloud.b);
}
}
}
}
class PSO():
"""
PSO class
"""
def __init__(self,iters=100,pcount=50,pdim=2,mode='min'):
"""
PSO initialization
------------------
"""
self.w = None # Inertia factor
self.c1 = None # Learning factor
self.c2 = None # Learning factor
self.iters = iters # Number of iterations
self.pcount = pcount # Number of particles
self.pdim = pdim # Particle dimension
self.gbpos = np.array([0.0]*pdim) # Group optimal position
self.mode = mode # The mode of PSO
self.cur_pos = np.zeros((pcount, pdim)) # Current position of the particle
self.cur_spd = np.zeros((pcount, pdim)) # Current speed of the particle
self.bpos = np.zeros((pcount, pdim)) # The optimal position of the particle
self.trace = [] # Record the function value of the optimal solution
def init_particles(self):
"""
init_particles function
-----------------------
"""
# Generating particle swarm
for i in range(self.pcount):
for j in range(self.pdim):
self.cur_pos[i,j] = rd.uniform(MIN_POS[j], MAX_POS[j])
self.cur_spd[i,j] = rd.uniform(MIN_SPD[j], MAX_SPD[j])
self.bpos[i,j] = self.cur_pos[i,j]
# Initial group optimal position
for i in range(self.pcount):
if self.mode == 'min':
if self.fitness(self.cur_pos[i]) < self.fitness(self.gbpos):
gbpos = self.cur_pos[i]
elif self.mode == 'max':
if self.fitness(self.cur_pos[i]) > self.fitness(self.gbpos):
gbpos = self.cur_pos[i]
def fitness(self, x):
"""
fitness function
----------------
Parameter:
x :
"""
# Objective function
fitval = 5*np.cos(x[0]*x[1])+x[0]*x[1]+x[1]**3 # min
# Retyrn value
return fitval
def adaptive(self, t, p, c1, c2, w):
"""
"""
#w = 0.95 #0.9-1.2
if t == 0:
c1 = 0
c2 = 0
w = 0.95
else:
if self.mode == 'min':
# c1
if self.fitness(self.cur_pos[p]) > self.fitness(self.bpos[p]):
c1 = C1_MIN + (t/self.iters)*C1_MAX + np.random.uniform(0,0.1)
elif self.fitness(self.cur_pos[p]) <= self.fitness(self.bpos[p]):
c1 = c1
# c2
if self.fitness(self.bpos[p]) > self.fitness(self.gbpos):
c2 = C2_MIN + (t/self.iters)*C2_MAX + np.random.uniform(0,0.1)
elif self.fitness(self.bpos[p]) <= self.fitness(self.gbpos):
c2 = c2
# w
#c1 = C1_MAX - (C1_MAX-C1_MIN)*(t/self.iters)
#c2 = C2_MIN + (C2_MAX-C2_MIN)*(t/self.iters)
w = W_MAX - (W_MAX-W_MIN)*(t/self.iters)
elif self.mode == 'max':
pass
return c1, c2, w
def update(self, t):
"""
update function
---------------
Note that :
1. Update particle position
2. Update particle speed
3. Update particle optimal position
4. Update group optimal position
"""
# Part1 : Traverse the particle swarm
for i in range(self.pcount):
# Dynamic parameters
self.c1, self.c2, self.w = self.adaptive(t,i,self.c1,self.c2,self.w)
# Calculate the speed after particle iteration
# Update particle speed
self.cur_spd[i] = self.w*self.cur_spd[i] \
+self.c1*rd.uniform(0,1)*(self.bpos[i]-self.cur_pos[i])\
+self.c2*rd.uniform(0,1)*(self.gbpos - self.cur_pos[i])
for n in range(self.pdim):
if self.cur_spd[i,n] > MAX_SPD[n]:
self.cur_spd[i,n] = MAX_SPD[n]
elif self.cur_spd[i,n] < MIN_SPD[n]:
self.cur_spd[i,n] = MIN_SPD[n]
# Calculate the position after particle iteration
# Update particle position
self.cur_pos[i] = self.cur_pos[i] + self.cur_spd[i]
for n in range(self.pdim):
if self.cur_pos[i,n] > MAX_POS[n]:
self.cur_pos[i,n] = MAX_POS[n]
elif self.cur_pos[i,n] < MIN_POS[n]:
self.cur_pos[i,n] = MIN_POS[n]
# Part2 : Update particle optimal position
for k in range(self.pcount):
if self.mode == 'min':
if self.fitness(self.cur_pos[k]) < self.fitness(self.bpos[k]):
self.bpos[k] = self.cur_pos[k]
elif self.mode == 'max':
if self.fitness(self.cur_pos[k]) > self.fitness(self.bpos[k]):
self.bpos[k] = self.cur_pos[k]
# Part3 : Update group optimal position
for k in range(self.pcount):
if self.mode == 'min':
if self.fitness(self.bpos[k]) < self.fitness(self.gbpos):
self.gbpos = self.bpos[k]
elif self.mode == 'max':
if self.fitness(self.bpos[k]) > self.fitness(self.gbpos):
self.gbpos = self.bpos[k]
def run(self):
"""
run function
-------------
"""
# Initialize the particle swarm
self.init_particles()
# Iteration
for t in range(self.iters):
# Update all particle information
self.update(t)
#
self.trace.append(self.fitness(self.gbpos))