Porting the Visual Studio UDP receive code to Qt: first I used QUdpSocket — bind(), set the buffer size, connect the signal, then readDatagram(). Reading turned out to be far too slow to meet the requirement of 1048 packets every 75 ms. So I went back to the Windows approach; CreateThread can't be used directly under Qt, so I used QThread. The run() function is as follows:
// run(): raw Winsock UDP receive loop, ported from the VS version into QThread
WSADATA wsaData;
int errorCode = WSAStartup(MAKEWORD(2, 2), &wsaData);
if (errorCode != 0) {
    qDebug() << "WSAStartup failed:" << errorCode;
    m_recvThreadError = true;
    return;
}
sockServer = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (sockServer == INVALID_SOCKET) {   // SOCKET is unsigned on Windows, so "< 0" can never fire
    m_recvThreadError = true;
    return;
}
// Enlarge both kernel socket buffers to 7 MiB to absorb bursts
int buf_size = 7 * 1024 * 1024;
if (setsockopt(sockServer, SOL_SOCKET, SO_SNDBUF, (char*)&buf_size, sizeof(int)) != 0) {
    m_recvThreadError = true;
    return;
}
if (setsockopt(sockServer, SOL_SOCKET, SO_RCVBUF, (char*)&buf_size, sizeof(int)) != 0) {
    m_recvThreadError = true;
    return;
}
struct sockaddr_in addr_Server;
addr_Server.sin_family = AF_INET;
addr_Server.sin_port = htons(m_port);
addr_Server.sin_addr.s_addr = inet_addr(m_ipAddr);

while (m_recvFlag)
{
    mutex.lock();
    // Note: if recvfrom fails and control loops back here, bind() on an already-bound
    // socket fails with WSAEINVAL, so the thread just retries every 100 ms.
    if (bind(sockServer, (struct sockaddr*)&addr_Server, sizeof(addr_Server)))
    {
        mutex.unlock();
        usleep(100000);
        continue;
    }
    in_addr addr_clt;
    int fromlen = sizeof(sockaddr);
    bool frame_head_flag = false;   // true while a frame head has been seen but the frame is incomplete
    int packetIdx = 0;
    uint64_t lastRecvTime = GetCurTimeSec();
    mutex.unlock();

    while (m_recvFlag)
    {
        int last = recvfrom(sockServer, (char*)&gRecvDataBuff, sizeof(gRecvDataBuff), 0,
                            (sockaddr*)&addr_clt, &fromlen);
        if (last == -1)
        {
            if (1000 <= (GetCurTimeSec() - lastRecvTime) && 2000 >= (GetCurTimeSec() - lastRecvTime))
                qDebug() << "no receive data for 1000ms";
            break;
        }
        lastRecvTime = GetCurTimeSec();   // refresh on every successful receive
        if (last != UDP_PACKET_SIZE)
            continue;

        // The magic word marks the first packet of a new frame
        if (gRecvDataBuff.data.SIG.magicWord[0] == 0x0102 && gRecvDataBuff.data.SIG.magicWord[1] == 0x0304 &&
            gRecvDataBuff.data.SIG.magicWord[2] == 0x0506 && gRecvDataBuff.data.SIG.magicWord[3] == 0x0708)
        {
            // A new head while the previous frame is still open means that frame never completed.
            // Count it as lost *before* allocating the new frame; the original code nulled the
            // freshly created gRecvFrame here, so the next data packet dereferenced NULL.
            if (frame_head_flag)
                m_lostFrame++;   // assuming pPackage is a smart pointer, the reassignment below frees the old frame
            gRecvFrame = pPackage(new ETH_Package_t());
            gRecvFrame->fCapTime = EthRecv::GetCurTimeSec();
            gRecvFrame->nMinPackageNo = gRecvDataBuff.sequence_number;
            gRecvFrame->nFrameID = gRecvDataBuff.data.SIG.frameID;
            gRecvFrame->nMemLen = gRecvDataBuff.data.SIG.totalPacketLen;
            gRecvFrame->nPackageTotal = gRecvDataBuff.data.SIG.totalPacketLen / 1456;   // 1456 payload bytes per packet
            gRecvFrame->nRadarIndex = gRecvDataBuff.data.SIG.radarID;
            gRecvFrame->Cache = new char[gRecvFrame->nMemLen];
            memcpy(gRecvFrame->Cache, (char*)&gRecvDataBuff.data, sizeof(CAP_FIRST_FRAME));
            memset(icheckdata, 0, sizeof(icheckdata));   // per-packet arrival flags for this frame
            frame_head_flag = true;
            icheckdata[0] = 1;
        }
        else if (frame_head_flag)   // data packet belonging to the currently open frame
        {
            mutex.lock();
            packetIdx = gRecvDataBuff.sequence_number - gRecvFrame->nMinPackageNo;
            if (packetIdx >= gRecvFrame->nPackageTotal)
            {
                // Sequence number out of range: abandon the partial frame
                gRecvFrame = NULL;
                frame_head_flag = false;
                mutex.unlock();
                continue;
            }
            memcpy(gRecvFrame->Cache + packetIdx * 1456, (char*)&gRecvDataBuff.data, sizeof(CAP_FIRST_FRAME));
            icheckdata[packetIdx] = 1;
            mutex.unlock();

            if (packetIdx == gRecvFrame->nPackageTotal - 1)   // last packet of the frame arrived
            {
                mutex.lock();
                int recvCount = 0;
                for (int i = 0; i < gRecvFrame->nPackageTotal; i++)
                    recvCount += icheckdata[i];
                if (recvCount == gRecvFrame->nPackageTotal)   // frame complete: hand it over
                {
                    qDebug() << __FUNCTION__ << __LINE__ << Log::getTime() << frame_head_flag;
                    gRecvFrame->nMaxPackageNo = gRecvDataBuff.sequence_number;
                    if (buffer.AddFrame(gRecvFrame) == -1) {
                        qDebug() << "lost";
                        gRecvFrame = NULL;
                    }
                }
                frame_head_flag = false;
                mutex.unlock();
                usleep(2);
            }
        }
    }
}
Because concurrent access from multiple Qt threads can conflict, I added a mutex; the VS version had no locks. The problem now is that the frame-loss rate is extremely high. How can I fix this?
One very likely cause of the packet loss: if the sender transmits faster than the receiver drains the socket, the OS receive buffer fills up and further datagrams are simply dropped, because the buffer's capacity is finite.
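The kernel may also clamp the size you request with setsockopt, so it is worth reading the value back. A small sketch against the Winsock code above (sockServer is the SOCKET from that code):

int granted = 0;
int optlen = sizeof(granted);
// Query the receive buffer actually granted by the OS
if (getsockopt(sockServer, SOL_SOCKET, SO_RCVBUF, (char*)&granted, &optlen) == 0)
    qDebug() << "actual SO_RCVBUF:" << granted << "bytes";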
Threads only conflict when they access the same socket or the same data at the same time; where there is no sharing, no lock is needed. Frequent locking and unlocking is expensive, so if you must lock, keep the critical section as small as possible, as in the sketch below.
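A minimal sketch of that idea (the handler name and pendingPackets queue are hypothetical, not from your code; mutex is assumed to be a QMutex member): do the expensive work on a private copy, and hold the mutex only for the shared-state update, letting QMutexLocker release it at scope exit:

// Hypothetical handler: parse without the lock, publish under the lock.
void handlePacket(const char *buf, int len)
{
    QByteArray copy(buf, len);       // private copy: no lock needed
    // ... heavy parsing/checksums on 'copy' run unlocked ...
    QMutexLocker locker(&mutex);     // short critical section starts here
    pendingPackets.enqueue(copy);    // only the shared queue is touched under the lock
}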
Suggestions:
Simplify the receive thread first: comment out the processing logic and keep only the receive call, count the datagrams received and print the count, then check whether it matches the number sent. If the counts match, your processing logic is the problem: move the post-receive processing out of the receive thread (push every datagram into a queue and have a separate thread drain and process it), as in the sketch below.
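A rough sketch of that split (PacketQueue is an assumed helper type, not from your code): the receive thread does nothing but recvfrom, count, and enqueue, while a worker thread drains the queue and does the magic-word check and frame assembly:

#include <QMutex>
#include <QWaitCondition>
#include <QQueue>
#include <QByteArray>

// Thread-safe FIFO handed from the receive thread to a worker thread.
struct PacketQueue {
    QMutex mutex;
    QWaitCondition notEmpty;
    QQueue<QByteArray> packets;

    void push(const char *buf, int len) {
        QMutexLocker locker(&mutex);
        packets.enqueue(QByteArray(buf, len));   // copy the datagram out immediately
        notEmpty.wakeOne();
    }
    QByteArray pop() {                           // blocks until a packet is available
        QMutexLocker locker(&mutex);
        while (packets.isEmpty())
            notEmpty.wait(&mutex);
        return packets.dequeue();
    }
};

// Receive thread: nothing but recvfrom + counter + enqueue.
// while (m_recvFlag) {
//     int n = recvfrom(sockServer, (char*)&gRecvDataBuff, sizeof(gRecvDataBuff), 0,
//                      (sockaddr*)&addr_clt, &fromlen);
//     if (n > 0) { ++recvCount; queue.push((char*)&gRecvDataBuff, n); }
// }
// Worker thread: QByteArray pkt = queue.pop(); then magic-word check, memcpy into Cache, ...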
If you still lose packets after that, try enlarging the receive buffer. For reference:
socket_udp = new QUdpSocket;
socket_udp->setReadBufferSize(16*1024*1024);
setSocketOption() works as well.
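For example (a sketch: ReceiveBufferSizeSocketOption maps to the OS-level SO_RCVBUF and is available since Qt 5.3; call it after bind() so the native socket exists, and 'port' here stands for your own port):

socket_udp->bind(QHostAddress::AnyIPv4, port);
// Set the kernel receive buffer (SO_RCVBUF) through the Qt wrapper
socket_udp->setSocketOption(QAbstractSocket::ReceiveBufferSizeSocketOption, 16 * 1024 * 1024);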