FFmpeg: avformat_write_header blocks and never returns

Hello. On Windows 11, using Qt5 with FFmpeg to capture the desktop and push it to the network, my code blocks inside avformat_write_header: it neither reports an error nor continues, and I wonder whether one of the related parameters is set incorrectly. I capture the desktop with gdigrab, encode to H.264, and push the stream over RTSP. My code is below (a small sketch of the timeout workaround I have been considering follows the code). Many thanks!

#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <QDebug>


MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    qDebug() << "configuration: " << avcodec_configuration();
    unsigned version = avcodec_version();
    qDebug() << "version: " << version;


    //==================================Variable definitions==========================================//
    //Screen-capture (input) variables
    AVFormatContext* pInputFormatContext = NULL;
    AVCodec* pInputCodec = NULL;
    AVCodecContext* pInputCodecContex = NULL;
    //Output-stream variables
    AVFormatContext* pOutputFormatContext = NULL;
    AVCodecContext* pOutCodecContext = NULL;
    AVCodec* pOutCodec = NULL;
    AVStream* pOutStream = NULL;
    //=============================================================================================//

    //**********Register the FFmpeg components**********//
    av_register_all();
    avformat_network_init();
    avdevice_register_all();


    //=================================Input: screen-capture setup======================================//
    int ret, i;
    int videoindex = -1;
    //Allocate an AVFormatContext and fill it with default values
    pInputFormatContext = avformat_alloc_context();
    AVDictionary* options = NULL;
    AVInputFormat* ifmt = av_find_input_format("gdigrab");//AVInputFormat describes the container/device format used for the input
    //Capture options
    av_dict_set(&options, "framerate", "25", 0);
    av_dict_set(&options, "video_size", "1920x1080", 0);
    av_dict_set(&options, "start_time_realtime", 0, 0);//NOTE: a NULL value deletes the entry, so this call effectively sets nothing
    //Grab at position 0,0 - actually open the input
    //avformat_open_input reads the header and fills the AVFormatContext fields (iformat, priv_data, nb_streams, streams, filename, start_time, duration)
    if (avformat_open_input(&pInputFormatContext, "desktop", ifmt, &options) != 0) {
        printf("Couldn't open input stream.\n");
        getchar();
        //return -1;
    }
    pInputFormatContext->probesize = 42*1024*1024;
    if ((ret = avformat_find_stream_info(pInputFormatContext, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        getchar();
        //return -1;
    }
    //Dump the input stream information
    av_dump_format(pInputFormatContext, 0, "desktop", 0);
    for (i = 0; i < (int)pInputFormatContext->nb_streams; i++)
        if (pInputFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            //std::cout << "pInputFormatContext=" << pInputFormatContext->streams[i]->codec->bit_rate << std::endl;
            videoindex = i;
            break;
        }
    //Initialize the decoder for the input stream
    pInputCodecContex = pInputFormatContext->streams[videoindex]->codec;
    pInputCodecContex->flags |= AV_CODEC_FLAG_LOW_DELAY;//0x00080000 == AV_CODEC_FLAG_LOW_DELAY (force low delay)
    pInputCodec = avcodec_find_decoder(pInputCodecContex->codec_id);
    if (pInputCodec == NULL)
    {
        printf("Codec not found.\n");
        getchar();
        //return -1;
    }
    //Open the decoder
    if (avcodec_open2(pInputCodecContex, pInputCodec, NULL) < 0)
    {
        printf("Could not open codec.\n");
        getchar();
        //return -1;
    }
    //Allocate memory for the frames
    AVFrame* pFrame;
    AVFrame* pFrameYUV;
    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();//frame that receives the converted picture (raw frame -> YUV)
    pFrameYUV->format = AV_PIX_FMT_YUV420P;
    pFrameYUV->width = pInputCodecContex->width;
    pFrameYUV->height = pInputCodecContex->height;
    unsigned char* out_buffer = (unsigned char*)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pInputCodecContex->width, pInputCodecContex->height));
    //Bind the newly allocated buffer to the frame with avpicture_fill
    avpicture_fill((AVPicture*)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pInputCodecContex->width, pInputCodecContex->height);
    //Initialize the pixel-format conversion to YUV
    struct SwsContext* img_convert_ctx;
    img_convert_ctx = sws_getContext(pInputCodecContex->width, pInputCodecContex->height, pInputCodecContex->pix_fmt, pInputCodecContex->width, pInputCodecContex->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    //=============================================================================================//


    //==============================Encoder and RTSP push=======================================//
    const char* out_file = "rtsp://localhost/sonar";   //push (output) URL

    //Output stream configuration
    //Allocate the output format context for the RTSP muxer
    avformat_alloc_output_context2(&pOutputFormatContext, NULL, "rtsp", out_file);
    if (!pOutputFormatContext) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        getchar();
        //return -1;
    }
    av_opt_set(pOutputFormatContext->priv_data, "rtsp_transport", "tcp", 0);
    pOutputFormatContext->oformat->video_codec = AV_CODEC_ID_H264;

    //Find the encoder
    pOutCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pOutCodec) {
        printf("Can not find encoder! \n");
        getchar();
        //return -1;
    }
    pOutCodecContext = avcodec_alloc_context3(pOutCodec);
    pOutCodecContext->flags |= AV_CODEC_FLAG_LOW_DELAY;//0x00080000 == AV_CODEC_FLAG_LOW_DELAY (force low delay)



    //Encoder parameters that must be set for the output stream
    //pOutCodecContext = pOutStream->codec;
    pOutCodecContext->codec_id = AV_CODEC_ID_H264;
    pOutCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
    //Pixel format
    pOutCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
    //size
    pOutCodecContext->width = pInputCodecContex->width;
    pOutCodecContext->height = pInputCodecContex->height;
    //Target bitrate
    pOutCodecContext->bit_rate = 1000;// 4000000;
    //GOP size: one I-frame every gop_size frames (fewer I-frames -> smaller output)
    pOutCodecContext->gop_size = 1;// 10;
    //Optional parameter: B-frames
    pOutCodecContext->max_b_frames = 0;  //with max_b_frames = 0, DTS equals PTS
    //Time base
    pOutCodecContext->time_base.num = 1;
    pOutCodecContext->time_base.den = 20;// 25;


    //H264
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    //pCodecCtx->qcompress = 0.6;

    //Minimum and maximum quantizers
    pOutCodecContext->qmin = 10;
    pOutCodecContext->qmax = 51;
    //if (pOutputFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
    //pOutCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    AVDictionary* param = 0;
    //H.264 (x264) private encoder options
    av_opt_set(pOutCodecContext->priv_data, "preset", "superfast", 0);
    //av_opt_set(pOutCodecContext->priv_data, "tune", "zerolatency", 0);//real-time (low-latency) encoding

    //Open the encoder
    if (avcodec_open2(pOutCodecContext, pOutCodec, &param) < 0)
    {
        printf("Failed to open encoder! \n");
        getchar();
        //return -1;
    }

    //Create the output stream and do the initial setup
    pOutStream = avformat_new_stream(pOutputFormatContext, pOutCodec);
    if (pOutStream == NULL)
    {
        printf("Failed create pOutStream!\n");
        getchar();
        //return -1;
    }
    pOutStream->time_base.num = 1;
    pOutStream->time_base.den = 20;

    pOutStream->start_time = 0;
    pOutStream->duration = 0;


    avcodec_parameters_from_context(pOutStream->codecpar, pOutCodecContext);
    pOutStream->codec->codec = pOutCodecContext->codec;
    //pOutputFormatContext->streams[0]->codec->codec = pOutCodecContext->codec;
    //=============================================================================================//


    //================================Write data to the output stream======================================//
    //Write the stream header
    //av_dict_set(&param, "stimeout", "3000000", 0);//timeout intended to force avformat_write_header to give up

    int r = -1;
    r = avformat_write_header(pOutputFormatContext, &param);
    if (r < 0)
    {
        printf("Failed write header!\n");
        getchar();
        //return -1;
    }

    AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
    int got_picture;

    AVPacket pkt;
    int picture_size = avpicture_get_size(pOutCodecContext->pix_fmt, pOutCodecContext->width, pOutCodecContext->height);
    av_new_packet(&pkt, picture_size);

    int frame_index = 0;
    while ((av_read_frame(pInputFormatContext, packet)) >= 0)
    {
        printf("to av_read_frame! \n");
        if (packet->stream_index == videoindex)
        {

            //Actual decoding: packet -> pFrame
            avcodec_decode_video2(pInputCodecContex, pFrame, &got_picture, packet);
            //printf("decoded packet into pFrame! \n%d", got_picture);
            if (got_picture)
            {
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pInputCodecContex->height, pFrameYUV->data, pFrameYUV->linesize);
                pFrameYUV->pts = frame_index;

                int picture;
                //Actual encoding: pFrameYUV -> pkt
                int ret = avcodec_encode_video2(pOutCodecContext, &pkt, pFrameYUV, &picture);
                //pkt.pts = frame_index;
                //pkt.dts = pkt.pts;
                if (ret < 0) {
                    printf("Failed to encode! \n");
                    getchar();
                    //return -1;
                }
                if (picture == 1)
                {
                    pkt.stream_index = pOutStream->index;
                    AVRational time_base = pOutStream->time_base;//{ 1, 1000 };
                    AVRational r_framerate1 = { 50, 2 };//{ 50, 2 };
                    int64_t calc_pts = (double)frame_index * (AV_TIME_BASE)* (1 / av_q2d(r_framerate1));
                    pkt.pts = av_rescale_q(calc_pts, { 1, AV_TIME_BASE }, time_base);  //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                    pkt.dts = pkt.pts;
                    frame_index++;
                    ret = av_interleaved_write_frame(pOutputFormatContext, &pkt);//push to the RTSP server
                    //av_dump_format(pOutputFormatContext, 0, out_file, 1);

                    if (ret < 0) {
                        printf("Error muxing packet\n");
                        break;
                    }

                    av_free_packet(&pkt);
                }
            }
        }
        av_free_packet(packet);
    }

    //Write the trailer
    av_write_trailer(pOutputFormatContext);
    //=============================================================================================//

    //Free resources
    sws_freeContext(img_convert_ctx);
    //fclose(fp_yuv);
    av_free(out_buffer);
    av_free(pFrameYUV);
    av_free(pFrame);
    avcodec_close(pInputCodecContex);
    avformat_close_input(&pInputFormatContext);

    avcodec_close(pOutStream->codec);
    av_free(pOutCodec);
    avcodec_close(pOutCodecContext);
    avformat_free_context(pOutputFormatContext);

}

MainWindow::~MainWindow()
{
    delete ui;
}
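
For reference, here is a minimal sketch of the timeout idea behind the commented-out "stimeout" line above, assuming the usual libavformat interrupt-callback mechanism: AVFormatContext::interrupt_callback is polled by the network protocols, so a blocking call such as avformat_write_header can be made to return an error instead of hanging when nothing answers at rtsp://localhost/sonar. WriteHeaderGuard and the 5-second deadline are illustrative names/values of mine, not part of the program above.

extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/time.h>
}

// Opaque data for the interrupt callback: an absolute deadline in microseconds.
struct WriteHeaderGuard {
    int64_t deadline_us;
};

// Returning non-zero asks libavformat to abort the blocking network operation.
static int interrupt_cb(void* opaque)
{
    const WriteHeaderGuard* g = static_cast<const WriteHeaderGuard*>(opaque);
    return av_gettime_relative() > g->deadline_us ? 1 : 0;
}

// Usage sketch (install the callback before calling avformat_write_header):
//   WriteHeaderGuard guard{ av_gettime_relative() + 5 * 1000 * 1000 };  // ~5 s
//   pOutputFormatContext->interrupt_callback.callback = interrupt_cb;
//   pOutputFormatContext->interrupt_callback.opaque   = &guard;
//   int r = avformat_write_header(pOutputFormatContext, &param);
//   if (r < 0) { /* fails (e.g. AVERROR_EXIT) instead of blocking forever */ }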
