为什么FFMPEG屏幕记录器输出只显示绿色屏幕? [英] Why FFMPEG screen recorder output shows green screen only?

查看:99
本文介绍了为什么FFMPEG屏幕记录器输出只显示绿色屏幕?的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

我正在使用FFMPEG库编写一个屏幕记录器,但在输出中它只显示一个绿屏视频。 我的代码如下

#define __STDC_CONSTANT_MACROS
#include<iostream>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
//#include <libswscale/swscale.h>
}
using namespace std;
//const char* out_filename = "D:\my\folder\to\Frame.jpg";

/**
 * Allocate a decoder context from stream parameters.
 * Looks up the decoder for par->codec_id, allocates a context for it and
 * copies the parameters in. Returns NULL on failure instead of a
 * half-initialized context (the original ignored both error paths).
 * Caller owns the returned context (free with avcodec_free_context).
 */
AVCodecContext* GetCodecContextFromPar(AVCodecParameters * par)
{
    AVCodecContext* cntxt = avcodec_alloc_context3(avcodec_find_decoder(par->codec_id));
    if (cntxt == NULL)
        return NULL; // allocation failed (or no decoder found)
    if (avcodec_parameters_to_context(cntxt, par) < 0)
    {
        avcodec_free_context(&cntxt); // don't leak on parameter-copy failure
        return NULL;
    }
    return cntxt;
}
/**
 * send/receive shim emulating the removed avcodec_decode_video2() API.
 * Sends @avpkt to the decoder and drains all ready frames into @frame
 * (the caller only sees the last one). *got_picture_ptr is set to 1 only
 * if at least one complete frame was produced.
 * Returns 0 on success, -1 if the packet could not be sent.
 */
int AvCodecDecodeVideo2(AVCodecContext* avctx, AVFrame* frame, int* got_picture_ptr, const AVPacket* avpkt)
{
    // BUG in original: "*got_picture_ptr = 0;" was placed AFTER "return -1;"
    // and never executed, and the flag was set to 1 unconditionally even when
    // avcodec_receive_frame() produced nothing.
    *got_picture_ptr = 0;

    int ret = avcodec_send_packet(avctx, avpkt);
    if (ret < 0)
        return -1;

    // Drain the decoder; loop exits when it returns AVERROR(EAGAIN)/AVERROR_EOF.
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret >= 0)
            *got_picture_ptr = 1; // at least one frame decoded
    }
    return 0;
}

/**
 * send/receive shim emulating the removed avcodec_encode_video2() API.
 * Sends @frame to the encoder once and tries to receive one packet into
 * @avpkt. *got_packet_ptr is set to 1 when a packet was produced.
 * Returns 0 on success, -1 on error (message printed to stdout).
 */
int AvCodecEncodeVideo2(AVCodecContext* avctx, AVPacket* avpkt, const AVFrame* frame, int* got_packet_ptr)
{
    *got_packet_ptr = 0;

    // BUG in original: the same frame was re-sent in a loop
    // ("while (Res >= 0) Res = avcodec_send_frame(...)") until the encoder
    // errored out, queueing duplicate pictures. Send it exactly once.
    int Res = avcodec_send_frame(avctx, frame);
    if (Res < 0)
    {
        // BUG in original: a 1-byte buffer (char str2[] = "") was passed to
        // av_make_error_string, truncating every message to "".
        char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
        cout << "\nError :" << av_make_error_string(errbuf, sizeof(errbuf), Res);
        return -1;
    }

    Res = avcodec_receive_packet(avctx, avpkt);
    if (Res == 0)
    {
        *got_packet_ptr = 1;
        return 0;
    }
    // AVERROR(EAGAIN)/AVERROR_EOF also land here: no packet available yet.
    char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
    cout << "\nError :" << av_make_error_string(errbuf, sizeof(errbuf), Res);
    return -1;
}
int main(int argc, char** argv)
{
    const char* out_filename = "D:\myfolder\to\output\new_out.mp4";
    avdevice_register_all();
    AVOutputFormat* ofmt = NULL;
    AVInputFormat* ifmt = NULL;
    AVFormatContext* ifmt_ctx = avformat_alloc_context();
    AVFormatContext* ofmt_ctx = avformat_alloc_context();
    AVCodecParameters * av_codec_par_in = avcodec_parameters_alloc();
    AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
    AVCodecContext* avcodec_contx = NULL;
    AVCodec* av_codec;

    AVStream* video_stream = NULL;
    av_codec_par_out->height = 480;
    av_codec_par_out->width = 640;
    av_codec_par_out->bit_rate = 40000;
    av_codec_par_out->codec_id = AV_CODEC_ID_MPEG4;
    av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
    av_codec_par_out->format = 0;
    av_codec_par_out->sample_aspect_ratio.den = 3;
    av_codec_par_out->sample_aspect_ratio.num = 4;

    AVDictionary* options = NULL;

    av_dict_set(&options,"framerate","30",0);
    av_dict_set(&options,"offset_x","20",0);
    av_dict_set(&options,"offset_y","40",0);
    av_dict_set(&options,"video_size","640x480",0);
    int ret, i;
    ifmt = av_find_input_format("gdigrab");
    
    if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) <0)
    {
        cout << "Error in opening file";
        exit(1);
    }
    int VideoStreamIndx = -1;
    avformat_find_stream_info(ifmt_ctx, NULL);
    /* find the first video stream index . Also there is an API available to do the below operations */
    for (int i = 0; i < ifmt_ctx->nb_streams; i++) // find video stream posistion/index.
    {
        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            VideoStreamIndx = i;
            break;
        }

    }
    if (VideoStreamIndx == -1)
    {
        cout << "
unable to find the video stream index. (-1)";
        exit(1);
    }
    av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
    av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
    if (av_codec == NULL)
    {
        cout << "
unable to find the decoder";
        exit(1);
    }
    avcodec_contx = avcodec_alloc_context3(av_codec);
    if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0)
    {
        cout << "
error in converting the codec contexts";
        exit(1);
    }

    //av_dict_set
    int value = avcodec_open2(avcodec_contx, av_codec, NULL);//Initialize the AVCodecContext to use the given AVCodec.
    if (value < 0)
    {
        cout << "
unable to open the av codec";
        exit(1);
    }
    value = 0;
    ofmt = av_guess_format(NULL, out_filename, NULL);
    if (!ofmt)
    {
        cout << "
error in guessing the video format. try with correct format";
        exit(1);
    }
    avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);
    if (!ofmt_ctx)
    {
        cout << "
error in allocating av format output context";
        exit(1);
    }
    AVCodec * av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
    if (av_codec_out == NULL)
    {
        cout << "
unable to find the encoder";
        exit(1);
    }
    video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
    if (!video_stream)
    {
        cout << "
error in creating a av format new stream";
        exit(1);
    }
    AVCodecContext* av_cntx_out;
    av_cntx_out = avcodec_alloc_context3(av_codec_out);
    if (!av_cntx_out)
    {
        cout << "
error in allocating the codec contexts";
        exit(1);
    }
    
    if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0)
    {
        cout << "
Codec parameter canot copied";
        exit(1);
    }
    if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0)
    {
        cout << "
error in converting the codec contexts";
        exit(1);
    }
    
    //av_cntx_out->pix_fmt = AV_PIX_FMT_YUV420P;
    av_cntx_out->gop_size = 3;
    av_cntx_out->max_b_frames = 2;
    av_cntx_out->time_base.num = 1;
    av_cntx_out->time_base.den = 30; //
    value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
    if (value < 0)
    {
        cout << "
unable to open the av codec";
        exit(1);
    }
    if (avcodec_contx->codec_id == AV_CODEC_ID_H264)
    {
        av_opt_set(av_cntx_out->priv_data, "preset", "slow", 0);
    }
    avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
    {
        av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    if (avformat_write_header(ofmt_ctx, NULL) < 0)
    {
        cout << "
error in writing the header context";
        exit(1);
    }
    AVPacket * av_pkt = av_packet_alloc();
    av_init_packet(av_pkt);
    AVFrame * av_frame = av_frame_alloc();
    if (!av_frame)
    {
        cout << "
unable to release the avframe resources";
        exit(1);
    }
    AVFrame * outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
    if (!outFrame)
    {
        cout << "
unable to release the avframe resources for outframe";
        exit(1);
    }

    //int video_outbuf_size;
    //int nbytes = av_image_get_buffer_size(av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32);
    //uint8_t* video_outbuf = (uint8_t*)av_malloc(nbytes);
    //if (video_outbuf == NULL)
    //{
    //  cout << "
unable to allocate memory";
    //  exit(1);
    //}
    
    av_frame->width = avcodec_contx->width;
    av_frame->height = avcodec_contx->height;
    av_frame->format = av_codec_par_in->format;
    outFrame->width = av_cntx_out->width;
    outFrame->height = av_cntx_out->height;
    outFrame->format = av_codec_par_out->format;
    av_frame_get_buffer(av_frame, 0);
    av_frame_get_buffer(outFrame, 0);

    //value = av_image_fill_arrays(outFrame->data, outFrame->linesize, video_outbuf, av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32); // returns : the size in bytes required for src
    //if (value < 0)
    //{
    //  cout << "
error in filling image array";
    //}
    SwsContext* swsCtx = sws_alloc_context();
    if (sws_init_context(swsCtx, NULL, NULL) < 0)
    {
        cout << "
Unable to Initialize the swscaler context sws_context.";
        exit(1);
    }
    swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
                            av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
                            SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (swsCtx == NULL)
    {
        cout << "
 Cannot alocate SWC Context";
        exit(1);
    }
    int ii = 0;
    int no_frames = 100;
    cout << "
enter No. of frames to capture : ";
    cin >> no_frames;
    int flag;
    int frameFinished;
    int got_picture;
    int frame_index = 0;
    AVPacket * outPacket=av_packet_alloc();
    
    int j = 0;
    while (av_read_frame(ifmt_ctx, av_pkt) >= 0)
    {
        if (ii++ == no_frames)break;
        if (av_pkt->stream_index == VideoStreamIndx)
        {
            value = AvCodecDecodeVideo2(avcodec_contx, av_frame, &frameFinished, av_pkt);
            if (value < 0)
            {
                cout << "unable to decode video";
                exit(1);
            }

            if (frameFinished)// Frame successfully decoded :)
            {
                av_init_packet(outPacket);
                //int iHeight =sws_scale(swsCtx, av_frame->data, av_frame->linesize, 0, avcodec_contx->height, outFrame->data, outFrame->linesize);
                outPacket->data = NULL;    // packet data will be allocated by the encoder
                outPacket->size = 0;

                if (AvCodecEncodeVideo2(av_cntx_out, outPacket, outFrame, &got_picture) < 0)
                {
                    cout << "unable to encode video";
                    exit(1);
                }

                if (got_picture)
                {
                    if (outPacket->pts != AV_NOPTS_VALUE)
                        outPacket->pts = av_rescale_q(outPacket->pts, av_cntx_out->time_base, video_stream->time_base);
                    if (outPacket->dts != AV_NOPTS_VALUE)
                        outPacket->dts = av_rescale_q(outPacket->dts, av_cntx_out->time_base, video_stream->time_base);

                    printf("Write frame %3d (size= %2d)
", j++, outPacket->size / 1000);
                    if (av_write_frame(ofmt_ctx, outPacket) != 0)
                    {
                        cout << "
error in writing video frame";
                    }

                    av_packet_unref(outPacket);
                } // got_picture

                av_packet_unref(outPacket);
            } // frameFinished

        }
    }// End of while-loop

    value = av_write_trailer(ofmt_ctx);
    if (value < 0)
    {
        cout << "
error in writing av trailer";
        exit(1);
    }


    //THIS WAS ADDED LATER
    /*av_free(video_outbuf);*/
    avformat_close_input(&ifmt_ctx);
    if (!ifmt_ctx)
    {
        cout << "
file closed sucessfully";
    }
    else
    {
        cout << "
unable to close the file";
        exit(1);
    }

    avformat_free_context(ifmt_ctx);
    if (!ifmt_ctx)
    {
        cout << "
avformat free successfully";
    }
    else
    {
        cout << "
unable to free avformat context";
        exit(1);
    }
    return 0;
}

我不明白我的代码出了什么问题。 我正在使用Microsoft VS 2019进行编译; 我尝试将av_write_frame更改为av_interleed_write_frame,但没有成功。我注释掉了SWS_SCALE部分,因为它显示了错误的屏幕图像指针。

yuv

推荐答案

绿屏是帧在YUV颜色空间中被零填充的结果（YUV值等于(0,0,0)的像素显示为绿色）。

您正在配置sws_getContext,但您没有使用它。

对抓取的视频帧进行解码后,结果为BGRA像素格式的帧。
我们应该将帧从BGRA转换为YUV420P像素格式,并将结果写入输出编码器。


执行您的代码示例时,我遇到了一些奇怪的行为。
我尝试参考相关帖子中的代码片段修复了代码。

注意:

  • 我还在学习如何使用FFmpeg的C接口。
    有一些实现详细信息我不确定。
    仍需要进行一些打磨.

更新代码:

#define __STDC_CONSTANT_MACROS
#include<iostream>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
using namespace std;

/* Build a decoder context mirroring the given stream parameters: look up the
 * decoder for par->codec_id, allocate a context for it, and copy par in.
 * (Error returns are not checked — same contract as the original.) */
AVCodecContext* GetCodecContextFromPar(AVCodecParameters* par)
{
    AVCodecContext* decoder_ctx =
        avcodec_alloc_context3(avcodec_find_decoder(par->codec_id));
    avcodec_parameters_to_context(decoder_ctx, par);
    return decoder_ctx;
}

/**
 * send/receive shim emulating the removed avcodec_decode_video2() API
 * (duplicate of the helper in the question's listing, same fix applied).
 * Sends @avpkt to the decoder and drains all ready frames into @frame;
 * *got_picture_ptr is 1 only if at least one frame came out.
 * Returns 0 on success, -1 if the packet could not be sent.
 */
int AvCodecDecodeVideo2(AVCodecContext* avctx, AVFrame* frame, int* got_picture_ptr, const AVPacket* avpkt)
{
    // BUG in original: "*got_picture_ptr = 0;" was dead code after
    // "return -1;", and the flag was set to 1 even when no frame arrived.
    *got_picture_ptr = 0;

    int ret = avcodec_send_packet(avctx, avpkt);
    if (ret < 0)
        return -1;

    // Drain the decoder; loop ends on AVERROR(EAGAIN)/AVERROR_EOF.
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret >= 0)
            *got_picture_ptr = 1; // at least one frame decoded
    }
    return 0;
}


int main(int argc, char** argv)
{
    //const char* out_filename = "D:\myfolder\to\output\new_out.mp4";
    const char* out_filename = "new_out.mp4";
    avdevice_register_all();
    AVOutputFormat* ofmt = NULL;
    AVInputFormat* ifmt = NULL;
    AVFormatContext* ifmt_ctx = avformat_alloc_context();
    AVFormatContext* ofmt_ctx = avformat_alloc_context();
    AVCodecParameters* av_codec_par_in = avcodec_parameters_alloc();
    AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
    AVCodecContext* avcodec_contx = NULL;
    AVCodec* av_codec;

    AVStream* video_stream = NULL;
    av_codec_par_out->height = 480;
    av_codec_par_out->width = 640;
    av_codec_par_out->bit_rate = 40000;
    av_codec_par_out->codec_id = AV_CODEC_ID_H264; //AV_CODEC_ID_MPEG4; //Try H.264 instead of MPEG4
    av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
    av_codec_par_out->format = 0;
    av_codec_par_out->sample_aspect_ratio.den = 3;
    av_codec_par_out->sample_aspect_ratio.num = 4;

    AVDictionary* options = NULL;

    av_dict_set(&options, "framerate", "30", 0);
    av_dict_set(&options, "offset_x", "20", 0);
    av_dict_set(&options, "offset_y", "40", 0);
    av_dict_set(&options, "video_size", "640x480", 0);
    //int ret, i;
    ifmt = av_find_input_format("gdigrab");

    if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) < 0)
    {
        cout << "Error in opening file";
        exit(1);
    }
    int VideoStreamIndx = -1;
    avformat_find_stream_info(ifmt_ctx, NULL);
    /* find the first video stream index . Also there is an API available to do the below operations */
    for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++) // find video stream position/index.
    {
        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            VideoStreamIndx = i;
            break;
        }

    }
    if (VideoStreamIndx == -1)
    {
        cout << "
unable to find the video stream index. (-1)";
        exit(1);
    }
    av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
    av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
    if (av_codec == NULL)
    {
        cout << "
unable to find the decoder";
        exit(1);
    }
    avcodec_contx = avcodec_alloc_context3(av_codec);

    //Consider using preset and crf
    //av_opt_set(avcodec_contx->priv_data, "preset", "fast", 0);
    //av_opt_set(avcodec_contx->priv_data, "crf", "18", 0);


    if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0)
    {
        cout << "
error in converting the codec contexts";
        exit(1);
    }

    //av_dict_set
    int value = avcodec_open2(avcodec_contx, av_codec, NULL);//Initialize the AVCodecContext to use the given AVCodec.
    if (value < 0)
    {
        cout << "
unable to open the av codec";
        exit(1);
    }
    value = 0;


    ofmt = av_guess_format(NULL, out_filename, NULL);


    if (!ofmt)
    {
        cout << "
error in guessing the video format. try with correct format";
        exit(1);
    }
    avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);

    if (!ofmt_ctx)
    {
        cout << "
error in allocating av format output context";
        exit(1);
    }
    AVCodec* av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
    if (av_codec_out == NULL)
    {
        cout << "
unable to find the encoder";
        exit(1);
    }
    video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
    if (!video_stream)
    {
        cout << "
error in creating a av format new stream";
        exit(1);
    }
    AVCodecContext* av_cntx_out;
    av_cntx_out = avcodec_alloc_context3(av_codec_out);
    if (!av_cntx_out)
    {
        cout << "
error in allocating the codec contexts";
        exit(1);
    }

    if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0)
    {
        cout << "
Codec parameter canot copied";
        exit(1);
    }
    if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0)
    {
        cout << "
error in converting the codec contexts";
        exit(1);
    }

    //av_cntx_out->pix_fmt = AV_PIX_FMT_YUV420P;
    av_cntx_out->gop_size = 30;//3; //Use I-Frame frame every second.
    av_cntx_out->max_b_frames = 2;
    av_cntx_out->time_base.num = 1;
    av_cntx_out->time_base.den = 30;

    value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
    if (value < 0)
    {
        cout << "
unable to open the av codec";
        exit(1);
    }
    if (avcodec_contx->codec_id == AV_CODEC_ID_H264)
    {
        av_opt_set(av_cntx_out->priv_data, "preset", "slow", 0);
    }
    avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
    {
        av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    if (avformat_write_header(ofmt_ctx, NULL) < 0)
    {
        cout << "
error in writing the header context";
        exit(1);
    }
    AVPacket* av_pkt = av_packet_alloc();
    //av_init_packet(av_pkt); //error C4996: 'av_init_packet': was declared deprecated
    memset(av_pkt, 0, sizeof(AVPacket)); //???

    AVFrame* av_frame = av_frame_alloc();
    if (!av_frame)
    {
        cout << "
unable to release the avframe resources";
        exit(1);
    }
    AVFrame* outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
    if (!outFrame)
    {
        cout << "
unable to release the avframe resources for outframe";
        exit(1);
    }

    //int video_outbuf_size;
    //int nbytes = av_image_get_buffer_size(av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32);
    //uint8_t* video_outbuf = (uint8_t*)av_malloc(nbytes);
    //if (video_outbuf == NULL)
    //{
    //  cout << "
unable to allocate memory";
    //  exit(1);
    //}

    av_frame->width = avcodec_contx->width;
    av_frame->height = avcodec_contx->height;
    av_frame->format = av_codec_par_in->format;
    outFrame->width = av_cntx_out->width;
    outFrame->height = av_cntx_out->height;
    outFrame->format = av_codec_par_out->format;
    av_frame_get_buffer(av_frame, 0);
    av_frame_get_buffer(outFrame, 0);

    //value = av_image_fill_arrays(outFrame->data, outFrame->linesize, video_outbuf, av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32); // returns : the size in bytes required for src
    //if (value < 0)
    //{
    //  cout << "
error in filling image array";
    //}
    SwsContext* swsCtx = sws_alloc_context();
    if (sws_init_context(swsCtx, NULL, NULL) < 0)
    {
        cout << "
Unable to Initialize the swscaler context sws_context.";
        exit(1);
    }
    swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
        av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
        SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (swsCtx == NULL)
    {
        cout << "
 Cannot allocate SWC Context";
        exit(1);
    }
    int ii = 0;
    int enc_packet_counter = 0;  //Count encoded frames.
    int no_frames = 100;
    //cout << "
enter No. of frames to capture : ";
    //cin >> no_frames;
    //int flag;
    int frameFinished;
    //int got_picture;
    int frame_index = 0;
    AVPacket* outPacket = av_packet_alloc();

    int j = 0;
    while (av_read_frame(ifmt_ctx, av_pkt) >= 0)
    {
        if (ii++ == no_frames)
            break;

        if (av_pkt->stream_index == VideoStreamIndx)
        {
            //value = AvCodecDecodeVideo2(avcodec_contx, av_frame, &frameFinished, av_pkt);
            //if (value < 0)
            //{
            //    cout << "unable to decode video";
            //    exit(1);
            //} 
            int ret = avcodec_send_packet(avcodec_contx, av_pkt);
            if (ret < 0)
            {
                printf("Error while sending packet");
            }


            frameFinished = true;
            int response = 0;

            //av_frame_unref(av_frame); //???

            //do
            //{
            response = avcodec_receive_frame(avcodec_contx, av_frame);

            if (response < 0) //&& (response != AVERROR(EAGAIN)) && (response != AVERROR_EOF))
            {
                printf("Error while receiving frame from decoder");
                frameFinished = false;
            }
            //}
            //while (response == AVERROR(EAGAIN));


            if (frameFinished)// Frame successfully decoded :)
            {
                //av_init_packet(outPacket); //error C4996: 'av_init_packet': was declared deprecated
                memset(outPacket, 0, sizeof(AVPacket)); //???

                //int iHeight =sws_scale(swsCtx, av_frame->data, av_frame->linesize, 0, avcodec_contx->height, outFrame->data, outFrame->linesize);
                outPacket->data = NULL;    // packet data will be allocated by the encoder
                outPacket->size = 0;

                outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
                if (outPacket->dts != AV_NOPTS_VALUE)
                    outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
                outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
                outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???

                outFrame->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);           //???
                outFrame->pkt_duration = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);  //???
                enc_packet_counter++;


                //Apply color space conversion from BGRA to YUV420p using sws_scale
                ////////////////////////////////////////////////////////////////
                int sts = sws_scale(swsCtx,                 //struct SwsContext *c, 
                    av_frame->data,         //const uint8_t *const srcSlice[],
                    av_frame->linesize,     //const int srcStride[], 
                    0,                      //int srcSliceY, 
                    av_frame->height,       //int srcSliceH,
                    outFrame->data,         //uint8_t *const dst[], 
                    outFrame->linesize);    //const int dstStride[]);

                if (sts < 0)
                {
                    printf("Error while executing sws_scale");
                }
                ////////////////////////////////////////////////////////////////


                int ret = 0;
                do
                {
                    if (ret == AVERROR(EAGAIN))
                    {
                        av_packet_unref(outPacket);
                        ret = avcodec_receive_packet(av_cntx_out, outPacket);
                        if (ret) break; // deal with error

                        outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???

                        av_write_frame(ofmt_ctx, outPacket);
                    }
                    else if (ret != 0)
                    {
                        char str2[] = "";
                        cout << "
Error :" << av_make_error_string(str2, sizeof(str2), ret);
                        return -1;
                    }

                    ret = avcodec_send_frame(av_cntx_out, outFrame);
                } while (ret);


                //if (AvCodecEncodeVideo2(av_cntx_out, outPacket, outFrame, &got_picture) < 0)
                //{
                //    cout << "unable to encode video";
                //    exit(1);
                //}
                //
                //if (got_picture)
                //{
                //    if (outPacket->pts != AV_NOPTS_VALUE)
                //        outPacket->pts = av_rescale_q(outPacket->pts, av_cntx_out->time_base, video_stream->time_base);
                //    if (outPacket->dts != AV_NOPTS_VALUE)
                //        outPacket->dts = av_rescale_q(outPacket->dts, av_cntx_out->time_base, video_stream->time_base);
                //
                //    //Set packet duration
                //    ////////////////////////////////////////////////////////////
                //    //AVRational avg_frame_rate = av_make_q(30, 1);   //30 fps
                //    //int64_t avp_duration = av_cntx_out->time_base.den / av_cntx_out->time_base.num / avg_frame_rate.num * avg_frame_rate.den;
                //    //outPacket->duration = avp_duration;
                //    outPacket->duration = 1; //Since the time base is 1/30, the duration equals 1
                //    ////////////////////////////////////////////////////////////
                //
                //    printf("Write frame %3d (size= %2d)
", j++, outPacket->size / 1000);
                //    if (av_write_frame(ofmt_ctx, outPacket) != 0)
                //    {
                //        cout << "
error in writing video frame";
                //    }
                //
                //    av_packet_unref(outPacket);
                //} // got_picture
                //
                //av_packet_unref(outPacket);
            } // frameFinished

        }
    }// End of while-loop

    // flush the rest of the packets ???
    ////////////////////////////////////////////////////////////
    int ret = 0;
    avcodec_send_frame(av_cntx_out, NULL);
    do
    {
        av_packet_unref(outPacket);
        ret = avcodec_receive_packet(av_cntx_out, outPacket);
        if (!ret)
        {
            outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
            outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
            outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
            av_write_frame(ofmt_ctx, outPacket);
            enc_packet_counter++;
        }
    } while (!ret);
    ////////////////////////////////////////////////////////////


    value = av_write_trailer(ofmt_ctx);
    if (value < 0)
    {
        cout << "
error in writing av trailer";
        exit(1);
    }


    //THIS WAS ADDED LATER
    /*av_free(video_outbuf);*/
    avformat_close_input(&ifmt_ctx);
    if (!ifmt_ctx)
    {
        cout << "
file closed successfully";
    }
    else
    {
        cout << "
unable to close the file";
        exit(1);
    }

    avformat_free_context(ifmt_ctx);
    if (!ifmt_ctx)
    {
        cout << "
avformat free successfully";
    }
    else
    {
        cout << "
unable to free avformat context";
        exit(1);
    }

    //Free codec context.
    ////////////////////////////////////////////////////////////
    avcodec_free_context(&av_cntx_out);

    if (!av_cntx_out)
    {
        cout << "
avcodec free successfully";
    }
    else
    {
        cout << "
unable to free avcodec context";
        exit(1);
    }

    ////////////////////////////////////////////////////////////

    return 0;
}

示例输出帧：

如您所见,我也在使用Microsoft Visual Studio 2019:)

这篇关于为什么FFMPEG屏幕记录器输出只显示绿色屏幕?的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆