使用FFMPEG解码mp4 / mkv失败 [英] Decoding mp4/mkv using FFMPEG fails

查看:408
本文介绍了使用FFMPEG解码mp4 / mkv失败的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

我正在使用基于最新的FFmpeg git源代码树的项目,并链接到Zeranoe发布的共享DLL,网址为: https://ffmpeg.zeranoe.com/builds/



播放代码工作和循环。它播放h265文件(原始),mpeg,avi和mpg文件。但是,只要将mp4或mkv容器指定为输入文件,无论内部是什么,都会从编解码器中转出大量错误。无论是HEVC还是h264都没关系。

  [h264 @ 00000000xyz]没有找到开始的代码
[ h264 @ 00000000xyz]将输入分割为NAL单元时出错。

为了使一切都很奇怪,ffplay.exe播放这些文件就好了。



我意识到,我可以通过将文件转换为原始格式来解决这个问题,但我想要能够读取和解析mp4文件。由于我正在使用Zeraneo的预构建库,我的猜测是在构建过程中未启用某些内容,但是我希望ffplay也会失败。我需要在format_context或codec_context中设置一个标志,或提供某种过滤器标识符?



播放的电影来自 http://bbb3d.renderfarming.net/download.html http://www.w6rz.net/ http:// www.sample-videos.com/



这些工作:

  big_buck_bunny_480p_surround-fix.avi 
bigbuckbunny_480x272.h265

ffmpeg,请帮我看看有什么问题以及如何解决。如果预构建库是弊端,那么第二个问题是如果有人有一个方便的cmake设置来构建Windows X64和x32调试和发布目标。



这是初始化ffmpeg的原始资料。

  avdevice_register_all(); 
avfilter_register_all();
av_register_all();
avformat_network_init();

格式解析如下:

  m_FormatContext = avformat_alloc_context(); 
if(avformat_open_input(& m_FormatContext,file.GetPath()。ToString()。c_str(),NULL,NULL)!= 0)
{
// std :: cout< ;< 无法打开输入<的std :: ENDL;
success = false;
}
//查找流信息
if(success)
{
if(avformat_find_stream_info(m_FormatContext,NULL)<0)
{
// std :: cout<<<< 无法获取流信息<<的std :: ENDL;
success = false;
}
}

流打开如下:

  m_VideoStream = avstream; 
m_FormatContext = formatContext;
if(m_VideoStream)
{
m_StreamIndex = m_VideoStream-> stream_identifier;
AVCodecParameters * codecpar = m_VideoStream-> codecpar;
if(codecpar)
{
AVCodecID codec_id = codecpar-> codec_id;
m_Decoder = avcodec_find_decoder(codec_id);
if(m_Decoder)
{
m_CodecContext = avcodec_alloc_context3(m_Decoder);
if(m_CodecContext)
{
m_CodecContext-> width = codecpar-> width;
m_CodecContext-> height = codecpar-> height;
m_VideoSize = i3(codecpar-> width,codecpar-> height,1);
success = 0 == avcodec_open2(m_CodecContext,m_Decoder,NULL);
if(success)
{
if(m_CodecContext)
{
int size = av_image_get_buffer_size(format,m_CodecContext-> width,m_CodecContext-> height,1 );
if(size> 0)
{
av_frame = av_frame_alloc();
gl_frame = av_frame_alloc();
uint8_t * internal_buffer =(uint8_t *)av_malloc(size * sizeof(uint8_t)); $($)$($)$($)()((uint8_t **)((AVPicture *)gl_frame-> data),(int *)((AVPicture *)gl_frame-> linesize),internal_buffer,format,m_CodecContext-> width,m_CodecContext - >高度,1);
m_Packet =(AVPacket *)av_malloc(sizeof(AVPacket));
}
}
}
if(!success)
{
avcodec_close(m_CodecContext);
avcodec_free_context(& m_CodecContext);
m_CodecContext = NULL;
m_Decoder = NULL;
m_VideoStream = NULL;
}
}
else
{
m_Decoder = NULL;
m_VideoStream = NULL;
}
}
}
}

在单线程上进行编码:

  do 
{
if(av_read_frame(m_FormatContext,m_Packet) 0)
{
av_packet_unref(m_Packet);
m_AllPacketsSent = true;
}
else
{
if(m_Packet-> stream_index == m_StreamIndex)
{
avcodec_send_packet(m_CodecContext,m_Packet);
}
}

int frame_finished = avcodec_receive_frame(m_CodecContext,av_frame);
if(frame_finished == 0)
{
if(!conv_ctx)
{
conv_ctx = sws_getContext(m_CodecContext-> width,
m_CodecContext- > height,m_CodecContext-> pix_fmt,
m_CodecContext-> width,m_CodecContext-> height,format,SWS_BICUBIC,NULL,NULL,NULL);
}

sws_scale(conv_ctx,av_frame-> data,av_frame-> linesize,0,m_CodecContext-> height,gl_frame-> data,gl_frame-> linesize);

开关(格式)
{
案例AV_PIX_FMT_BGR32_1:
案例AV_PIX_FMT_RGB32_1:
案例AV_PIX_FMT_0BGR32:
案例AV_PIX_FMT_0RGB32:
案例AV_PIX_FMT_BGR32:
案例AV_PIX_FMT_RGB32:
{
m_CodecContext-> bits_per_raw_sample = 32;打破;
}
默认值:
{
FWASSERT(format == AV_PIX_FMT_RGB32,格式更改,更新每个原始样本的位!打破;
}
}


size_t bufferSize = m_CodecContext-> width * m_CodecContext-> height * m_CodecContext-> bits_per_raw_sample / 8;
m_Buffer.Realloc(bufferSize,false,gl_frame-> data [0]);
m_VideoSize = i3(m_CodecContext-> width,m_CodecContext-> height,1);
result = true;
//将图像缓冲区直接发送到锁定的纹理。
// glTexSubImage2D(GL_TEXTURE_2D,0,0,0,codec_ctx-> width,codec_ctx-> height,GL_RGB,GL_UNSIGNED_BYTE, gl_frame->数据[0]);
}

av_packet_unref(m_Packet);
} while(m_Packet-> stream_index!= m_StreamIndex);

m_FrameDecoded = result;

任何见解都表示赞赏!

  m_CodecContext-> width = codecpar- >宽度; 
m_CodecContext-> height = codecpar-> height;

你应该调用 avcodec_parameters_to_context()


I'm using a project based on the latest FFmpeg git source tree, and linking to the shared DLL's published by Zeranoe at https://ffmpeg.zeranoe.com/builds/

The playback code works and loops. It plays back h265 files (raw), mpeg, avi, and mpg files. However as soon as an mp4 or mkv container is specified as input file, regardless of what's inside,a lot of errors are dumped from the codec. It doesn't matter if it's HEVC or h264.

[h264 @ 00000000xyz] No start code is found
[h264 @ 00000000xyz] Error splitting the input into NAL units.

To make everything really strange, ffplay.exe plays these files just fine.

I realize that I can probably fix this by converting files into a raw format first, but I would like to be able to read and parse mp4 files a they are. Since I am using the pre-build libs of Zeraneo, my guess would be that something was not enabled during the build, but then I would expect ffplay to fail too. Do I need to set a flag in the format_context or codec_context, or provide some sort of filter identifier?

Movies that play fine came from http://bbb3d.renderfarming.net/download.html, http://www.w6rz.net/ and http://www.sample-videos.com/

These work:

big_buck_bunny_480p_surround-fix.avi
bigbuckbunny_480x272.h265

Being a total noob at ffmpeg, please help me understand what is wrong and how to fix it. If the pre-build libs are the culprit, then the second question is if someone has a convenient cmake setup to build this for windows X64 and x32 debug and release targets.

Here's the source for initializing ffmpeg for reading

avdevice_register_all();
avfilter_register_all();
av_register_all();
avformat_network_init();

The format is parsed as follows:

m_FormatContext = avformat_alloc_context();
if (avformat_open_input(&m_FormatContext, file.GetPath().ToString().c_str(), NULL, NULL) != 0) 
{
    //std::cout << "failed to open input" << std::endl;
    success = false;
}
// find stream info
if (success)
{
    if (avformat_find_stream_info(m_FormatContext, NULL) < 0) 
    {
        //std::cout << "failed to get stream info" << std::endl;
        success = false;
    }
}

The stream is opened as follows:

m_VideoStream = avstream;
m_FormatContext = formatContext;
if (m_VideoStream)
{
    m_StreamIndex = m_VideoStream->stream_identifier;
    AVCodecParameters *codecpar = m_VideoStream->codecpar;      
    if (codecpar)
    {
        AVCodecID codec_id = codecpar->codec_id;
        m_Decoder = avcodec_find_decoder(codec_id);
        if (m_Decoder)
        {
            m_CodecContext = avcodec_alloc_context3(m_Decoder);
            if (m_CodecContext)
            {
                m_CodecContext->width = codecpar->width;                    
                m_CodecContext->height = codecpar->height;
                m_VideoSize = i3(codecpar->width, codecpar->height,1);
                success = 0 == avcodec_open2(m_CodecContext, m_Decoder, NULL);
                if (success)
                {
                    if(m_CodecContext)
                    {
                        int size = av_image_get_buffer_size(format, m_CodecContext->width, m_CodecContext->height, 1);      
                        if (size > 0)
                        {
                            av_frame = av_frame_alloc();
                            gl_frame = av_frame_alloc();        
                            uint8_t *internal_buffer = (uint8_t *)av_malloc(size * sizeof(uint8_t));
                            av_image_fill_arrays((uint8_t**)((AVPicture *)gl_frame->data), (int*) ((AVPicture *)gl_frame->linesize), internal_buffer, format, m_CodecContext->width, m_CodecContext->height,1);
                            m_Packet = (AVPacket *)av_malloc(sizeof(AVPacket));
                        }
                    }
                }
                if (!success) 
                {
                    avcodec_close(m_CodecContext);
                    avcodec_free_context(&m_CodecContext);
                    m_CodecContext = NULL;
                    m_Decoder = NULL;
                    m_VideoStream = NULL;
                }
            }
            else
            {
                m_Decoder = NULL;
                m_VideoStream = NULL;
            }
        }
    }
}

And dedoding on a single thread:

do 
{
    if (av_read_frame(m_FormatContext, m_Packet) < 0) 
    {
        av_packet_unref(m_Packet);
        m_AllPacketsSent = true;
    }
    else
    {
        if (m_Packet->stream_index == m_StreamIndex) 
        {                   
            avcodec_send_packet(m_CodecContext, m_Packet);
        }
    }

    int frame_finished = avcodec_receive_frame(m_CodecContext, av_frame);
    if (frame_finished == 0) 
    {
        if (!conv_ctx) 
        {
            conv_ctx = sws_getContext(m_CodecContext->width, 
                m_CodecContext->height, m_CodecContext->pix_fmt, 
                m_CodecContext->width, m_CodecContext->height, format, SWS_BICUBIC, NULL, NULL, NULL);
        }

        sws_scale(conv_ctx, av_frame->data, av_frame->linesize, 0, m_CodecContext->height, gl_frame->data, gl_frame->linesize);

        switch(format)
        {
            case AV_PIX_FMT_BGR32_1:
            case AV_PIX_FMT_RGB32_1:
            case AV_PIX_FMT_0BGR32:
            case AV_PIX_FMT_0RGB32:
            case AV_PIX_FMT_BGR32:  
            case AV_PIX_FMT_RGB32:              
            {
                m_CodecContext->bits_per_raw_sample = 32; break;                    
            }
            default:
            {
                FWASSERT(format == AV_PIX_FMT_RGB32, "The format changed, update the bits per raw sample!"); break;
            }
        }


        size_t bufferSize = m_CodecContext->width * m_CodecContext->height * m_CodecContext->bits_per_raw_sample / 8;
        m_Buffer.Realloc(bufferSize, false,  gl_frame->data[0]);
        m_VideoSize = i3(m_CodecContext->width, m_CodecContext->height,1);
        result = true;
        // sends the image buffer straight to the locked texture here..
        // glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, codec_ctx->width, codec_ctx->height, GL_RGB, GL_UNSIGNED_BYTE, gl_frame->data[0]);
    }

    av_packet_unref(m_Packet);
} while (m_Packet->stream_index != m_StreamIndex);

m_FrameDecoded = result;

Any insight is appreciated!

解决方案

Instead of implicitly provide width and height here:

m_CodecContext->width = codecpar->width;                    
m_CodecContext->height = codecpar->height;

you should call avcodec_parameters_to_context().

这篇关于使用FFMPEG解码mp4 / mkv失败的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆