视频来自PIPE -> YUV with libAV -> RGB with sws_scale -> 用Qt绘制 [英] Video from pipe -> YUV with libAV -> RGB with sws_scale -> Draw with Qt
本文介绍了视频来自PIPE -> YUV with libAV -> RGB with sws_scale -> 用Qt绘制的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!
问题描述
我需要从管道或插座解码视频,然后转换它的图像集和绘制与Qt(4.8.5!!)。 我正在使用libAV的默认示例并添加我需要的内容。
以下是我的代码:
/*
 * Decode an MPEG-1 elementary stream from /tmp/test.mpg with libav,
 * convert each decoded YUV frame to RGB32 with sws_scale(), and hand
 * it to Qt for drawing via the update() signal.
 */
AVCodec *codec;
AVCodecContext *codecContext = NULL;
int frameNumber, got_picture, len;
FILE *f;
AVFrame *avFrame, *avFrameYUV, *avFrameRGB;
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;

av_init_packet(&avpkt);

f = fopen("/tmp/test.mpg", "rb");
if (!f) {
    fprintf(stderr, "could not open /tmp/test.mpg\n");
    exit(1);
}

/* set end of buffer to 0 (this ensures that no overreading happens
   for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

/* FIX: read only INBUF_SIZE payload bytes; the original read
   INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE, overwriting the zeroed
   padding region and defeating the memset above. */
avpkt.data = inbuf;
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);

/* find the mpeg1 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
if (!codec) {
    fprintf(stderr, "codec not found\n");
    exit(1);
}

codecContext = avcodec_alloc_context3(codec);
codecContext->get_format = &my_get_format;

avFrameYUV = avcodec_alloc_frame();
avFrameRGB = avcodec_alloc_frame();

if (codec->capabilities & CODEC_CAP_TRUNCATED)
    codecContext->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

/* For some codecs, such as msmpeg4 and mpeg4, width and height
   MUST be initialized there because this information is not
   available in the bitstream. */

/* open it */
if (avcodec_open2(codecContext, codec, NULL) < 0) {
    fprintf(stderr, "could not open codec\n");
    exit(1);
}

/* the codec gives us the frame size, in samples */
int srcX = 352; /* size of the pictures in the sample stream */
int srcY = 288;
struct SwsContext *swsContext = sws_getContext(srcX, srcY, AV_PIX_FMT_YUV420P,
        srcX, srcY, PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

/* FIX: sws_scale() needs a real destination buffer. avFrameRGB came out
   of avcodec_alloc_frame() with data[] == NULL, which is exactly what
   triggers "[swscaler] bad dst image pointers". Allocate the single
   packed RGB32 plane once, up front (4 bytes per pixel). */
avFrameRGB->linesize[0] = srcX * 4;
avFrameRGB->data[0] = (uint8_t *)av_malloc(avFrameRGB->linesize[0] * srcY);

frameNumber = 0;
for (;;) {
    if (avpkt.size == 0)
        break;
    avpkt.data = inbuf;
    while (avpkt.size > 0) {
        len = avcodec_decode_video2(codecContext, avFrameYUV, &got_picture, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding frame %d\n", frameNumber);
            exit(1);
        }
        if (got_picture) {
            printf("saving frame %3d\n", frameNumber);
            fflush(stdout);
            /* FIX: the source slice height is srcY; the original passed
               an undefined variable 'dstY' here. */
            sws_scale(swsContext, avFrameYUV->data, avFrameYUV->linesize,
                      0, srcY, avFrameRGB->data, avFrameRGB->linesize);
            /* NOTE(review): this QImage wraps avFrameRGB->data[0] without
               copying, and a new QImage is allocated per frame — the
               update() receiver must take ownership and delete it, or
               this leaks. Confirm against the slot implementation. */
            myImage = new QImage(avFrameRGB->data[0], srcX, srcY, QImage::Format_RGB32);
            emit update(myImage); /* here we draw it to the screen */
            usleep(50000);
            frameNumber++;
        }
        avpkt.size -= len;
        avpkt.data += len;
    }
    /* FIX: the original never refilled the packet, so despite the
       "decode until eof" intent only the first chunk was decoded.
       Read the next chunk; an empty read ends the outer loop. */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
}

/* Flush the decoder: a NULL/empty packet drains any buffered frame
   (needed for codecs with delay, e.g. B-frames). */
avpkt.data = NULL;
avpkt.size = 0;
len = avcodec_decode_video2(codecContext, avFrameYUV, &got_picture, &avpkt);
if (got_picture) {
    printf("saving last frame %3d\n", frameNumber);
    fflush(stdout);
    frameNumber++;
}

fclose(f);                    /* FIX: the file handle was never closed */
sws_freeContext(swsContext);  /* FIX: the scaler context was leaked */
av_free(avFrameRGB->data[0]); /* release the RGB plane allocated above */
avcodec_close(codecContext);
av_free(codecContext);
avcodec_free_frame(&avFrameRGB);
avcodec_free_frame(&avFrameYUV);
现在它不起作用了,报错:"[swscaler @ 0xb0005460] bad dst image pointers"(目标图像指针错误),但问题是:
-我在全球范围内做错了什么?
-我需要AVPicture吗?像这里这样吗?
-QImage是画框的最佳素材吗?
-我给了SWS_Scale()两个不同的初始化帧,它崩溃了。为什么?
推荐答案
我会这样做:
/* Accepted answer: give sws_scale() a real destination buffer.
   The "bad dst image pointers" error occurs because the RGB frame's
   data[] pointers were never allocated. */
AVPicture m_Rgb;
/* Take the dimensions and input pixel format from the decoder context
   instead of hard-coding them. */
int srcX = codecContext->width;
int srcY = codecContext->height;
struct SwsContext *swsContext = sws_getContext(srcX, srcY, codecContext->pix_fmt, srcX, srcY, PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
/* RGB32 is 4 bytes per pixel, so one packed row is srcX * 4 bytes;
   a single plane (data[0]) holds the whole image. */
m_Rgb.linesize[0] = srcX * 4;
m_Rgb.data[0] = (uint8_t*)malloc( m_Rgb.linesize[0] * srcY );
sws_scale(swsContext, avFrameYUV->data, avFrameYUV->linesize, 0, srcY, m_Rgb.data, m_Rgb.linesize );
/* This QImage constructor wraps the existing buffer without copying it. */
QImage image( m_Rgb.data[0], srcX, srcY, QImage::Format_RGB32 );
image = image.copy(); // This makes a deep copy of the pixels. Currently image just has a copy of the m_Rgb.data[0] pointer
您可以反复使用m_rgb,只需在最后将其删除即可。
这篇关于视频来自PIPE -> YUV with libAV -> RGB with sws_scale -> 用Qt绘制的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!
查看全文