980210 - 为sws_scale提供参数时出了什么问题? [英] 980210 - what is wrong while providing arguments for sws_scale?
问题描述
在下面的代码中,我无法弄清楚出了什么问题:
uint8_t *dstData[4];
int dstLinesize[4];
AVPixelFormat convertToPixFmt = AV_PIX_FMT_RGBA;
int ret;
// ...
printf("tmp_frame format: %d (%s) %dx%d\n", tmp_frame->format, av_get_pix_fmt_name((AVPixelFormat)tmp_frame->format), tmp_frame->width, tmp_frame->height);
// 以上行打印:tmp_frame format: 23 (nv12) 480x480
int size = av_image_get_buffer_size(convertToPixFmt, tmp_frame->width, tmp_frame->height, 1);
uint8_t *buffer = (uint8_t *)av_malloc(size);
ret = av_image_copy_to_buffer(buffer, size,
    (const uint8_t * const *)&tmp_frame->data[i],
    (const int *)&tmp_frame->linesize[i], (AVPixelFormat)tmp_frame->format,
    tmp_frame->width, tmp_frame->height, 1);
ASSERT(ret >= 0);
ret = av_image_fill_arrays(dstData, dstLinesize, buffer, convertToPixFmt, dest_width, dest_height, 1);
ASSERT(ret >= 0);
ret = sws_scale(
    convertContext,
    dstData,
    dstLinesize,
    0,
    dest_width,
    convertedFrame->data,
    convertedFrame->linesize);
printf("sws_scale returns %d\n", ret); // 打印:sws_scale returns 0
ASSERT(ret == tmp_frame->height);
// ...
// ...
这是使用 dxva2 获取 tmp_frame 的代码的一部分。我参考了 hw_decode.c 中的代码,并确保代码中没有错误。tmp_frame 以 NV12 格式正确生成。只有在调用 sws_scale 时才会发生错误,错误信息是:
错误的src图像指针
所以我不知道如何提供指针以避免出现此错误,并且sws_scale可能正常工作。有什么想法吗?
我更新了提供完整代码的问题:
静态AVBufferRef * hw_device_ctx = NULL;
static enum AVPixelFormat hw_pix_fmt;
静态FILE * output_file = NULL;
int main(int argc,char * argv [])
{
AVFormatContext * input_ctx = NULL;
int video_stream,ret;
AVStream * video = NULL;
AVCodecContext * decoder_ctx = NULL;
AVCodec * decoder = NULL;
AVPacket包;
enum AVHWDeviceType类型;
int i;
if(argc< 2)
{
fprintf(stderr,Usage:%s< input file> \ n,argv [0]);
返回-1;
}
type = av_hwdevice_find_type_by_name(dxva2);
ASSERT(类型!= AV_HWDEVICE_TYPE_NONE);
ASSERT(avformat_open_input(& input_ctx,argv [1],NULL,NULL)== 0);
ASSERT(avformat_find_stream_info(input_ctx,NULL)> = 0);
video_stream = av_find_best_stream(input_ctx,AVMEDIA_TYPE_VIDEO,-1,-1,& decoder,0);
ASSERT(video_stream> = 0);
decoder_ctx = avcodec_alloc_context3(解码器);
ASSERT(decoder_ctx);
video = input_ctx-> streams [video_stream];
ASSERT(avcodec_parameters_to_context(decoder_ctx,video-> codecpar)> = 0);
ASSERT(av_hwdevice_ctx_create(& hw_device_ctx,type,NULL,NULL,0)> = 0);
decoder_ctx-> hw_device_ctx = av_buffer_ref(hw_device_ctx);
ASSERT(avcodec_open2(decoder_ctx,decoder,NULL)> = 0);
printf(视频信息:%dx%d \ n,decoder_ctx-> width,decoder_ctx-> height);
AVFrame * frame = av_frame_alloc();
ASSERT(框架);
AVFrame * sw_frame = av_frame_alloc();
ASSERT(sw_frame);
AVFrame * convertedFrame = av_frame_alloc();
ASSERT(convertedFrame);
AVPixelFormat convertToPixFmt = AV_PIX_FMT_RGBA;
// int dest_width = 320,dest_height = 200;
int dest_width = decoder_ctx-> width,dest_height = decoder_ctx-> height;
SwsContext * convertContext = sws_getContext(decoder_ctx-> width,decoder_ctx-> height,AV_PIX_FMT_YUV420P,
dest_width,dest_height,convertToPixFmt,
SWS_FAST_BILINEAR,NULL,NULL,NULL);
ASSERT(convertContext);
int convertedFrameAspectBufferSize = avpicture_get_size(convertToPixFmt,dest_width,dest_height);
void * convertedFrameBuffer = av_malloc(convertedFrameAspectBufferSize);
avpicture_fill((AVPicture *)convertedFrame,(uint8_t *)convertedFrameBuffer,convertToPixFmt,dest_width,dest_height);
output_file = fopen(1.out,w +);
for(int i = 0; / * i< 20 * /; i ++)
{
ret = av_read_frame(input_ctx,& packet);
if(ret == AVERROR_EOF)
break;
ASSERT(ret> = 0);
if(video_stream!= packet.stream_index)
继续;
int ret = avcodec_send_packet(decoder_ctx,& packet);
ASSERT(ret> = 0);
// printf(%p,decoder-> hw_configs-> hwaccel);
ret = avcodec_receive_frame(decoder_ctx,frame);
if(ret< 0)
printf(%d \ t%d \ n,i,ret);
AVFrame * tmp_frame;
if(frame-> format> 0)// hw enabled
{
ASSERT(av_hwframe_transfer_data(sw_frame,frame,0)> = 0);
tmp_frame = sw_frame;
}
其他
{
tmp_frame = frame;
}
printf(帧格式:%d(%s)%dx%d \ n,frame->格式,av_get_pix_fmt_name((AVPixelFormat)frame->格式),frame- > width,frame-> height);
printf(sw_frame格式:%d(%s)%dx%d \ n,sw_frame->格式,av_get_pix_fmt_name((AVPixelFormat)sw_frame->格式),sw_frame-> width,sw_frame - >高度);
printf(tmp_frame格式:%d(%s)%dx%d \ n,tmp_frame->格式,av_get_pix_fmt_name((AVPixelFormat)tmp_frame->格式),tmp_frame-> width,tmp_frame - >高度);
/ *
视频信息:480x480
帧格式:53(dxva2_vld)480x480
sw_frame格式:23(nv12)480x480
[swscaler @ 004cb2c0]坏src图像指针
* /
int size = av_image_get_buffer_size(convertToPixFmt,tmp_frame-> width,tmp_frame-> height,1);
uint8_t * buffer =(uint8_t *)av_malloc(size);
ret = av_image_copy_to_buffer(缓冲区,大小,
(const uint8_t * const *)& tmp_frame-> data [i],
(const int *)& tmp_frame - > linesize [i],(AVPixelFormat)tmp_frame->格式,
tmp_frame-> width,tmp_frame-> height,1);
ASSERT(ret> 0);
ret = sws_scale(
convertContext,
tmp_frame-> data,
tmp_frame-> linesize,
0,
dest_width,
convertedFrame-> data,
convertedFrame-> linesize);
printf(sws_scale返回%d \ n,ret);
ASSERT(ret == tmp_frame-> height);
ret = fwrite(convertedFrame-> data,tmp_frame-> height * tmp_frame-> width,1,output_file);
ASSERT(ret == 1);
休息;
}
av_frame_free(& frame);
av_packet_unref(& packet);
avcodec_free_context(& decoder_ctx);
avformat_close_input(& input_ctx);
av_buffer_unref(& hw_device_ctx);
返回0;
}
我的尝试:
我尝试使用更大的已分配块作为缓冲区。我也尝试过其他格式和更改参数。但我无法成功。
通常错误消息比某些初学者编写的代码更正确。所以我认为你的数据或数据类型不匹配。
这里有一些使用 sws_scale 的可运行代码,可以向你展示如何让它正常工作。
我认为您应该在 sws_scale 电话中查看变量的命名及其用法。
In the following code, I can't figure out what's wrong:
uint8_t *dstData[4]; int dstLinesize[4]; AVPixelFormat convertToPixFmt = AV_PIX_FMT_RGBA; int ret; // ... printf("tmp_frame format: %d (%s) %dx%d\n", tmp_frame->format, av_get_pix_fmt_name((AVPixelFormat)tmp_frame->format), tmp_frame->width, tmp_frame->height); // The above line prints: tmp_frame format: 23 (nv12) 480x480 int size = av_image_get_buffer_size(convertToPixFmt, tmp_frame->width, tmp_frame->height, 1); uint8_t *buffer = (uint8_t *) av_malloc(size); ret = av_image_copy_to_buffer(buffer, size, (const uint8_t * const *)&tmp_frame->data[i], (const int *)&tmp_frame->linesize[i], (AVPixelFormat)tmp_frame->format, tmp_frame->width, tmp_frame->height, 1); ASSERT(ret >= 0); ret = av_image_fill_arrays(dstData, dstLinesize, buffer, convertToPixFmt, dest_width, dest_height, 1); ASSERT(ret >= 0); ret = sws_scale( convertContext, dstData, dstLinesize, 0, dest_width, convertedFrame->data, convertedFrame->linesize); printf("sws_scale returns %d\n", ret); // prints: sws_scale returns 0 ASSERT(ret == tmp_frame->height); // ...
It's part of a code which uses dxva2 to obtain tmp_frame. I inspired the code from hw_decode.c and am sure that there's no mistake in the code. The tmp_frame is properly made in NV12 format. The error occurs just when I call sws_scale and it's:
bad src image pointers
So I don't know how to provide pointers not to get this error and sws_scale may work properly. Any idea?
I updated the question for giving the complete code:
static AVBufferRef *hw_device_ctx = NULL; static enum AVPixelFormat hw_pix_fmt; static FILE *output_file = NULL; int main(int argc, char *argv[]) { AVFormatContext *input_ctx = NULL; int video_stream, ret; AVStream *video = NULL; AVCodecContext *decoder_ctx = NULL; AVCodec *decoder = NULL; AVPacket packet; enum AVHWDeviceType type; int i; if (argc < 2) { fprintf(stderr, "Usage: %s <input file>\n", argv[0]); return -1; } type = av_hwdevice_find_type_by_name("dxva2"); ASSERT(type != AV_HWDEVICE_TYPE_NONE); ASSERT(avformat_open_input(&input_ctx, argv[1], NULL, NULL) == 0); ASSERT(avformat_find_stream_info(input_ctx, NULL) >= 0); video_stream = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0); ASSERT(video_stream >= 0); decoder_ctx = avcodec_alloc_context3(decoder); ASSERT(decoder_ctx); video = input_ctx->streams[video_stream]; ASSERT(avcodec_parameters_to_context(decoder_ctx, video->codecpar) >= 0); ASSERT(av_hwdevice_ctx_create(&hw_device_ctx, type, NULL, NULL, 0) >= 0); decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx); ASSERT(avcodec_open2(decoder_ctx, decoder, NULL) >= 0); printf("video info: %dx%d\n", decoder_ctx->width, decoder_ctx->height); AVFrame *frame = av_frame_alloc(); ASSERT(frame); AVFrame *sw_frame = av_frame_alloc(); ASSERT(sw_frame); AVFrame* convertedFrame = av_frame_alloc(); ASSERT(convertedFrame); AVPixelFormat convertToPixFmt = AV_PIX_FMT_RGBA; //int dest_width = 320, dest_height = 200; int dest_width = decoder_ctx->width, dest_height = decoder_ctx->height; SwsContext* convertContext = sws_getContext(decoder_ctx->width, decoder_ctx->height, AV_PIX_FMT_YUV420P, dest_width, dest_height, convertToPixFmt, SWS_FAST_BILINEAR, NULL, NULL, NULL); ASSERT(convertContext); int convertedFrameAspectBufferSize = avpicture_get_size(convertToPixFmt, dest_width, dest_height); void *convertedFrameBuffer = av_malloc(convertedFrameAspectBufferSize); avpicture_fill((AVPicture*)convertedFrame, (uint8_t *)convertedFrameBuffer, 
convertToPixFmt, dest_width, dest_height); output_file = fopen("1.out", "w+"); for (int i = 0; /*i < 20*/; i++) { ret = av_read_frame(input_ctx, &packet); if (ret == AVERROR_EOF) break; ASSERT(ret >= 0); if (video_stream != packet.stream_index) continue; int ret = avcodec_send_packet(decoder_ctx, &packet); ASSERT(ret >= 0); //printf("%p", decoder->hw_configs->hwaccel); ret = avcodec_receive_frame(decoder_ctx, frame); if (ret < 0) printf("%d\t%d\n", i, ret); AVFrame *tmp_frame; if (frame->format > 0) // hw enabled { ASSERT(av_hwframe_transfer_data(sw_frame, frame, 0) >= 0); tmp_frame = sw_frame; } else { tmp_frame = frame; } printf("frame format: %d (%s) %dx%d\n", frame->format, av_get_pix_fmt_name((AVPixelFormat)frame->format), frame->width, frame->height); printf("sw_frame format: %d (%s) %dx%d\n", sw_frame->format, av_get_pix_fmt_name((AVPixelFormat)sw_frame->format), sw_frame->width, sw_frame->height); printf("tmp_frame format: %d (%s) %dx%d\n", tmp_frame->format, av_get_pix_fmt_name((AVPixelFormat)tmp_frame->format), tmp_frame->width, tmp_frame->height); /* video info: 480x480 frame format: 53 (dxva2_vld) 480x480 sw_frame format: 23 (nv12) 480x480 [swscaler @ 004cb2c0] bad src image pointers */ int size = av_image_get_buffer_size(convertToPixFmt, tmp_frame->width, tmp_frame->height, 1); uint8_t *buffer = (uint8_t *) av_malloc(size); ret = av_image_copy_to_buffer(buffer, size, (const uint8_t * const *)&tmp_frame->data[i], (const int *)&tmp_frame->linesize[i], (AVPixelFormat)tmp_frame->format, tmp_frame->width, tmp_frame->height, 1); ASSERT(ret > 0); ret = sws_scale( convertContext, tmp_frame->data, tmp_frame->linesize, 0, dest_width, convertedFrame->data, convertedFrame->linesize); printf("sws_scale returns %d\n", ret); ASSERT(ret == tmp_frame->height); ret = fwrite(convertedFrame->data, tmp_frame->height * tmp_frame->width, 1, output_file); ASSERT(ret == 1); break; } av_frame_free(&frame); av_packet_unref(&packet); avcodec_free_context(&decoder_ctx); 
avformat_close_input(&input_ctx); av_buffer_unref(&hw_device_ctx); return 0; }
What I have tried:
I tried using a larger allocated block as buffer. I also tried other formats and changing arguments. But I couldn't succeed.
Normally the error message is more correct than the code written by some beginner. So I think you have mismatched some data or data types.
Here is some working code with sws_scale which may show you to get it work.
I think you should review the naming of your variables and their usage in the sws_scale call.
这篇关于980210 - 为sws_scale提供参数时出了什么问题?的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!