C ++ - 在ffmpeg中应用过滤器 [英] C++ - applying filter in ffmpeg
问题描述
AVFilter *filter = avfilter_get_by_name("yadif");
之后,我打开过滤器上下文:
AVFilterContext *filter_ctx;
avfilter_open(&filter_ctx, filter, NULL);
我的第一个问题是关于这个功能。 Visual Studio警告我关于 avfilter_open
已被弃用。
之后,我做:
avfilter_init_str(filter_ctx, "yadif=1:-1");
总是失败。我试过用 "1:-1"
代替 "yadif=1:-1"
,但也总是失败,我应该使用什么参数?
编辑:传入值 "1"
或 "2"
时,例如,它就有效。调试时,我发现使用这些值之一,函数会使用 mode=1
或 mode=2
。这些值的解释在这个链接中。
然后,我有一个 AVFrame *框架
,这是我要去隔行的框架。当最后一句话工作时,我将有过滤器和他的上下文初始化。
感谢您的帮助。
我知道你的问题已经提出一年多了,但最近我不得不处理隔行扫描的DVB-TS流,所以我也许能帮助遇到这个问题的其他人。
这些片段来自我写的完成的播放器
初始化过滤器图:
void VideoManager :: init_filter_graph(AVFrame * frame){
if(filter_initialised)return;
int result;
AVFilter * buffer_src = avfilter_get_by_name(缓冲区);
AVFilter * buffer_sink = avfilter_get_by_name(buffersink);
AVFilterInOut * inputs = avfilter_inout_alloc();
AVFilterInOut * outputs = avfilter_inout_alloc();
AVCodecContext * ctx = ffmpeg.vid_stream.context;
char args [512];
int frame_fix = 0; //修复某些流上的不良宽度
if(frame-> width< 704)frame_fix = 2;
else if(frame-> width> 704)frame_fix = -16;
snprintf(args,sizeof(args),
video_size =%dx%d:pix_fmt =%d:time_base =%d /%d:pixel_aspect =%d /%d ,
frame-> width + frame_fix,
frame-> height,
frame-> format,// ctx-> pix_fmt,
ctx-> time_base .num,
ctx-> time_base.den,
ctx-> sample_aspect_ratio.num,
ctx-> sample_aspect_ratio.den);
const char * description =yadif = 1:-1:0;
LOGD(Filter:%s - Settings:%s,description,args);
filter_graph = avfilter_graph_alloc();
result = avfilter_graph_create_filter(& filter_src_ctx,buffer_src,in,args,NULL,filter_graph);
if(result< 0){
LOGI(Filter graph - Unable to create buffer source);
返回;
}
AVBufferSinkParams * params = av_buffersink_params_alloc();
枚举AVPixelFormat pix_fmts [] = {AV_PIX_FMT_GRAY8,AV_PIX_FMT_NONE};
params-> pixel_fmts = pix_fmts;
result = avfilter_graph_create_filter(& filter_sink_ctx,buffer_sink,out,NULL,params,filter_graph);
av_free(params);
if(result< 0){
LOGI(Filter graph - Unable to create buffer sink);
返回;
}
inputs-> name = av_strdup(out);
inputs-> filter_ctx = filter_sink_ctx;
inputs-> pad_idx = 0;
inputs-> next = NULL;
outputs-> name = av_strdup(in);
outputs-> filter_ctx = filter_src_ctx;
输出 - > pad_idx = 0;
outputs-> next = NULL;
result = avfilter_graph_parse_ptr(filter_graph,description,& inputs,& outputs,NULL);
if(result< 0)LOGI(avfilter_graph_parse_ptr ERROR);
result = avfilter_graph_config(filter_graph,NULL);
if(result< 0)LOGI(avfilter_graph_config ERROR);
filter_initialised = true;
}
处理流中的视频数据包时,检查它是否是隔行扫描帧,如果是,则将该帧发送到过滤器。然后,过滤器会把去隔行后的帧返回给你。
void FFMPEG :: process_video_packet(AVPacket * pkt){
int got;
AVFrame * frame = vid_stream.frame;
avcodec_decode_video2(vid_stream.context,frame,& got,pkt);
if(got){
if(!frame-> interlaced_frame){// not interlaced
Video.add_av_frame(frame,0);
} else {
if(!Video.filter_initialised){
Video.init_filter_graph(frame);
}
av_buffersrc_add_frame_flags(Video.filter_src_ctx,frame,AV_BUFFERSRC_FLAG_KEEP_REF);
int c = 0;
while(true){
AVFrame * filter_frame = ffmpeg.vid_stream.filter_frame;
int result = av_buffersink_get_frame(Video.filter_sink_ctx,filter_frame);
if(result == AVERROR(EAGAIN)|| result == AVERROR(AVERROR_EOF))break;
if(result< 0)return;
Video.add_av_frame(filter_frame,c ++);
av_frame_unref(filter_frame);
}
}
}
}
希望这有助于任何人,因为找到关于ffmpeg的信息是艰难的。
I'm trying to deinterlace a frame using ffmpeg (latest release). Related with this question, I can get the filter I want using this sentence:
AVFilter *filter = avfilter_get_by_name("yadif");
After that, I open the filter context as:
AVFilterContext *filter_ctx;
avfilter_open(&filter_ctx, filter, NULL);
My first question is about this function. Visual Studio warns me about avfilter_open
is deprecated. Which is the alternative?
After that, I do:
avfilter_init_str(filter_ctx, "yadif=1:-1");
And it always fails. I've tried "1:-1"
instead of "yadif=1:-1"
, but that always fails too. What parameter should I use?
EDIT: A value of "1
" or "2
", for example, it works. Debugging it, I found that with one of these values, the function uses mode=1
or mode=2
. The explanation of those values is in this link.
Then, I have a AVFrame *frame
that is the frame I want to deinterlace. When the last sentence works, I'll have the filter and its context initialised. How do I apply this filter to my frame?
Thanks for your help.
I understand your question is over a year old now, but recently I had to work with interlaced DVB-TS streams, so I might be able to help anybody else coming across this subject.
These snippets are from a finished player I've written
Initialise the filter graph:
void VideoManager::init_filter_graph(AVFrame *frame) {
if (filter_initialised) return;
int result;
AVFilter *buffer_src = avfilter_get_by_name("buffer");
AVFilter *buffer_sink = avfilter_get_by_name("buffersink");
AVFilterInOut *inputs = avfilter_inout_alloc();
AVFilterInOut *outputs = avfilter_inout_alloc();
AVCodecContext *ctx = ffmpeg.vid_stream.context;
char args[512];
int frame_fix = 0; // fix bad width on some streams
if (frame->width < 704) frame_fix = 2;
else if (frame->width > 704) frame_fix = -16;
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
frame->width + frame_fix,
frame->height,
frame->format,// ctx->pix_fmt,
ctx->time_base.num,
ctx->time_base.den,
ctx->sample_aspect_ratio.num,
ctx->sample_aspect_ratio.den);
const char *description = "yadif=1:-1:0";
LOGD("Filter: %s - Settings: %s", description, args);
filter_graph = avfilter_graph_alloc();
result = avfilter_graph_create_filter(&filter_src_ctx, buffer_src, "in", args, NULL, filter_graph);
if (result < 0) {
LOGI("Filter graph - Unable to create buffer source");
return;
}
AVBufferSinkParams *params = av_buffersink_params_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
params->pixel_fmts = pix_fmts;
result = avfilter_graph_create_filter(&filter_sink_ctx, buffer_sink, "out", NULL, params, filter_graph);
av_free(params);
if (result < 0) {
LOGI("Filter graph - Unable to create buffer sink");
return;
}
inputs->name = av_strdup("out");
inputs->filter_ctx = filter_sink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
outputs->name = av_strdup("in");
outputs->filter_ctx = filter_src_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
result = avfilter_graph_parse_ptr(filter_graph, description, &inputs, &outputs, NULL);
if (result < 0) LOGI("avfilter_graph_parse_ptr ERROR");
result = avfilter_graph_config(filter_graph, NULL);
if (result < 0) LOGI("avfilter_graph_config ERROR");
filter_initialised = true;
}
When processing the video packets from the stream, check if it is an interlaced frame and send the frame off to the filter. The filter will then return the de-interlaced frames back to you.
void FFMPEG::process_video_packet(AVPacket *pkt) {
int got;
AVFrame *frame = vid_stream.frame;
avcodec_decode_video2(vid_stream.context, frame, &got, pkt);
if (got) {
if (!frame->interlaced_frame) { // not interlaced
Video.add_av_frame(frame, 0);
} else {
if (!Video.filter_initialised) {
Video.init_filter_graph(frame);
}
av_buffersrc_add_frame_flags(Video.filter_src_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
int c = 0;
while (true) {
AVFrame *filter_frame = ffmpeg.vid_stream.filter_frame;
int result = av_buffersink_get_frame(Video.filter_sink_ctx, filter_frame);
if (result == AVERROR(EAGAIN) || result == AVERROR(AVERROR_EOF)) break;
if (result < 0) return;
Video.add_av_frame(filter_frame, c++);
av_frame_unref(filter_frame);
}
}
}
}
Hope this helps anyone because finding information about ffmpeg is tough going.
这篇关于C ++ - 在ffmpeg中应用过滤器的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!