gstreamer code for playing avi file is hanging

Problem Description

I am new to GStreamer. I have written code to play an AVI file using GStreamer, but on executing it the program hangs after a while, and I am unable to debug what the problem is. Can someone help me, please? The code and the output are below:

    Code:

    #include<stdio.h>
    #include<gst/gst.h>
    #include<glib.h>

    //Function to process message on bus of pipeline
    gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data);

    //Function to add pad dynamically for ogg demux
    void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data);
    void dynamic_decodepad (GstElement* object, GstPad* arg0, gboolean arg1,gpointer user_data);

    GstElement *source, *demuxer, *audio_decoder, *video_decoder, *audio_convertor,*video_convertor, *audio_sink,*video_sink,*audioqueue,*videoqueue;//*audio_demuxer, *video_demuxer,

    int main(int argc,char* argv[])
    {

      GstPipeline *pipeline;
      GstBin *Bin;
      GstBus *bus;
      GMainLoop *Mainloop;

      gst_init (&argc,&argv);

      Mainloop = g_main_loop_new(NULL,FALSE);//NULL to use the current context and False to tell its not in running state

      pipeline = gst_pipeline_new("PIPELINE");
      Bin = GST_BIN(pipeline);
      bus = gst_pipeline_get_bus(pipeline);


      source = gst_element_factory_make("filesrc","file-source");
      g_object_set(G_OBJECT(source),"location",argv[1],NULL);

      demuxer = gst_element_factory_make("avidemux","avi-demuxer");
      audioqueue = gst_element_factory_make("queue","Queue for audio");
      videoqueue = gst_element_factory_make("queue","Queue for video");
      video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");
      audio_convertor = gst_element_factory_make("audioconvert","audio convertor");//"Audio converter","audioconvert");
      video_convertor = gst_element_factory_make("videoscale","video convertor");//"Audio converter","audioconvert");
      audio_sink = gst_element_factory_make("autoaudiosink","Auto audio sink");
      video_sink = gst_element_factory_make("xvimagesink","XV video sink ");

      if(!source || !demuxer || !audioqueue || !videoqueue || !video_decoder ||!audio_convertor || !video_convertor || !audio_sink || !video_sink  )
      {   g_print("Could not not create element\n");
        return 0;
      }
      gst_bin_add(Bin,source);
      gst_bin_add_many(Bin,demuxer,audioqueue,videoqueue,audio_convertor,video_decoder,video_convertor,audio_sink,video_sink,NULL);

      gst_element_link(source,demuxer);
      gst_element_link_many(audioqueue,video_decoder,audio_convertor,audio_sink,NULL);
      gst_element_link_many(videoqueue,video_decoder,video_convertor,video_sink,NULL);

      g_signal_connect(demuxer,"pad-added",G_CALLBACK(dynamic_addpad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
      g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);//demuxer and decoder are passed as instance and data as pads of both the elements are linked in dynamic_addpad
      gst_bus_add_watch(bus,process_message,Mainloop); //Mainloop is passed as user data as in the process_message actions are taken on the loop
      g_object_unref(bus);
      g_print("In playing state\n");
      gst_element_set_state(pipeline,GST_STATE_PLAYING);//Pipeline is also a bin and bin is also an element at abstract level and hence gst_element_set_state call is used to set state of pipeline.

      g_main_loop_run(Mainloop);
      g_print("In playing state2\n");
      gst_element_set_state(pipeline,GST_STATE_NULL);
      g_object_unref(G_OBJECT(pipeline));
    }



    //Function to process message on bus of pipeline
    gboolean process_message(GstBus *bus, GstMessage *msg,gpointer data)
    {
      GError *error;
      gchar *debug;
      GMainLoop *loop = (GMainLoop *)data;

      g_print(" In process message msg->type : %d\n",GST_MESSAGE_TYPE(msg));
      switch(GST_MESSAGE_TYPE(msg))
      {
        case   GST_MESSAGE_UNKNOWN :
                g_print("GST_MESSAGE_UNKNOWN \n");
                break;
        case   GST_MESSAGE_EOS     :
                g_print("GST_MESSAGE_EOS \n");
                g_main_loop_quit(loop);
                break;
        case   GST_MESSAGE_ERROR   :
                g_print("GST_MESSAGE_ERROR \n");
                gst_message_parse_error (msg, &error, &debug);
                g_free(debug);
                //if(!error)
                {
                  g_print("GST_MESSAGE_ERROR message : %s \n",error->message);
                }
                g_main_loop_quit(loop);
                break;
        case   GST_MESSAGE_WARNING :
                g_print("GST_MESSAGE_WARNING  \n");
                break;
        case   GST_MESSAGE_INFO    :
                g_print("GST_MESSAGE_INFO \n");
                break;
        case   GST_MESSAGE_TAG     :
                g_print("GST_MESSAGE_TAG \n");
                break;
        case   GST_MESSAGE_BUFFERING:
                g_print("GST_MESSAGE_BUFFERING \n");
                break;
        case   GST_MESSAGE_STATE_CHANGED:
                g_print("GST_MESSAGE_STATE_CHANGED \n");
                break;
        default : 
                g_print("default \n");
                break;

      }
      return TRUE; //returns true always as it has to be always registered returning false will deregister the function
    }

    //Function to add pad dynamically for ogg demux
    void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
    {
      GstPad *audiodemuxsink;
      GstPad *videodemuxsink;
      GstElement *decoder = (GstElement *)data;
      g_print(" In dynamic ADDING PAD\n");

      audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
      gst_pad_link(pad,audiodemuxsink );
      videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
      gst_pad_link(pad,videodemuxsink );
      g_print(" In dynamic ADDING PAD2\n");

    }



    void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data)
    {
      GstPad *videoconvertsink;
      GstPad *audioconvertsink ;
      g_print(" In dynamic_decodepad ADDING PAD\n");

      videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
      gst_pad_link(pad,videoconvertsink);
      audioconvertsink  = gst_element_get_static_pad(audio_convertor,"sink");
      gst_pad_link(pad,audioconvertsink  );
      g_print(" In dynamic_decodepad ADDING PAD2\n");

    }


    Output:
    In playing state
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 8192
    default 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 8192
    default 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 8192
    default 
     In process message msg->type : 8192
    default 
     In dynamic ADDING PAD
     In dynamic ADDING PAD2
     In dynamic ADDING PAD
     In dynamic ADDING PAD2
     In process message msg->type : 16
    GST_MESSAGE_TAG 
     In process message msg->type : 16
    GST_MESSAGE_TAG 
     In process message msg->type : 16
    GST_MESSAGE_TAG 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In dynamic_decodepad ADDING PAD
     In dynamic_decodepad ADDING PAD2
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 
     In process message msg->type : 64
    GST_MESSAGE_STATE_CHANGED 

It hangs at this point. Any help is appreciated. Thanks in advance.

Answer

Your code is wrong in several ways, which is why my answer is so long.

First, gst_pipeline_new returns a GstElement*, not a GstPipeline*:

-  pipeline = gst_pipeline_new("PIPELINE");
+  GstElement *pipeline = gst_pipeline_new("PIPELINE");
   Bin = GST_BIN(pipeline);
-  bus = gst_pipeline_get_bus(pipeline);
+  bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));

Then, your pipeline is wrong: you are trying to decode both streams (audio and video) with one decodebin, but you need two. Create the second one and don't forget to add it to the bin:

   videoqueue = gst_element_factory_make("queue","Queue for video");
+  audio_decoder = gst_element_factory_make("decodebin","a_decodebin");
   video_decoder = gst_element_factory_make("decodebin","decoderbin");//"Vorbis audio decoder","vorbis");

-  gst_bin_add_many(Bin,demuxer,audioqueue,videoqueue,audio_convertor,video_decoder,video_convertor,audio_sink,video_sink,NULL);
+  gst_bin_add_many(
+    Bin,
+    demuxer,
+    audioqueue,videoqueue,
+    audio_decoder,audio_convertor,
+    video_decoder,video_convertor,
+    audio_sink,video_sink,
+    NULL);

And, by the way, it is better to use decodebin2, as decodebin is deprecated.
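For instance, the two decoders could be created as decodebin2 instead; a minimal sketch (the names "a_decodebin"/"v_decodebin" are just illustrative; decodebin2 also emits the "new-decoded-pad" signal, so the callbacks below stay the same):

audio_decoder = gst_element_factory_make("decodebin2","a_decodebin");
video_decoder = gst_element_factory_make("decodebin2","v_decodebin");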

Then, you are linking some elements dynamically: the demuxer to the queues, and the decodebins to the convertors. Hence you should not create the decodebin-to-convertor links with gst_element_link_many:

   gst_element_link(source,demuxer);
-  gst_element_link_many(audioqueue,video_decoder,audio_convertor,audio_sink,NULL);
-  gst_element_link_many(videoqueue,video_decoder,video_convertor,video_sink,NULL);
+  gst_element_link_many(audioqueue,audio_decoder,NULL);
+  gst_element_link_many(audio_convertor,audio_sink,NULL);
+  gst_element_link_many(videoqueue,video_decoder,NULL);
+  gst_element_link_many(video_convertor,video_sink,NULL);

And of course, as we added the audio_decoder decodebin, we need to handle its pad-creation signal:

+  g_signal_connect(audio_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);
   g_signal_connect(video_decoder,"new-decoded-pad",G_CALLBACK(dynamic_decodepad),NULL);

And now we are at the most interesting part.

void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
  GstPad *audiodemuxsink;
  GstPad *videodemuxsink;
  GstElement *decoder = (GstElement *)data;
  g_print(" In dynamic ADDING PAD\n");

  audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
  gst_pad_link(pad,audiodemuxsink );
  videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
  gst_pad_link(pad,videodemuxsink );
  g_print(" In dynamic ADDING PAD2\n");
}

This is completely wrong! dynamic_addpad is called on each pad creation. avidemux commonly creates two pads (one for each data stream): "audio_00" and "video_00". So dynamic_addpad will be called twice, and we need to distinguish what to link depending on the pad name:

void dynamic_addpad(GstElement *element, GstPad *pad, gpointer data)
{
  char* pad_name = gst_pad_get_name(pad);
  g_print(" In dynamic ADDING PAD %s\n", pad_name);

  if (g_str_has_prefix(pad_name,"audio")) {
    GstPad *audiodemuxsink = gst_element_get_static_pad(audioqueue,"sink");
    gst_pad_link(pad,audiodemuxsink );
  }
  else if (g_str_has_prefix(pad_name,"video")) {
    GstPad *videodemuxsink = gst_element_get_static_pad(videoqueue,"sink");
    gst_pad_link(pad,videodemuxsink );
  }
  g_free (pad_name);
}

Almost the same goes for dynamic_decodepad. Since each decodebin here creates only one src pad, it would be easier to create separate handlers for video_decoder and audio_decoder. But for pedagogical reasons I will do it in one function. Now we can distinguish which element to connect to the pad by its caps.

void dynamic_decodepad (GstElement* object, GstPad* pad, gboolean arg1,gpointer user_data)
{
  GstPad* videoconvertsink = gst_element_get_static_pad(video_convertor,"sink");
  if (gst_pad_can_link(pad,videoconvertsink)) {
    gst_pad_link(pad,videoconvertsink);
  }

  GstPad* audioconvertsink  = gst_element_get_static_pad(audio_convertor,"sink");
  if (gst_pad_can_link(pad,audioconvertsink)) {
    gst_pad_link(pad,audioconvertsink);
  }
}

gst_pad_can_link will not work in dynamic_addpad, because it is possible to link the queue element to both "audio_00" and "video_00".
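If you wanted to avoid matching on pad names in dynamic_addpad, you could inspect the demuxer pad's caps instead. A minimal sketch, assuming GStreamer 0.10 (the function name dynamic_addpad_by_caps is purely illustrative):

void dynamic_addpad_by_caps(GstElement *element, GstPad *pad, gpointer data)
{
  /* Look at the media type of the newly created demuxer pad. */
  GstCaps *caps = gst_pad_get_caps(pad);
  GstStructure *str = gst_caps_get_structure(caps, 0);
  const gchar *media_type = gst_structure_get_name(str); /* e.g. "audio/..." or "video/..." */

  if (g_str_has_prefix(media_type, "audio")) {
    GstPad *sinkpad = gst_element_get_static_pad(audioqueue, "sink");
    gst_pad_link(pad, sinkpad);
    gst_object_unref(sinkpad);
  } else if (g_str_has_prefix(media_type, "video")) {
    GstPad *sinkpad = gst_element_get_static_pad(videoqueue, "sink");
    gst_pad_link(pad, sinkpad);
    gst_object_unref(sinkpad);
  }

  gst_caps_unref(caps);
}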

That's it. Don't hesitate to ask if you have other questions.
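For reference, here is a consolidated sketch of how the element creation, static linking and signal connections in main() could look with all of the changes above applied (assuming GStreamer 0.10; the use of decodebin2 and the decoder names are illustrative, not the answer's verbatim code):

  audio_decoder = gst_element_factory_make("decodebin2","a_decodebin");
  video_decoder = gst_element_factory_make("decodebin2","v_decodebin");

  gst_bin_add_many(Bin, source, demuxer,
                   audioqueue, audio_decoder, audio_convertor, audio_sink,
                   videoqueue, video_decoder, video_convertor, video_sink,
                   NULL);

  /* Static links only; the demuxer->queue and decodebin->convertor links
     are made at runtime in dynamic_addpad and dynamic_decodepad. */
  gst_element_link(source, demuxer);
  gst_element_link(audioqueue, audio_decoder);
  gst_element_link(audio_convertor, audio_sink);
  gst_element_link(videoqueue, video_decoder);
  gst_element_link(video_convertor, video_sink);

  g_signal_connect(demuxer, "pad-added", G_CALLBACK(dynamic_addpad), NULL);
  g_signal_connect(audio_decoder, "new-decoded-pad", G_CALLBACK(dynamic_decodepad), NULL);
  g_signal_connect(video_decoder, "new-decoded-pad", G_CALLBACK(dynamic_decodepad), NULL);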
