渲染解码帧的最佳方法是什么? [英] What's the best method for rendering a decoded frame?

查看:74
本文介绍了渲染解码帧的最佳方法是什么?的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

在下面的代码中,我需要知道是否使用了正确的方法来渲染解码和重新缩放后获得的帧。此外,我得到例外。我不知道异常是什么,因为我在一段时间后看到红色x而不是图像。

In the following code, I need to know whether I used a proper method for rendering frames obtained after decoding and rescaling. Besides, I get exception. I don't know what the exception is, since I see red x instead of images after some time.

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Runtime.InteropServices;
using System.IO;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Diagnostics;
using FFmpeg.AutoGen;
using System.Threading;
using System.Threading.Tasks;
using System.Drawing.Imaging;

namespace player.cs
{
	public partial class Form1 : Form
	{
		private bool _bPlaying, _bInfoCtrlsUpdated;
		private string _path;
		private string _resolution;
		private string _timebase;
		private string _codecId;
		private Bitmap _bmp;
		
		public Form1()
		{
			InitializeComponent();
			backgroundWorker1.DoWork += backgroundWorker1_DoWork;
			backgroundWorker1.ProgressChanged += backgroundWorker1_ProgressChanged;
			_path = "1.mp4";
			tbPath.Text = _path;
		}

		unsafe void PlayVideoFile(DoWorkEventArgs e)
		{
			AVBufferRef* hw_device_ctx;
			Debug.Assert(FFmpegInvoke.av_hwdevice_ctx_create(&hw_device_ctx,
							 FFmpeg.AutoGen.AVHWDeviceType.AV_HWDEVICE_TYPE_DXVA2, null, null, 0) == 0);
			AVFormatContext* input_ctx = null;
			Debug.Assert(FFmpegInvoke.avformat_open_input(&input_ctx, _path, null, null) == 0);
			Debug.Assert(FFmpegInvoke.avformat_find_stream_info(input_ctx, null) >= 0);
			AVCodec* decoder = null;
			int video_stream = FFmpegInvoke.av_find_best_stream(input_ctx, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
			Debug.Assert(video_stream >= 0);
			AVCodecContext* decoder_ctx = null;
			decoder_ctx = FFmpegInvoke.avcodec_alloc_context3(decoder);
			Debug.Assert(decoder_ctx != null);
			AVStream* video = null;
			video = input_ctx->streams[video_stream];
			Debug.Assert(FFmpegInvoke.avcodec_parameters_to_context(decoder_ctx, video->codecpar) >= 0);
			decoder_ctx->hw_device_ctx = FFmpegInvoke.av_buffer_ref(hw_device_ctx);
			Debug.Assert(FFmpegInvoke.avcodec_open2(decoder_ctx, decoder, null) >= 0);
			AVFrame* frame = FFmpegInvoke.av_frame_alloc();
			Debug.Assert(frame != null);
			AVPacket packet;
			_resolution = string.Format("{0}x{1}", decoder_ctx->width, decoder_ctx->height);
			_timebase = string.Format("{0}/{1}", decoder_ctx->time_base.num, decoder_ctx->time_base.den);
			_codecId = decoder_ctx->codec_id.ToString();
			AVPixelFormat convertToPixFmt = AVPixelFormat.AV_PIX_FMT_RGBA;
			int dest_width = pictureBox1.Width, dest_height = pictureBox1.Width;
			SwsContext* convertContext = FFmpegInvoke.sws_getContext(
				decoder_ctx->width, decoder_ctx->height, AVPixelFormat.AV_PIX_FMT_YUV420P,
				dest_width, dest_height, convertToPixFmt,
				FFmpegInvoke.SWS_FAST_BILINEAR, null, null, null);
			AVFrame* convertedFrame = FFmpegInvoke.av_frame_alloc();
			int convertedFrameAspectBufferSize = FFmpegInvoke.avpicture_get_size(convertToPixFmt, dest_width, dest_height);
			void* convertedFrameBuffer = FFmpegInvoke.av_malloc((uint)convertedFrameAspectBufferSize);
			FFmpegInvoke.avpicture_fill((AVPicture*)convertedFrame, (byte*)convertedFrameBuffer, convertToPixFmt, dest_width, dest_height);
			while (true)
			{
				while (true)
				{
					backgroundWorker1.ReportProgress((int)(video->cur_dts * 100 / video->duration));
					if (backgroundWorker1.CancellationPending)
						break;
					int ret = FFmpegInvoke.av_read_frame(input_ctx, &packet);
					if (ret == FFmpegInvoke.AVERROR_EOF)
						break;
					Debug.Assert(ret >= 0);
					if (video_stream != packet.stream_index)
						continue;
					ret = FFmpegInvoke.avcodec_send_packet(decoder_ctx, &packet);
					Debug.Assert(ret >= 0);
					ret = FFmpegInvoke.avcodec_receive_frame(decoder_ctx, frame);
					if (ret < 0)
						continue;
					ret = FFmpegInvoke.sws_scale(
						convertContext,
						&frame->data_0,
						frame->linesize,
						0,
						frame->height,
						&convertedFrame->data_0,
						convertedFrame->linesize);
					Debug.Assert(ret >= 0);
					var bmp = new Bitmap(
						dest_width,
						dest_height,
						convertedFrame->linesize[0],
						PixelFormat.Format32bppPArgb,
						new IntPtr(convertedFrame->data_0));
					pictureBox1.Image = bmp;
				}
				if (backgroundWorker1.CancellationPending)
				{
					e.Cancel = true;
					break;
				}
				Debug.Assert(FFmpegInvoke.av_seek_frame(input_ctx, video_stream, 0, FFmpegInvoke.AVSEEK_FLAG_FRAME) == 0);
			}
			FFmpegInvoke.av_frame_free(&frame);
			FFmpegInvoke.av_packet_unref(&packet);
			FFmpegInvoke.avcodec_free_context(&decoder_ctx);
			FFmpegInvoke.avformat_close_input(&input_ctx);
			FFmpegInvoke.av_buffer_unref(&hw_device_ctx);
		}

		private void button1_Click(object sender, EventArgs e)
		{
			var dlg = new OpenFileDialog();
			if (dlg.ShowDialog(this) == DialogResult.OK)
				_path = dlg.FileName;
			tbPath.Text = _path;
		}

		private void button2_Click(object sender, EventArgs e)
		{
			if (_bPlaying)
				backgroundWorker1.CancelAsync();
			else
				backgroundWorker1.RunWorkerAsync();
			button2.Text = _bPlaying ? "Play" : "Pause";
			_bPlaying = !_bPlaying;
		}

		void Play(DoWorkEventArgs e)
		{
			_bInfoCtrlsUpdated = false;
			PlayVideoFile(e);
		}

		private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
		{
			Play(e);
		}

		private void backgroundWorker1_ProgressChanged(object sender, ProgressChangedEventArgs e)
		{
			if (!_bInfoCtrlsUpdated)
			{
				tbResolution.Text = _resolution;
				tbTimebase.Text = _timebase;
				tbCodecID.Text = _codecId;
				_bInfoCtrlsUpdated = true;
			}
			progressBar1.Value = e.ProgressPercentage;
		}
	}
}





我尝试了什么:



我试过LockBits不是每次都为每一帧创建一个新的Bitmap实例,但它没有用。此外,我不知道它是否是一个很好的解决方案。



What I have tried:

I tried LockBits not to create a new instance of Bitmap every time for each frame, but it didn't work. Besides, I don't know whether it has been a good solution.

推荐答案

您正在尝试在不是的线程上执行UI(用户界面)操作UI线程。



在backgroundWorker1_ProgressChanged方法中创建和显示Bitmap或使用this.Invoke在Form UI线程上执行代码并生成Bitmap并显示在调用的方法。



您还可以从后台线程获取pictureBox1的宽度和高度 - 考虑缓存这些并在调整pictureBox1大小时更改它们。
You are trying to perform UI(User Interface) operations on a thread that is not the UI thread.

Either create and display the Bitmap in the backgroundWorker1_ProgressChanged method or use this.Invoke to execute code on the Form UI thread and generate the Bitmap and display it in the method invoked.

You also get the width and height of the pictureBox1 from the background thread - consider caching these and changing them when pictureBox1 is resized.


这篇关于渲染解码帧的最佳方法是什么?的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆