Writing the simplest video player with FFmpeg + SDL 2.0

It took me several days, but I finally got this thing working.

Compared with SDL 1.x, the SDL2 API has changed quite a bit, especially on the rendering side.

There are hardly any examples online written with SDL2, so I had to keep exploring and experimenting on my own.

Both FFmpeg and SDL can be downloaded directly from their official sites, and prebuilt VC library packages are available these days. How to configure them is covered in many places online, so I won't go into it here.
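For reference, one common way to pull the import libraries into a VC project is with linker pragmas. This is only a sketch: the .lib names below assume the usual prebuilt development packages (avcodec.lib, avformat.lib, avutil.lib, swscale.lib, SDL2.lib, SDL2main.lib) and may differ in your download.

//MSVC-specific; equivalent to listing the .lib files in the project's linker settings
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "SDL2.lib")
#pragma comment(lib, "SDL2main.lib")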

A few SDL2 tutorials I recommend:

http://adolfans.github.io/sdltutorialcn/blog/2013/01/25/sdl-2-dot-0-tutorial-index/

http://www.sdltutorials.com/tutorials

The FFmpeg API documentation is available here:

http://ffmpeg.org/doxygen/2.0/index.html

One thing to note: when creating the project, it is best to choose a console application with a precompiled header, i.e. the kind that comes with a stdafx.h header.

Another blog worth studying; my code was written with reference to his posts:

http://blog.csdn.net/leixiaohua1020/article/details/8652605

There is also a very good tutorial; it is a bit dated, but the basic ideas still hold and it is well worth reading:

How to write a simple player with FFmpeg

Finally, you need to understand pixel formats a little; see:

An illustrated guide to the YUV formats

The various pixel formats are declared in SDL's header SDL_pixels.h.
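To see how these map onto FFmpeg (a standalone sketch, not part of the player): FFmpeg's PIX_FMT_YUV420P is a planar 4:2:0 layout, and SDL2's matching texture formats are SDL_PIXELFORMAT_IYUV (planes stored Y, U, V) and SDL_PIXELFORMAT_YV12 (Y, V, U). Because SDL_UpdateYUVTexture receives the three planes explicitly, the player below can feed a YUV420P frame into its YV12 texture. SDL_GetPixelFormatName prints the canonical names:

#include "SDL.h"
#include <stdio.h>

int main(int argc, char *argv[]){
	//print the canonical names of SDL2's two planar 4:2:0 pixel formats
	printf("%s\n", SDL_GetPixelFormatName(SDL_PIXELFORMAT_YV12));	//Y plane, then V, then U
	printf("%s\n", SDL_GetPixelFormatName(SDL_PIXELFORMAT_IYUV));	//Y plane, then U, then V
	return 0;
}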

That's enough preamble; here is the code.

#include "stdafx.h"

extern "C"{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#include "SDL.h"
#include "SDL_image.h"
#include "SDL_thread.h"

#include <iostream>
using namespace std;


/*
//Save an RGB24 frame as a PPM image (left over from the classic tutorial; unused here)
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame){
	FILE *pFile;
	char szFilename[32];
	int y;

	sprintf(szFilename, "frame%d.ppm", iFrame);
	pFile = fopen(szFilename, "wb");
	if (pFile == NULL){
		cout << "open file:" << szFilename << " error." << endl;
		return;
	}

	//Write header
	fprintf(pFile, "P6\n%d %d\n255\n", width, height);
	//Write pixel data, one row at a time
	for (y = 0; y < height; y++)
		fwrite(pFrame->data[0] + y*pFrame->linesize[0], 1, width * 3, pFile);
	//Close file
	fclose(pFile);
}*/

int _tmain(int argc, char *argv[]){
	av_register_all();	//Register all file formats and codecs so they are used automatically when a file of a matching format is opened
	AVFormatContext *pFormatCtx;
	pFormatCtx = avformat_alloc_context();

	char filepath[] = "../../resource/test.mp4";
	//Open an input stream and read the header
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){
		printf("Can't open the file\n");
		return -1;
	}
	//Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0){
		printf("Couldn't find stream information.\n");
		return -1;
	}

	int i, videoIndex;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;

	//Find the first video stream
	videoIndex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++){	//nb_streams: number of streams (audio and video) in the file
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
			videoIndex = i;
			break;
		}
	}
	if (videoIndex == -1)
		return -1;
	//Get a pointer to the codec context for the video stream
	//The information about the codec used by a stream is what we call the
	//"codec context"; it contains everything about the codec that the stream uses
	pCodecCtx = pFormatCtx->streams[videoIndex]->codec;
	//Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL){
		printf("Unsupported codec!\n");
		return -1;
	}
	//Open codec
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
		printf("Could not open codec.\n");
		return -1;
	}
	//allocate video frames and set their fields to default values
	AVFrame *pFrame, *pFrameYUV;
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();

	//Even though we have allocated a frame, we still need somewhere to put the raw
	//data when we convert it. We use avpicture_get_size to get the size we need,
	//then allocate the buffer by hand:
	uint8_t *out_buffer;
	int numBytes;
	numBytes = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	//av_malloc is FFmpeg's malloc: a thin wrapper around malloc that guarantees
	//aligned memory addresses. It does not protect you from memory leaks,
	//double frees, or other malloc problems.
	out_buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	//Assign appropriate parts of buffer to image planes in pFrameYUV
	//Note that pFrameYUV is an AVFrame, but AVFrame is a superset of AVPicture
	avpicture_fill((AVPicture*)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);


	//----------------SDL--------------------------------------//
	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)){
		printf("Could not initialize SDL -%s\n", SDL_GetError());
		exit(1);
	}

	SDL_Window *window = nullptr;
	window = SDL_CreateWindow("MyPlayer", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
		pCodecCtx->width, pCodecCtx->height, SDL_WINDOW_SHOWN);
	if (!window){
		cout << SDL_GetError() << endl;
		return 1;
	}

	SDL_Renderer *ren = nullptr;
	ren = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
	if (ren == nullptr){
		cout << SDL_GetError() << endl;
		return -1;
	}

	SDL_Texture *texture = nullptr;
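	//create a streaming texture in a planar YUV format, the same size as the video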
	texture = SDL_CreateTexture(ren, SDL_PIXELFORMAT_YV12,
		SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
	SDL_Rect rect;
	rect.x = 0, rect.y = 0;
	rect.w = pCodecCtx->width;
	rect.h = pCodecCtx->height;

	//*************************************************************//
	//Read the whole video stream packet by packet, decode each packet into a frame,
	//then convert the frame's format and display it
	int frameFinished;
	//int psize = pCodecCtx->width * pCodecCtx->height;
	AVPacket packet;
	av_init_packet(&packet);	//av_read_frame allocates the packet data itself

	//output file information
	cout << "File information --------------------------" << endl;
	av_dump_format(pFormatCtx, 0, filepath, 0);
	cout << "--------------------------------------------" << endl;

	i = 0;
	int ret;
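	//set up the software scaler that converts decoded frames from their native pixel format to YUV420P at the same resolution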
	static struct SwsContext *img_convert_ctx;
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
		pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P,
		SWS_BICUBIC, NULL, NULL, NULL);

	//Read the next frame of a stream
	while (av_read_frame(pFormatCtx, &packet) >= 0){
		//Is this a packet from the video stream?
		if (packet.stream_index == videoIndex){
			//decode video frame of size packet.size from packet.data into picture
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
			//Did we get a video frame?
			if (ret >= 0){
				//Convert the image from its native format to YUV
				if (frameFinished){
					sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data,
						pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

					SDL_UpdateYUVTexture(texture, &rect, pFrameYUV->data[0], pFrameYUV->linesize[0],
						pFrameYUV->data[1], pFrameYUV->linesize[1], pFrameYUV->data[2], pFrameYUV->linesize[2]);
					
					SDL_RenderClear(ren);
					SDL_RenderCopy(ren, texture, &rect, &rect);
					SDL_RenderPresent(ren);
				}
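				//crude pacing: wait ~50 ms per video packet instead of using the stream's real frame rate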
				SDL_Delay(50);
			}
			else{
				cout << "decode error" << endl;
				return -1;
			}
		}
		//free the packet that av_read_frame allocated
		av_free_packet(&packet);
	}

	SDL_Event event;
	if (SDL_PollEvent(&event)){	//only inspect the event if one was actually pending
		switch (event.type){
		case SDL_QUIT:
			SDL_Quit();
			exit(0);
			break;
		default:
			break;
		}
	}

	sws_freeContext(img_convert_ctx);

	SDL_DestroyTexture(texture);
	SDL_DestroyRenderer(ren);
	SDL_DestroyWindow(window);

	av_free(out_buffer);
	av_frame_free(&pFrame);
	av_frame_free(&pFrameYUV);

	avcodec_close(pCodecCtx);

	avformat_close_input(&pFormatCtx);

	SDL_Quit();

	return 0;
}
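One limitation of the code above: it only looks at SDL events after the whole file has been decoded, so the window cannot be closed while the video is playing, and the single poll at the end may well find no event pending. A common variation, sketched here with a hypothetical quit flag, is to pump events inside the read loop right after each frame is presented:

	//before the read loop:
	bool quit = false;

	//inside the while (av_read_frame(...)) loop, after SDL_RenderPresent(ren):
	SDL_Event event;
	while (SDL_PollEvent(&event)){
		if (event.type == SDL_QUIT)
			quit = true;	//request shutdown
	}
	if (quit)
		break;	//leave the read loop and fall through to the cleanup code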