天天看点

FFMPEG 实时解码网络H264码流,RTP封装

初学FFMPEG和H264,解码视频流时遇到了很多麻烦,记录一下研究成果。

我使用的FFMPEG 2.5.2版本,使用av_parser_parse2重组图像帧时遇到了一些麻烦!

下面是主要代码:

RTP头定义,

/*
 * Fixed 12-byte RTP header (RFC 3550), declared with bit-fields.
 * NOTE(review): bit-field packing order is compiler-specific; this layout
 * matches little-endian MSVC/GCC conventions used by the rest of the code.
 */
typedef struct   
{  
	/* byte 0 */  
	unsigned char csrc_len:4;        /* CSRC count, expect 0 */  
	unsigned char extension:1;       /* extension bit */  
	unsigned char padding:1;         /* expect 0 */  
	unsigned char version:2;         /* expect 2 */  
	/* byte 1 */  
	unsigned char payload:7;         /* payload type */  
	unsigned char marker:1;          /* marker bit, expect 1 */  
	/* bytes 2, 3 */  
	unsigned short seq_no;              
	/* bytes 4-7: must be exactly 32 bits. `unsigned long` is 8 bytes on
	 * LP64 (64-bit Linux/macOS), which would silently grow the struct and
	 * break the header stripping done via sizeof(RTP_FIXED_HEADER). */
	unsigned int timestamp;          
	/* bytes 8-11 */  
	unsigned int ssrc;               /* stream number is used here. */  
} RTP_FIXED_HEADER; 
           

初始化,

int CVPPMediaPlayer::init_decode()
{
	av_init_packet(&m_avpkt);

	m_codec = avcodec_find_decoder(CODEC_ID_H264);
	if(!m_codec){
		TRACE(_T("Codec not found\n"));
		return -1;
	}
	m_pCodecCtx = avcodec_alloc_context3(m_codec);
	if(!m_pCodecCtx){
		TRACE(_T("Could not allocate video codec context\n"));
		return -1;
	}

	m_pCodecParserCtx=av_parser_init(AV_CODEC_ID_H264);
	if (!m_pCodecParserCtx){
		TRACE(_T("Could not allocate video parser context\n"));
		return -1;
	}
	
	if(m_codec->capabilities&CODEC_CAP_TRUNCATED)
		m_pCodecCtx->flags|= CODEC_FLAG_TRUNCATED; 

	if (avcodec_open2(m_pCodecCtx, m_codec, NULL) < 0) {
		TRACE(_T("Could not open codec\n"));
		return -1;
	}

	m_picture = av_frame_alloc();
	m_pFrameRGB = av_frame_alloc();
	if(!m_picture || !m_pFrameRGB){
		TRACE(_T("Could not allocate video frame\n"));
		return -1;
	}

	m_PicBytes = 0;
	m_PicBuf = NULL;
	m_pImgCtx = NULL;

	return 0;
}
           

反初始化,

/*
 * Release everything created by init_decode(). Safe to call even when some
 * members were never allocated or init_decode() failed part-way.
 */
void CVPPMediaPlayer::uninit_decode()
{
	if(m_pCodecCtx){
		// Close only a valid context: the original called avcodec_close()
		// unconditionally, before the NULL check.
		avcodec_close(m_pCodecCtx);
		av_free(m_pCodecCtx);
		m_pCodecCtx = NULL;
	}
	if(m_picture){
		av_frame_free(&m_picture);     // also NULLs the pointer
		m_picture = NULL;
	}
	if(m_pFrameRGB){
		av_frame_free(&m_pFrameRGB);
		m_pFrameRGB = NULL;
	}
	if(m_pImgCtx){
		sws_freeContext(m_pImgCtx);
		m_pImgCtx = NULL;              // avoid a dangling pointer on re-init
	}

	if(m_pCodecParserCtx){
		av_parser_close(m_pCodecParserCtx);
		m_pCodecParserCtx = NULL;
	}

	m_PicBytes = 0;
	if(m_PicBuf != NULL){
		// Allocated with `new uint8_t[...]` -> must be delete[], not delete.
		delete [] m_PicBuf;
		m_PicBuf = NULL;
	}
}
           

网络数据流接收,收到RTP包后去掉RTP包头,在视频帧分片前加上00 00 00 01;这里没有处理FU-A格式的H264数据,如果你的H264数据是FU-A打包的,需要做另外的处理。

/*
 * Strip the fixed RTP header from an incoming packet and prepend the
 * Annex-B start code (00 00 00 01) so the payload can be fed to the parser.
 * FU-A fragmented NAL units are NOT handled here (see note in the text).
 *
 * in_buf/in_len  : raw RTP packet as received from the socket
 * out_buf        : destination; caller must provide at least
 *                  in_len - sizeof(RTP_FIXED_HEADER) + 4 bytes
 * out_len        : [out] number of bytes written to out_buf
 * Returns TRUE on success, FALSE if the packet is too short to hold a payload.
 */
BOOL CVPPMediaPlayer::get_h264_data(char* in_buf, int in_len, char* out_buf, int &out_len)
{
	static const char nalu[4] = {0x00, 0x00, 0x00, 0x01};
	const int rtp_head_len = sizeof(RTP_FIXED_HEADER);

	// Reject truncated packets: with in_len <= rtp_head_len the original
	// code passed a negative length to memcpy, which converts to a huge
	// size_t and crashes on malformed network input.
	if(in_buf == NULL || out_buf == NULL || in_len <= rtp_head_len){
		out_len = 0;
		return FALSE;
	}

	memcpy(out_buf, nalu, 4);
	memcpy(out_buf + 4, in_buf + rtp_head_len, in_len - rtp_head_len);
	out_len = in_len - rtp_head_len + 4;

	return TRUE;
}

/*
 * Receive-loop thread body: binds a UDP socket on this line's local video
 * address, then polls it with select(); each received RTP packet has its
 * header stripped (get_h264_data) and the Annex-B payload is handed to
 * h264_decode(). Returns 0 on normal shutdown (call went idle), -1 on
 * socket/select/recv errors.
 * NOTE(review): uses Winsock (closesocket) — assumes WSAStartup() was done
 * elsewhere; confirm. Also assumes the caller runs this on its own thread,
 * since the loop blocks for up to RTP_RECV_TIMEOUT seconds per iteration.
 */
int CVPPMediaPlayer::video_recv_poll()
{
	char buf[RTP_RECV_BUF_LEN];        // raw RTP datagram from the socket
	char h264_buf[DECODE_BUF_SIZE];    // start code + H.264 payload for the decoder
	int sockfd, max_fd;
	int ret = 0;
	int len, out_len, addr_len;
	int line;                          // NOTE(review): unused in this function

	struct sockaddr_in local_addr;
	struct sockaddr_in remote_addr;

	fd_set rset;
	struct timeval timeout = {RTP_RECV_TIMEOUT, 0};

	RTP_FIXED_HEADER* p_rtp_head;
	int rtp_head_len = sizeof(RTP_FIXED_HEADER);

	// Bind to the local IP/port negotiated for this call line.
	memset(&local_addr, 0, sizeof(local_addr));
	local_addr.sin_family = AF_INET;
	local_addr.sin_addr.s_addr = inet_addr(g_vpp_call.lines[m_VcId].meida_info.local_ip);
	local_addr.sin_port = htons(g_vpp_call.lines[m_VcId].meida_info.local_video_port);

	sockfd = socket(PF_INET, SOCK_DGRAM, 0);
	if(sockfd <= 0){
		TRACE(_T("Create socket for video recv failed\n"));
		return -1;
	}

	if(bind(sockfd, (struct sockaddr *)&local_addr, sizeof(local_addr)) < 0){
		TRACE(_T("bind failed\n"));
		closesocket(sockfd);
		return -1;
	}

	while(1)
	{
		// select() may modify the timeout — reset it every iteration.
		timeout.tv_sec = RTP_RECV_TIMEOUT; 
		timeout.tv_usec = 0;
		FD_ZERO(&rset);
		FD_SET(sockfd, &rset);
		max_fd = sockfd + 1;
		ret = select(max_fd, &rset, NULL, NULL, &timeout);
		// Shutdown is detected by polling the call status, so the select
		// timeout bounds how long shutdown can take.
		if(g_vpp_call.lines[m_VcId].status == CALL_STATUS_IDLE){
			TRACE(_T("Thread for recv video rtp end!\n"));
			PostMessage(WM_CLOSE_MEDIA_PLAYER);
			closesocket(sockfd);
			return 0;
		}

		if(ret < 0){
			TRACE(_T("select error"));
			PostMessage(WM_CLOSE_MEDIA_PLAYER);
			closesocket(sockfd);
			return -1;
		}else if(ret == 0){
			// Timed out with no data: loop again (also re-checks call status).
			//TRACE(_T("select timeout %d s"), RTP_RECV_TIMEOUT);
			continue;
		}else{
			memset(buf, 0, sizeof(buf));
			memset(h264_buf, 0, sizeof(h264_buf));
			addr_len = sizeof(struct sockaddr);
			len = recvfrom(sockfd, buf, RTP_RECV_BUF_LEN, 0, (struct sockaddr *)&remote_addr, &addr_len);
			if(len <= 0){
				TRACE(_T("socket was shut, close it"));
				PostMessage(WM_CLOSE_MEDIA_PLAYER);
				closesocket(sockfd);
				return -1;
			}

			
			// Log the SSRC for stream identification, then strip the RTP
			// header and feed the Annex-B payload to the decoder.
			p_rtp_head = (RTP_FIXED_HEADER*)buf;
			TRACE(_T("recv video stream, len=%d, ssrc=0x%08x\n"), len, ntohl(p_rtp_head->ssrc));
			out_len = RTP_RECV_BUF_LEN;
			if(get_h264_data(buf, len, h264_buf, out_len)){
				h264_decode(h264_buf, out_len);
			}
		}
	}

	return 0;
}
           

重组视频帧并解码,解码流程为 H264->YUV->RGB,得到RGB后直接显示出来就行了。

这里使用av_parser_parse2来将分片组成视频帧,当返回值为0时表示前面的分片已经组成一帧完整的视频帧,可以送给解码函数进行解码了。需要注意的是此时当前的分片并没有加入到视频帧中(也就是通过当前的分片判断出前面的所有分片已经组成了视频帧),所以当前的分片需要加入到下一帧中。我就在这里吃了大亏,没有把当前的分片加入到下一帧中,导致解码出来的画面很模糊,纠结了好几天。

FFMPEG 实时解码网络H264码流,RTP封装

当av_parser_parse2返回0时,将m_avpkt送给解码函数avcodec_decode_video2,同时再次调用av_parser_parse2函数,将当前分片的buf送进去。

/*
 * Feed one Annex-B chunk (start code + NAL payload) to the H.264 parser and,
 * when the parser signals a complete frame, decode it, convert YUV -> BGR24
 * and display it. Returns 0 on success (including "frame not complete yet"),
 * -1 on bad arguments or decode error.
 *
 * Flow quirk (explained in the surrounding text): av_parser_parse2() returns
 * 0 exactly when the PREVIOUS fragments form a complete frame — the current
 * chunk is NOT part of that frame. So after decoding we must call
 * av_parser_parse2() again with the same buf so the current chunk starts the
 * next frame; omitting that second call produces corrupted/blurry output.
 */
int CVPPMediaPlayer::h264_decode(char* buf, int buf_len)
{
	int got, len, paser_len;
	

	if(buf == NULL || buf_len == 0){
		return -1;
	}

	// Parser assembles fragments into m_avpkt.data/size; a return of 0 means
	// m_avpkt now holds one complete frame (and buf was NOT consumed into it).
	paser_len = av_parser_parse2(m_pCodecParserCtx, m_pCodecCtx, &m_avpkt.data, &m_avpkt.size, (uint8_t *)buf, buf_len,
		AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);

	if(paser_len == 0){
		// Debug: log the picture type detected by the parser.
		switch(m_pCodecParserCtx->pict_type){
			case AV_PICTURE_TYPE_I: TRACE(_T("Type: I\n"));break;
			case AV_PICTURE_TYPE_P: TRACE(_T("Type: P\n"));break;
			case AV_PICTURE_TYPE_B: TRACE(_T("Type: B\n"));break;
			default: TRACE(_T("Type: Other\n"));break;
		}
		len = avcodec_decode_video2(m_pCodecCtx, m_picture, &got, &m_avpkt);
		if(len < 0){
			TRACE(_T("Error while decoding frame\n"));
			return -1;
		}

		if(got){
			TRACE(_T("Got picture\n"));
			// Lazily allocate the BGR24 buffer once the stream dimensions
			// are known (first decoded frame).
			if(m_PicBytes == 0){
				m_PicBytes = avpicture_get_size(PIX_FMT_BGR24, m_pCodecCtx->width, m_pCodecCtx->height);
				m_PicBuf = new uint8_t[m_PicBytes];
				avpicture_fill((AVPicture *)m_pFrameRGB, m_PicBuf, PIX_FMT_BGR24,
					m_pCodecCtx->width, m_pCodecCtx->height);
			}

			// Lazily create the YUV -> BGR24 conversion context.
			if(!m_pImgCtx){
				m_pImgCtx = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt, m_pCodecCtx->width, m_pCodecCtx->height, PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
			}

			// Flip the picture vertically by pointing data[] at the last row
			// of each plane and negating the strides, so sws_scale emits a
			// bottom-up image (matching the positive-biHeight DIB used for
			// display). Chroma planes are half height, hence height/2.
			m_picture->data[0] += m_picture->linesize[0]*(m_pCodecCtx->height-1);
			m_picture->linesize[0] *= -1;                      
			m_picture->data[1] += m_picture->linesize[1]*(m_pCodecCtx->height/2-1);
			m_picture->linesize[1] *= -1;
			m_picture->data[2] += m_picture->linesize[2]*(m_pCodecCtx->height/2-1);
			m_picture->linesize[2] *= -1;
			sws_scale(m_pImgCtx, (const uint8_t* const*)m_picture->data, m_picture->linesize, 0, m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize); 

			display_picture(m_pFrameRGB->data[0], m_pCodecCtx->width, m_pCodecCtx->height);
		}
	}

	// Second parse with the SAME buf: the chunk that closed the previous
	// frame must now be fed in again so it becomes part of the next frame.
	if(paser_len == 0){
		paser_len = av_parser_parse2(m_pCodecParserCtx, m_pCodecCtx, &m_avpkt.data, &m_avpkt.size, (uint8_t *)buf, buf_len,
			AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
	}

	return 0;
}
           

在窗口中显示每一帧RGB图片;这里重载一个CStatic控件来做视频播放。

void CVideoStatic::init_bm_head()
{
	m_bm_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
	m_bm_info.bmiHeader.biWidth = m_width;
	m_bm_info.bmiHeader.biHeight = m_height;
	m_bm_info.bmiHeader.biPlanes =1;
	m_bm_info.bmiHeader.biBitCount = 24;
	m_bm_info.bmiHeader.biCompression = BI_RGB;
	m_bm_info.bmiHeader.biSizeImage = 0;
	m_bm_info.bmiHeader.biClrUsed = 0;
	m_bm_info.bmiHeader.biClrImportant = 0;
}


void CVideoStatic::display_pic(unsigned char* data, int width, int height)
{
	CRect  rc;
	HDC hdc = GetDC()->GetSafeHdc();
	GetClientRect(&rc);

	if(m_height != height || m_width != width){
		m_height = height;
		m_width = width;

		MoveWindow(0, 0, width, height, 0);
		Invalidate();
	}

	init_bm_head();

	DrawDibDraw(m_DrawDib,
		hdc,
		rc.left,
		rc.top,
		-1,	// don't stretch
		-1,
		&m_bm_info.bmiHeader, 
		(void*)data, 
		0, 
		0, 
		width, 
		height, 
		0);
}
           

存在的问题:

1、不能兼容FU-A方式封装的H264流,一些设备的视频无法播放。

2、分片组帧应该可以自己实现,不需要av_parser_parse2来做。