
FFMPEG audio/video development: capturing audio (alsa-lib) and video (V4L2 framework) on Linux, encoding, and pushing the stream in real time to an RTMP media server for live broadcasting

1. Environment

OS: Ubuntu 18.04 running in a virtual machine

FFMPEG version: 4.4.2

Camera: Logitech USB camera, or the computer's built-in camera

Sound card: the computer's built-in sound card

2. Installing FFMPEG and x264

See this post: FFMPEG development: capturing camera data on Linux and recording it to a local MP4 file

https://blog.csdn.net/xiaolong1126626497/article/details/104919095

3. Overview

The code runs three threads (a minimal sketch of the hand-off pattern follows this list):

Thread 1: reads camera frames through the Linux V4L2 framework.

Thread 2: reads sound-card data through the alsa-lib library.

Thread 3: encodes the video and audio with the FFMPEG libraries and pushes the stream to the specified RTMP server.
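Before the full listing, here is a minimal, self-contained sketch of the mutex-plus-condition-variable hand-off that the camera thread and the encoder use below. It is not taken from the original code; names such as capture_thread and frame_buf are placeholders. Unlike the article's code, it wraps pthread_cond_wait in a counter predicate, which is the textbook way to avoid lost wake-ups:

#include <pthread.h>
#include <stdio.h>

static int frames_produced = 0;               /* predicate, guarded by the mutex */
static unsigned char frame_buf[16];           /* stands in for YUV420P_Buffer */
static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ready = PTHREAD_COND_INITIALIZER;

/* capture thread: writes a new "frame" and wakes the encoder */
static void *capture_thread(void *arg)
{
    for (int i = 1; i <= 5; i++) {
        pthread_mutex_lock(&lock);
        frame_buf[0] = (unsigned char)i;      /* pretend we converted a frame */
        frames_produced = i;
        pthread_mutex_unlock(&lock);
        pthread_cond_broadcast(&ready);       /* same wake-up the camera thread uses */
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, capture_thread, NULL);

    int consumed = 0;
    while (consumed < 5) {
        pthread_mutex_lock(&lock);
        while (frames_produced == consumed)   /* wait until a newer frame exists */
            pthread_cond_wait(&ready, &lock);
        consumed = frames_produced;
        printf("encoder got frame %d\n", frame_buf[0]);
        pthread_mutex_unlock(&lock);
    }
    pthread_join(tid, NULL);
    return 0;
}

Compile it with gcc sketch.c -lpthread. The real program uses the same pattern with YUV420P_Buffer as the shared state; the audio side hands buffers over through a mutex-protected linked list instead.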

4. Core code

The RTMP push address is defined as a macro at the top of the file; replace it with your own address before use.

To make copy-and-paste testing easier, all of the code is written in a single .c file, so the listing is fairly long.

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
 
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/select.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <poll.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>
 
#include <alsa/asoundlib.h>
 
// address of the RTMP server to push to
#define RTMP_SERVE_RADDR  "rtmp://js.live-send.acg.tv/live-js/?streamname=live_68130189_71037877&key=b95d4cfda0c196518f104839fe5e7573"
 
#define STREAM_DURATION   10.0   /* record 10 seconds of video; because of buffering the output is usually about 8 seconds */
#define STREAM_FRAME_RATE 15     /* 15 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_BICUBIC
 
// fixed camera output resolution
#define VIDEO_WIDTH  640
#define VIDEO_HEIGHT 480
 
// buffers holding camera frames after conversion to YUV420P
unsigned char YUV420P_Buffer[VIDEO_WIDTH*VIDEO_HEIGHT*3/2];
unsigned char YUV420P_Buffer_temp[VIDEO_WIDTH*VIDEO_HEIGHT*3/2];
 
/* globals used by the camera capture */
unsigned char *image_buffer[4];
int video_fd;
pthread_mutex_t mutex;
pthread_cond_t cond;
 
/* globals used by the audio capture */
pthread_mutex_t mutex_audio;
 
extern int capture_audio_data_init( char *audio_dev);
extern int capture_audio_data(snd_pcm_t *capture_handle,int buffer_frames);
/*
 Audio capture: grabs PCM data from the sound card.
 Audio parameters:
     channels:     AUDIO_CHANNEL_SET (1 = mono)
     sample size:  16 bit, little-endian
     sample rate:  44100 Hz
*/
#define AudioFormat SND_PCM_FORMAT_S16_LE  // sample format; other common formats: SND_PCM_FORMAT_U24_LE, SND_PCM_FORMAT_U32_LE
#define AUDIO_CHANNEL_SET   1             // 1 = mono, 2 = stereo
#define AUDIO_RATE_SET 44100   // sample rate; common rates: 44100Hz, 16000Hz, 8000Hz, 48000Hz, 22050Hz
 
int buffer_frames;
snd_pcm_t *capture_handle;
snd_pcm_format_t format=AudioFormat;
 
 
// linked list holding captured audio buffers
struct AUDIO_DATA
{
    unsigned char* audio_buffer;
    struct AUDIO_DATA *next;
};
 
// the list head
struct AUDIO_DATA *list_head=NULL;
struct AUDIO_DATA *List_CreateHead(struct AUDIO_DATA *head);
void List_AddNode(struct AUDIO_DATA *head,unsigned char* audio_buffer);
void List_DelNode(struct AUDIO_DATA *head,unsigned char* audio_buffer);
int List_GetNodeCnt(struct AUDIO_DATA *head);
 
// wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;
 
    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;
 
    AVFrame *frame;
    AVFrame *tmp_frame;
 
    float t, tincr, tincr2;
 
    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;
 
 
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /*将輸出資料包時間戳值從編解碼器重新調整為流時基 */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;
        
    /*将壓縮的幀寫入媒體檔案*/
    return av_interleaved_write_frame(fmt_ctx, pkt);
}
 
 
/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;
 
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }
 
    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;
 
    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ? (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;  // audio bit rate
        c->sample_rate = 44100;  // audio sample rate
        c->channel_layout = AV_CH_LAYOUT_MONO; /* AV_CH_LAYOUT_MONO = mono, AV_CH_LAYOUT_STEREO = stereo */
        c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;
 
    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;
        // bit rate: proportional to output size; the higher the bit rate, the larger the output
        c->bit_rate = 400000; // 400 kbps
        /* resolution must be a multiple of 2 */
        c->width    = VIDEO_WIDTH;
        c->height   = VIDEO_HEIGHT;
        /* time base: the fundamental unit of time (in seconds) in which
         * frame timestamps are expressed. For fixed-fps content the
         * time base should be 1/framerate and the timestamp increment
         * should be 1. */
        ost->st->time_base = (AVRational){1,STREAM_FRAME_RATE};
        c->time_base       = ost->st->time_base;
        c->gop_size      = 12; /* emit one intra frame at most every 12 frames */
        c->pix_fmt       = STREAM_PIX_FMT;
        c->max_b_frames = 0;  // no B-frames, keeps latency low for live streaming
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) 
        {
            c->mb_decision = 2;
        }
    break;
 
    default:
        break;
    }
 
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
 
/**************************************************************/
/* audio output */
 
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    frame->format = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;
    if(nb_samples)
    {
        av_frame_get_buffer(frame, 0);
    }
    return frame;
}
 
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;
    c = ost->enc;
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }
    
 
    /* the next three lines set tone parameters; they are only needed when
       generating synthetic audio instead of real sound-card data */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
 
    // for AAC the encoder frame size is fixed at 1024 samples
    nb_samples = c->frame_size;
 
    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);
 
    /* copy the stream parameters to the muxer */
    avcodec_parameters_from_context(ost->st->codecpar, c);
 
    /* create resampler context */
    ost->swr_ctx = swr_alloc();
 
    /* set options */
    printf("c->channels=%d\n",c->channels);
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);
    
    /* initialize the resampling context */
    swr_init(ost->swr_ctx);
}
 
/* millisecond-level delay */
void Sleep(int ms)
{
    struct timeval delay;
    delay.tv_sec = 0;
    delay.tv_usec = ms * 1000; // convert ms to microseconds
    select(0, NULL, NULL, NULL, &delay);
}
 
 
/*
Fetch one audio frame.
The PCM data comes from the sound card, via the linked list filled by the capture thread.
*/
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int16_t *q = (int16_t*)frame->data[0];
    /* check whether we should keep generating frames; decides when the session ends */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;
 
   #if 1
    // busy-wait until the capture thread has queued at least one audio buffer
    int cnt=0;
    while(cnt<=0)
    {
        cnt=List_GetNodeCnt(list_head);
    }
    
    pthread_mutex_lock(&mutex_audio); /* lock */
 
    // take the first node's data
    struct AUDIO_DATA *tmp=list_head;
    unsigned char *buffer;
 
    tmp=tmp->next;
    if(tmp==NULL)
    {
        printf("audio data is NULL.\n");
        exit(0);
    }
    buffer=tmp->audio_buffer;
    
    // 1024 samples * 2 bytes * 1 channel = 2048 bytes
    memcpy(q,buffer,frame->nb_samples*sizeof(int16_t)*ost->enc->channels); // copy the audio data into the frame buffer
    
    List_DelNode(list_head,buffer);
    free(buffer);           
    pthread_mutex_unlock(&mutex_audio); /* unlock */
    #endif
    
    frame->pts = ost->next_pts;
    ost->next_pts  += frame->nb_samples;
    return frame;
}
 
 
/*
 * Encode one audio frame and send it to the muxer.
 * Returns 1 when encoding has finished, 0 otherwise.
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 };
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;
 
    av_init_packet(&pkt);
    c = ost->enc;
 
    frame = get_audio_frame(ost);
 
    if(frame)
    {
        /*使用重采樣器将樣本從本機格式轉換為目标編解碼器格式*/
         /*計算樣本的目标數量*/
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);
        av_frame_make_writable(ost->frame);
        /*轉換為目标格式 */
        swr_convert(ost->swr_ctx,
                    ost->frame->data, dst_nb_samples,
                    (const uint8_t **)frame->data, frame->nb_samples);
        frame = ost->frame;
        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }
 
    avcodec_encode_audio2(c, &pkt, frame, &got_packet);
 
    if (got_packet) 
    {
        write_frame(oc, &c->time_base, ost->st, &pkt);
    }
    return (frame || got_packet) ? 0 : 1;
}
 
 
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    picture = av_frame_alloc();
    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;
 
    /* allocate the buffers for the frame data */
    av_frame_get_buffer(picture, 32);
    return picture;
}
 
 
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;
    av_dict_copy(&opt, opt_arg, 0);
    /* open the codec */
    avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    ost->tmp_frame = NULL;
    /* 将流參數複制到多路複用器 */
    avcodec_parameters_from_context(ost->st->codecpar, c);
}
 
 
/*
Prepare the image data.
A YUV422 (YUYV) frame occupies width * height * 2 bytes.
A YUV420P frame occupies width * height * 3/2 bytes.
*/
static void fill_yuv_image(AVFrame *pict, int frame_index,int width, int height)
{
    int y_size=width*height;
    /* wait until the capture thread signals that a fresh frame is ready */
    pthread_mutex_lock(&mutex);
    pthread_cond_wait(&cond,&mutex);
    memcpy(YUV420P_Buffer_temp,YUV420P_Buffer,sizeof(YUV420P_Buffer));
    /* unlock */
    pthread_mutex_unlock(&mutex);
    
    // copy the YUV planes into the frame buffers; y_size = w * h
    memcpy(pict->data[0],YUV420P_Buffer_temp,y_size);
    memcpy(pict->data[1],YUV420P_Buffer_temp+y_size,y_size/4);
    memcpy(pict->data[2],YUV420P_Buffer_temp+y_size+y_size/4,y_size/4);
}
 
 
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;
 
    /* check whether we should keep generating frames; decides when recording stops */
    if(av_compare_ts(ost->next_pts, c->time_base,STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;
 
    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);
 
    // fill the frame with the latest camera image
    fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    ost->frame->pts = ost->next_pts++;
    return ost->frame;
}
 
/*
 * Encode one video frame and send it to the muxer.
 * Returns 1 when encoding has finished, 0 otherwise.
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = { 0 };
    c=ost->enc;
    // fetch one frame of video
    frame = get_video_frame(ost);
    av_init_packet(&pkt);
 
    /* encode the image */
    ret=avcodec_encode_video2(c, &pkt, frame, &got_packet);
 
    if(got_packet) 
    {
        ret=write_frame(oc, &c->time_base, ost->st, &pkt);
    }
    else
    {
        ret = 0;
    }
    return (frame || got_packet) ? 0 : 1;
}
 
 
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}
 
 
// encode the video and audio and push them to the RTMP server
int video_audio_encode(char *filename)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;
 
    /* allocate the output context (FLV muxer over RTMP) */
    avformat_alloc_output_context2(&oc,NULL,"flv",filename);
    fmt=oc->oformat;
    // select the encoders explicitly
    fmt->video_codec=AV_CODEC_ID_H264;
    fmt->audio_codec=AV_CODEC_ID_AAC;
    
    /* add the audio and video streams using the chosen codecs and initialize them */
    if(fmt->video_codec != AV_CODEC_ID_NONE)
    {
        add_stream(&video_st,oc,&video_codec,fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if(fmt->audio_codec != AV_CODEC_ID_NONE)
    {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }
 
    /* now that all parameters are set, open the audio and video codecs and allocate the necessary encode buffers */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);
 
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);
 
    av_dump_format(oc, 0, filename, 1);
 
    /* open the output URL, if needed */
    if(!(fmt->flags & AVFMT_NOFILE)) 
    {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            fprintf(stderr, "Could not open output '%s': %s\n", filename,av_err2str(ret));
            return 1;
        }
    }
 
    /* write the stream header, if any */
    avformat_write_header(oc,&opt);
 
    while(encode_video || encode_audio)
    {
        /* select the stream to encode next, interleaving by timestamp */
        if(encode_video &&(!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,audio_st.next_pts, audio_st.enc->time_base) <= 0))
        {
            //printf("encoded one video frame\n");
            encode_video = !write_video_frame(oc,&video_st);
        }
        else 
        {
            //printf("encoded one audio frame\n");
            encode_audio = !write_audio_frame(oc,&audio_st);
        }
    }
    
    av_write_trailer(oc);
    
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);
 
    if (!(fmt->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);
    avformat_free_context(oc);
    return 0;
}
 
 
/*
Initialize the camera device.
*/
int VideoDeviceInit(char *DEVICE_NAME)
{
    /* 1. open the camera device */
    video_fd=open(DEVICE_NAME,O_RDWR);
    if(video_fd<0)return -1;
 
    /* 2. set the pixel format and output frame size */
    struct v4l2_format video_formt;
    memset(&video_formt,0,sizeof(struct v4l2_format));  
    video_formt.type=V4L2_BUF_TYPE_VIDEO_CAPTURE; /* video capture device */
    video_formt.fmt.pix.height=VIDEO_HEIGHT; //480 
    video_formt.fmt.pix.width=VIDEO_WIDTH; //640
    video_formt.fmt.pix.pixelformat=V4L2_PIX_FMT_YUYV;
    if(ioctl(video_fd,VIDIOC_S_FMT,&video_formt))return -2;
    printf("camera resolution: width*height=%d*%d\n",video_formt.fmt.pix.width,video_formt.fmt.pix.height);
    
    /* 3. request capture buffers */
    struct v4l2_requestbuffers video_requestbuffers;
    memset(&video_requestbuffers,0,sizeof(struct v4l2_requestbuffers)); 
    video_requestbuffers.count=4;
    video_requestbuffers.type=V4L2_BUF_TYPE_VIDEO_CAPTURE; /* video capture device */
    video_requestbuffers.memory=V4L2_MEMORY_MMAP;
    if(ioctl(video_fd,VIDIOC_REQBUFS,&video_requestbuffers))return -3;
    printf("video_requestbuffers.count=%d\n",video_requestbuffers.count);
 
    /* 4. query each buffer and mmap it into user space */
    struct v4l2_buffer video_buffer;
    memset(&video_buffer,0,sizeof(struct v4l2_buffer));
    int i;
    for(i=0;i<video_requestbuffers.count;i++)
    {
        video_buffer.type=V4L2_BUF_TYPE_VIDEO_CAPTURE; /* video capture device */
        video_buffer.memory=V4L2_MEMORY_MMAP;
        video_buffer.index=i; /* buffer index */
        if(ioctl(video_fd,VIDIOC_QUERYBUF,&video_buffer))return -4;
        /* map the buffer */
        image_buffer[i]=mmap(NULL,video_buffer.length,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,video_buffer.m.offset);
        printf("image_buffer[%d]=%p\n",i,image_buffer[i]);
    }
    /* 5. queue all buffers for capture */
    memset(&video_buffer,0,sizeof(struct v4l2_buffer));
    for(i=0;i<video_requestbuffers.count;i++)
    {
        video_buffer.type=V4L2_BUF_TYPE_VIDEO_CAPTURE; /* video capture device */
        video_buffer.memory=V4L2_MEMORY_MMAP;
        video_buffer.index=i; /* buffer index */
        if(ioctl(video_fd,VIDIOC_QBUF,&video_buffer))return -5;
    }
    /* 6. start streaming */
    int opt=V4L2_BUF_TYPE_VIDEO_CAPTURE; /* video capture device */
    if(ioctl(video_fd,VIDIOC_STREAMON,&opt))return -6;
    return 0;
}
 
 
//YUYV==YUV422
int yuyv_to_yuv420p(const unsigned char *in, unsigned char *out, unsigned int width, unsigned int height)
{
    unsigned char *y = out;
    unsigned char *u = out + width*height;
    unsigned char *v = out + width*height + width*height/4;
    unsigned int i,j;
    unsigned int base_h;
    unsigned int  is_u = 1;
    unsigned int y_index = 0, u_index = 0, v_index = 0;
    unsigned long yuv422_length = 2 * width * height;
    // the byte sequence is YU YV YU YV ...; one YUYV frame is width * height * 2 bytes
    // Y is kept for every pixel; U and V are taken from every other row
    for(i=0; i<yuv422_length; i+=2)
    {
        *(y+y_index) = *(in+i);
        y_index++;
    }
    for(i=0; i<height; i+=2)
    {
        base_h = i*width*2;
        for(j=base_h+1; j<base_h+width*2; j+=2)
        {
            if(is_u)
            {
                *(u+u_index) = *(in+j);
                u_index++;
                is_u = 0;
            }
            else
            {
                *(v+v_index) = *(in+j);
                v_index++;
                is_u = 1;
            }
        }
    }
    return 1;
}
 
 
/*
Worker thread: capture frames from the camera.
*/
void *pthread_read_video_data(void *arg)
{
    /* 1. poll the camera for captured data */
    struct pollfd fds;
    fds.fd=video_fd;
    fds.events=POLLIN;
 
    /* 2. buffer descriptor used to dequeue frames */
    struct v4l2_buffer video_buffer;
    while(1)
    {
         /* (1) wait for the camera to capture a frame */
         poll(&fds,1,-1);
         /* (2) dequeue a filled buffer */
         video_buffer.type=V4L2_BUF_TYPE_VIDEO_CAPTURE; /* video capture device */
         video_buffer.memory=V4L2_MEMORY_MMAP;
         ioctl(video_fd,VIDIOC_DQBUF,&video_buffer);
         /* (3) process the image: convert YUYV to YUV420P */
         pthread_mutex_lock(&mutex);   /* lock */
         yuyv_to_yuv420p(image_buffer[video_buffer.index],YUV420P_Buffer,VIDEO_WIDTH,VIDEO_HEIGHT);
         pthread_mutex_unlock(&mutex); /* unlock */
         pthread_cond_broadcast(&cond);/* wake up the waiting encoder thread */
         
         /* (4) requeue the buffer */
         ioctl(video_fd,VIDIOC_QBUF,&video_buffer);
    }   
}
 
/*
Worker thread: capture audio from the sound card.
*/
void *pthread_read_audio_data(void *arg)
{
    capture_audio_data(capture_handle,buffer_frames);
    return NULL;
}
 
// usage example: ./a.out /dev/video0 hw:0
int main(int argc,char **argv)
{
    if(argc!=3)
    {
        printf("usage: ./app </dev/videoX> <hw:X>\n");
        return 0;
    }
    int err;
    pthread_t thread_id;
    
    // create the linked-list head
    list_head=List_CreateHead(list_head);
    
    /* initialize the mutex guarding the video frame buffer */
    pthread_mutex_init(&mutex,NULL);
    /* initialize the condition variable */
    pthread_cond_init(&cond,NULL);
 
    /* initialize the mutex guarding the audio list */
    pthread_mutex_init(&mutex_audio,NULL);
 
    /* initialize the camera device */
    err=VideoDeviceInit(argv[1]);
    printf("VideoDeviceInit=%d\n",err);
    if(err!=0)return err;
    /* create the worker thread that captures camera frames */
    pthread_create(&thread_id,NULL,pthread_read_video_data,NULL);
    /* detach it */
    pthread_detach(thread_id);
 
    capture_audio_data_init( argv[2]);
    /* create the worker thread that captures audio */
    pthread_create(&thread_id,NULL,pthread_read_audio_data,NULL);
    /* detach it */
    pthread_detach(thread_id);
    
    // start encoding audio and video, pushing to the RTMP server
    while(1)
    {
        video_audio_encode(RTMP_SERVE_RADDR);
    }
    return 0;
}
 
/*
Create the list head.
*/
struct AUDIO_DATA *List_CreateHead(struct AUDIO_DATA *head)
{
    if(head==NULL)
    {
        head=malloc(sizeof(struct AUDIO_DATA));
        head->next=NULL;
    }
    return head;
}
 
/*
Append a new node at the tail of the list.
*/
void List_AddNode(struct AUDIO_DATA *head,unsigned char* audio_buffer)
{
    struct AUDIO_DATA *tmp=head;
    struct AUDIO_DATA *new_node;
    /* find the tail of the list */
    while(tmp->next)
    {
        tmp=tmp->next;
    }
    /* build the new node */
    new_node=malloc(sizeof(struct AUDIO_DATA));
    new_node->audio_buffer=audio_buffer;
    new_node->next=NULL;
    /* link it into the list */
    tmp->next=new_node;
}
 
/*
Remove the node whose payload matches audio_buffer.
*/
void List_DelNode(struct AUDIO_DATA *head,unsigned char* audio_buffer)
{
    struct AUDIO_DATA *tmp=head;
    struct AUDIO_DATA *p;
    /* walk the list looking for the node to delete */
    while(tmp->next)
    {
        p=tmp;
        tmp=tmp->next;
        if(tmp->audio_buffer==audio_buffer)
        {
            p->next=tmp->next;
            free(tmp);
            break; /* buffer pointers are unique; stop so we never touch freed memory */
        }
    }
}
 
 
/*
Walk the list and return the number of nodes.
*/
int List_GetNodeCnt(struct AUDIO_DATA *head)
{
    int cnt=0;
    struct AUDIO_DATA *tmp=head;
    while(tmp->next)
    {
        tmp=tmp->next;
        cnt++;
    }
    return cnt;
}
 
 
int capture_audio_data_init( char *audio_dev)
{
    int err;
    
    buffer_frames = 1024;
    unsigned int rate = AUDIO_RATE_SET; // common rates: 44100Hz, 16000Hz, 8000Hz, 48000Hz, 22050Hz
    snd_pcm_hw_params_t *hw_params; // hardware configuration for the PCM stream
    
    /* the PCM sample formats are defined in pcm.h */
    format=SND_PCM_FORMAT_S16_LE; // 16 bit, little-endian
    
    /* open the capture device; print an error and quit on failure */
    if ((err = snd_pcm_open (&capture_handle, audio_dev,SND_PCM_STREAM_CAPTURE,0))<0) 
    {
        printf("cannot open audio device: %s (%s)\n",  audio_dev,snd_strerror (err));
        exit(1);
    }
    printf("audio interface opened.\n");
    
 
    /* allocate the hardware parameter structure */
    if((err = snd_pcm_hw_params_malloc(&hw_params)) < 0) 
    {
        printf("cannot allocate hardware parameter structure (%s)\n",snd_strerror(err));
        exit(1);
    }
    printf("hardware parameter structure allocated.\n");
    
    /* initialize it with the full configuration space of the device */
    if((err=snd_pcm_hw_params_any(capture_handle,hw_params)) < 0) 
    {
        printf("cannot initialize hardware parameter structure (%s)\n", snd_strerror(err));
        exit(1);
    }
    printf("hardware parameter structure initialized.\n");
 
    /*
        Use interleaved access mode.
        interleaved / non-interleaved: for multi-channel data, interleaved mode
        uses one buffer with the channels' samples interleaved, while
        non-interleaved mode needs a separate buffer per channel.
    */
    if((err = snd_pcm_hw_params_set_access (capture_handle,hw_params,SND_PCM_ACCESS_RW_INTERLEAVED)) < 0) 
    {
        printf("cannot set access type (%s)\n",snd_strerror(err));
        exit(1);
    }
    printf("access type set.\n");
 
    /* set the sample format */
    if ((err=snd_pcm_hw_params_set_format(capture_handle, hw_params,format)) < 0) 
    {
        printf("cannot set sample format (%s)\n",snd_strerror(err));
        exit(1);
    }
    fprintf(stdout, "PCM sample format set.\n");
 
    /* set the sample rate */
    if((err=snd_pcm_hw_params_set_rate_near (capture_handle,hw_params,&rate,0))<0) 
    {
        printf("cannot set sample rate (%s)\n",snd_strerror(err));
        exit(1);
    }
    printf("sample rate set.\n");
 
    /* set the channel count */
    if((err = snd_pcm_hw_params_set_channels(capture_handle, hw_params,AUDIO_CHANNEL_SET)) < 0) 
    {
        printf("cannot set channel count (%s)\n",snd_strerror(err));
        exit(1);
    }
    printf("channel count set.\n");
 
    /*将配置寫入驅動程式中,并判斷是否配置成功*/
    if ((err=snd_pcm_hw_params (capture_handle,hw_params))<0) 
    {
        printf("無法向驅動程式設定參數(%s)\n",snd_strerror(err));
        exit(1);
    }
    printf("參數設定成功.\n");
    /*使采集卡處于空閑狀态*/
    snd_pcm_hw_params_free(hw_params);
 
    /*準備音頻接口,并判斷是否準備好*/
    if((err=snd_pcm_prepare(capture_handle))<0) 
    {
        printf("無法使用音頻接口 (%s)\n",snd_strerror(err));
        exit(1);
    }
    printf("音頻接口準備好.\n");
    
    return 0;
}
 
unsigned char audio_read_buff[2048];
// audio capture loop
int capture_audio_data(snd_pcm_t *capture_handle,int buffer_frames)
{
    int err;
    // the encoder frame size is fixed at 1024 samples; with mono 16-bit samples
    // each read produces 1024 * 2 * 1 = 2048 bytes for frame->data[0]
    // snd_pcm_format_width(format) returns the sample size in bits
    int frame_byte=snd_pcm_format_width(format)/8;
 
    /* start capturing PCM data */
    printf("audio capture started...\n");
    char *audio_buffer;
    while(1) 
    {
        audio_buffer=malloc(buffer_frames*frame_byte*AUDIO_CHANNEL_SET); // 2048 bytes
        if(audio_buffer==NULL)
        {
            printf("buffer allocation failed.\n");
            break;
        }
        
        /* read one period of audio from the sound card: 2048 bytes */
        if((err=snd_pcm_readi(capture_handle,audio_read_buff,buffer_frames))!=buffer_frames) 
        {
              printf("read from audio interface failed (%s)\n",snd_strerror(err));
              exit(1);
        }
    
        pthread_mutex_lock(&mutex_audio); /* lock */
        memcpy(audio_buffer,audio_read_buff,buffer_frames*frame_byte*AUDIO_CHANNEL_SET);
        // append the buffer to the list for the encoder thread
        List_AddNode(list_head,audio_buffer);
        pthread_mutex_unlock(&mutex_audio); /* unlock */
    }
 
    /* free the data buffer */
    free(audio_buffer);
 
    /* close the capture device */
    snd_pcm_close(capture_handle);
 
    return 0;
}
       

5. Building and running

all:
	gcc ffmpeg_encode_video_audio.c -I /home/wbyq/work_pc/ffmpeg-4.2.2/_install/include -L /home/wbyq/work_pc/ffmpeg-4.2.2/_install/lib -lavcodec -lavfilter -lavutil -lswresample -lavdevice -lavformat -lpostproc -lswscale -L/home/wbyq/work_pc/x264-snapshot-20181217-2245/_install/lib -lx264 -lm -lpthread -lasound

Run it as:

./a.out /dev/video0 hw:0
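
If your FFmpeg and x264 installs shipped pkg-config files, the include and library flags can be resolved automatically instead of being hard-coded. This is a hypothetical equivalent, not from the original post; it assumes the .pc files are visible on PKG_CONFIG_PATH:

gcc ffmpeg_encode_video_audio.c $(pkg-config --cflags --libs libavformat libavcodec libswscale libswresample libavutil) -lx264 -lasound -lm -lpthread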

6. Result: streaming to Bilibili

(The original post showed screenshots of the resulting live stream playing on Bilibili here.)
