

JM ldecod analysis: frame splitting

Slice and IDR

case NALU_TYPE_SLICE:
    case NALU_TYPE_IDR:

      if (p_Vid->recovery_point || nalu->nal_unit_type == NALU_TYPE_IDR)
      {
        if (p_Vid->recovery_point_found == 0)
        {
          if (nalu->nal_unit_type != NALU_TYPE_IDR)
          {
            printf("Warning: Decoding does not start with an IDR picture.\n");
            p_Vid->non_conforming_stream = 1;
          }
          else
            p_Vid->non_conforming_stream = 0;
        }
        p_Vid->recovery_point_found = 1;
      }

      if (p_Vid->recovery_point_found == 0)
        break;

      currSlice->idr_flag = (nalu->nal_unit_type == NALU_TYPE_IDR);
      currSlice->nal_reference_idc = nalu->nal_reference_idc;
      currSlice->dp_mode = PAR_DP_1;
      currSlice->max_part_nr = 1;
#if (MVC_EXTENSION_ENABLE)
      if (currSlice->svc_extension_flag != 0)
      {
        currStream = currSlice->partArr[0].bitstream;
        currStream->ei_flag = 0;
        currStream->frame_bitoffset = currStream->read_len = 0;
        fast_memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
        currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
      }
#else   
      currStream = currSlice->partArr[0].bitstream;
      currStream->ei_flag = 0;
      currStream->frame_bitoffset = currStream->read_len = 0;
      memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
      currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
#endif

#if (MVC_EXTENSION_ENABLE)
      if(currSlice->svc_extension_flag == 0)
      {  //MVC
        //if(is_MVC_profile(p_Vid->active_sps->profile_idc))
        //{
          currSlice->view_id = currSlice->NaluHeaderMVCExt.view_id;
          currSlice->inter_view_flag = currSlice->NaluHeaderMVCExt.inter_view_flag;
          currSlice->anchor_pic_flag = currSlice->NaluHeaderMVCExt.anchor_pic_flag;
        //}
      }
      else if(currSlice->svc_extension_flag == -1) //SVC and the normal AVC;
      {
        if(p_Vid->active_subset_sps == NULL)
        {
          currSlice->view_id = GetBaseViewId(p_Vid, &p_Vid->active_subset_sps);
          if(currSlice->NaluHeaderMVCExt.iPrefixNALU >0)
          {
            assert(currSlice->view_id == currSlice->NaluHeaderMVCExt.view_id);
            currSlice->inter_view_flag = currSlice->NaluHeaderMVCExt.inter_view_flag;
            currSlice->anchor_pic_flag = currSlice->NaluHeaderMVCExt.anchor_pic_flag;
          }
          else
          {
            currSlice->inter_view_flag = 1;
            currSlice->anchor_pic_flag = currSlice->idr_flag;
          }
        }
        else
        {
          assert(p_Vid->active_subset_sps->num_views_minus1 >=0);
          // prefix NALU available
          if(currSlice->NaluHeaderMVCExt.iPrefixNALU >0)
          {
            currSlice->view_id = currSlice->NaluHeaderMVCExt.view_id;
            currSlice->inter_view_flag = currSlice->NaluHeaderMVCExt.inter_view_flag;
            currSlice->anchor_pic_flag = currSlice->NaluHeaderMVCExt.anchor_pic_flag;
          }
          else
          { //no prefix NALU;
            currSlice->view_id = p_Vid->active_subset_sps->view_id[0];
            currSlice->inter_view_flag = 1;
            currSlice->anchor_pic_flag = currSlice->idr_flag;
          }
        }
      }
     currSlice->layer_id = currSlice->view_id = GetVOIdx( p_Vid, currSlice->view_id );
#endif

      // Some syntax of the Slice Header depends on the parameter set, which depends on
      // the parameter set ID of the slice header.  Hence, read the pic_parameter_set_id
      // of the slice header first, then setup the active parameter sets, and then read
      // the rest of the slice header
      BitsUsedByHeader = FirstPartOfSliceHeader(currSlice);
      UseParameterSet (currSlice);
      currSlice->active_sps = p_Vid->active_sps;
      currSlice->active_pps = p_Vid->active_pps;
      currSlice->Transform8x8Mode = p_Vid->active_pps->transform_8x8_mode_flag;
      currSlice->chroma444_not_separate = (p_Vid->active_sps->chroma_format_idc==YUV444)&&((p_Vid->separate_colour_plane_flag == 0));

      BitsUsedByHeader += RestOfSliceHeader (currSlice);
#if (MVC_EXTENSION_ENABLE)
      if(currSlice->view_id >=0)
      {
        currSlice->p_Dpb = p_Vid->p_Dpb_layer[currSlice->view_id];
      }
#endif

      assign_quant_params (currSlice);        

      // if primary slice is replaced with redundant slice, set the correct image type
      if(currSlice->redundant_pic_cnt && p_Vid->Is_primary_correct==0 && p_Vid->Is_redundant_correct)
      {
        p_Vid->dec_picture->slice_type = p_Vid->type;
      }

      if(is_new_picture(p_Vid->dec_picture, currSlice, p_Vid->old_slice))
      {
        if(p_Vid->iSliceNumOfCurrPic==0)
          init_picture(p_Vid, currSlice, p_Inp);

        current_header = SOP;
        //check zero_byte if it is also the first NAL unit in the access unit
        CheckZeroByteVCL(p_Vid, nalu);
      }
      else
        current_header = SOS;

      setup_slice_methods(currSlice);

      // From here on, p_Vid->active_sps, p_Vid->active_pps and the slice header are valid
      if (currSlice->mb_aff_frame_flag)
        currSlice->current_mb_nr = currSlice->start_mb_nr << 1;
      else
        currSlice->current_mb_nr = currSlice->start_mb_nr;

      if (p_Vid->active_pps->entropy_coding_mode_flag)
      {
        int ByteStartPosition = currStream->frame_bitoffset/8;
        if (currStream->frame_bitoffset%8 != 0)
        {
          ++ByteStartPosition;
        }
        arideco_start_decoding (&currSlice->partArr[0].de_cabac, currStream->streamBuffer, ByteStartPosition, &currStream->read_len);
      }
      // printf ("read_new_slice: returning %s\n", current_header == SOP?"SOP":"SOS");
      //FreeNALU(nalu);
      p_Vid->recovery_point = 0;
      return current_header;
      break;
    case NALU_TYPE_DPA:
      if (p_Vid->recovery_point_found == 0)
        break;

      // read DP_A
      currSlice->dpB_NotPresent =1; 
      currSlice->dpC_NotPresent =1; 

      currSlice->idr_flag          = FALSE;
      currSlice->nal_reference_idc = nalu->nal_reference_idc;
      currSlice->dp_mode     = PAR_DP_3;
      currSlice->max_part_nr = 3;
      currSlice->ei_flag     = 0;
#if MVC_EXTENSION_ENABLE
      currSlice->p_Dpb = p_Vid->p_Dpb_layer[0];
#endif
      currStream             = currSlice->partArr[0].bitstream;
      currStream->ei_flag    = 0;
      currStream->frame_bitoffset = currStream->read_len = 0;
      memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
      currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
#if MVC_EXTENSION_ENABLE
      currSlice->view_id = GetBaseViewId(p_Vid, &p_Vid->active_subset_sps);
      currSlice->inter_view_flag = 1;
      currSlice->layer_id = currSlice->view_id = GetVOIdx( p_Vid, currSlice->view_id );
      currSlice->anchor_pic_flag = currSlice->idr_flag;
#endif

      BitsUsedByHeader = FirstPartOfSliceHeader(currSlice);
      UseParameterSet (currSlice);
      currSlice->active_sps = p_Vid->active_sps;
      currSlice->active_pps = p_Vid->active_pps;
      currSlice->Transform8x8Mode = p_Vid->active_pps->transform_8x8_mode_flag;
      currSlice->chroma444_not_separate = (p_Vid->active_sps->chroma_format_idc==YUV444)&&((p_Vid->separate_colour_plane_flag == 0));

      BitsUsedByHeader += RestOfSliceHeader (currSlice);
#if MVC_EXTENSION_ENABLE
      currSlice->p_Dpb = p_Vid->p_Dpb_layer[currSlice->view_id];
#endif

      assign_quant_params (currSlice);        

      // if this slice starts a new picture (in the common single-slice-per-picture case, every slice begins a new picture)
      if(is_new_picture(p_Vid->dec_picture, currSlice, p_Vid->old_slice))
      { // if this is the first slice of the current picture
        if(p_Vid->iSliceNumOfCurrPic==0)
          init_picture(p_Vid, currSlice, p_Inp);

        current_header = SOP;
        //check zero_byte if it is also the first NAL unit in the access unit
        CheckZeroByteVCL(p_Vid, nalu);
      }
      else
        current_header = SOS;

      setup_slice_methods(currSlice);

      // From here on, p_Vid->active_sps, p_Vid->active_pps and the slice header are valid
      if (currSlice->mb_aff_frame_flag)
        currSlice->current_mb_nr = currSlice->start_mb_nr << 1;
      else
        currSlice->current_mb_nr = currSlice->start_mb_nr;

      // Now I need to read the slice ID, which depends on the value of
      // redundant_pic_cnt_present_flag

      slice_id_a  = read_ue_v("NALU: DP_A slice_id", currStream, &p_Dec->UsedBits);

      if (p_Vid->active_pps->entropy_coding_mode_flag)
        error ("received data partition with CABAC, this is not allowed", 500);

      // continue with reading next DP
      if (0 == read_next_nalu(p_Vid, nalu))
        return current_header;

      if ( NALU_TYPE_DPB == nalu->nal_unit_type)
      {
        // we got a DPB
        currStream             = currSlice->partArr[1].bitstream;
        currStream->ei_flag    = 0;
        currStream->frame_bitoffset = currStream->read_len = 0;

        memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
        currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);

        slice_id_b  = read_ue_v("NALU: DP_B slice_id", currStream, &p_Dec->UsedBits);

        currSlice->dpB_NotPresent = 0; 

        if ((slice_id_b != slice_id_a) || (nalu->lost_packets))
        {
          printf ("Waning: got a data partition B which does not match DP_A (DP loss!)\n");
          currSlice->dpB_NotPresent =1; 
          currSlice->dpC_NotPresent =1; 
        }
        else
        {
          if (p_Vid->active_pps->redundant_pic_cnt_present_flag)
            read_ue_v("NALU: DP_B redundant_pic_cnt", currStream, &p_Dec->UsedBits);

          // we're finished with DP_B, so let's continue with next DP
          if (0 == read_next_nalu(p_Vid, nalu))
            return current_header;
        }
      }
      else
      {
        currSlice->dpB_NotPresent =1; 
      }

      // check if we got DP_C
      if ( NALU_TYPE_DPC == nalu->nal_unit_type)
      {
        currStream             = currSlice->partArr[2].bitstream;
        currStream->ei_flag    = 0;
        currStream->frame_bitoffset = currStream->read_len = 0;

        memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
        currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);

        currSlice->dpC_NotPresent = 0;

        slice_id_c  = read_ue_v("NALU: DP_C slice_id", currStream, &p_Dec->UsedBits);
        if ((slice_id_c != slice_id_a)|| (nalu->lost_packets))
        {
          printf ("Warning: got a data partition C which does not match DP_A(DP loss!)\n");
          //currSlice->dpB_NotPresent =1;
          currSlice->dpC_NotPresent =1;
        }

        if (p_Vid->active_pps->redundant_pic_cnt_present_flag)
          read_ue_v("NALU:SLICE_C redudand_pic_cnt", currStream, &p_Dec->UsedBits);
      }
      else
      {
        currSlice->dpC_NotPresent =1;
        pending_nalu = nalu;
      }

      // check if we read anything else than the expected partitions
      if ((nalu->nal_unit_type != NALU_TYPE_DPB) && (nalu->nal_unit_type != NALU_TYPE_DPC) && (!currSlice->dpC_NotPresent))
      {
        // we have a NALU that we can't process here, so restart processing
        goto process_nalu;
        // yes, "goto" should not be used, but it's really the best way here before we restructure the decoding loop
        // (which should be taken care of anyway)
      }

      //FreeNALU(nalu);
      return current_header;
      break;
           
/*!
 ************************************************************************
 * \brief
 *    Initializes the parameters for a new picture
 ************************************************************************
 */
static void init_picture(VideoParameters *p_Vid, Slice *currSlice, InputParameters *p_Inp)
{
  int i;
  int nplane;
  StorablePicture *dec_picture = NULL;
  seq_parameter_set_rbsp_t *active_sps = p_Vid->active_sps;
  DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb;

  p_Vid->PicHeightInMbs = p_Vid->FrameHeightInMbs / ( 1 + currSlice->field_pic_flag );
  p_Vid->PicSizeInMbs   = p_Vid->PicWidthInMbs * p_Vid->PicHeightInMbs;
  p_Vid->FrameSizeInMbs = p_Vid->PicWidthInMbs * p_Vid->FrameHeightInMbs;

  p_Vid->bFrameInit = 1;
  if (p_Vid->dec_picture) // && p_Vid->num_dec_mb == p_Vid->PicSizeInMbs)
  {
    // this may only happen on slice loss
    exit_picture(p_Vid, &p_Vid->dec_picture);
  }
  p_Vid->dpb_layer_id = currSlice->layer_id;
  //set buffers;
  setup_buffers(p_Vid, currSlice->layer_id);

  if (p_Vid->recovery_point)
    p_Vid->recovery_frame_num = (currSlice->frame_num + p_Vid->recovery_frame_cnt) % p_Vid->max_frame_num;

  if (currSlice->idr_flag)
    p_Vid->recovery_frame_num = currSlice->frame_num;

  if (p_Vid->recovery_point == 0 &&
    currSlice->frame_num != p_Vid->pre_frame_num &&
    currSlice->frame_num != (p_Vid->pre_frame_num + 1) % p_Vid->max_frame_num)
  {
    if (active_sps->gaps_in_frame_num_value_allowed_flag == 0)
    {
      // picture error concealment
      if(p_Inp->conceal_mode !=0)
      {
        if((currSlice->frame_num) < ((p_Vid->pre_frame_num + 1) % p_Vid->max_frame_num))
        {
          /* Conceal lost IDR frames and any frames immediately
             following the IDR. Use frame copy for these since
             lists cannot be formed correctly for motion copy*/
          p_Vid->conceal_mode = 1;
          p_Vid->IDR_concealment_flag = 1;
          conceal_lost_frames(p_Dpb, currSlice);
          //reset to original concealment mode for future drops
          p_Vid->conceal_mode = p_Inp->conceal_mode;
        }
        else
        {
          //reset to original concealment mode for future drops
          p_Vid->conceal_mode = p_Inp->conceal_mode;

          p_Vid->IDR_concealment_flag = 0;
          conceal_lost_frames(p_Dpb, currSlice);
        }
      }
      else
      {   /* Advanced Error Concealment would be called here to combat unintentional loss of pictures. */
        error("An unintentional loss of pictures occurs! Exit\n", 100);
      }
    }
    if(p_Vid->conceal_mode == 0)
      fill_frame_num_gap(p_Vid, currSlice);
  }

  if(currSlice->nal_reference_idc)
  {
    p_Vid->pre_frame_num = currSlice->frame_num;
  }

  //p_Vid->num_dec_mb = 0;

  //calculate POC
  decode_poc(p_Vid, currSlice);

  if (p_Vid->recovery_frame_num == (int) currSlice->frame_num && p_Vid->recovery_poc == 0x7fffffff)
    p_Vid->recovery_poc = currSlice->framepoc;

  if(currSlice->nal_reference_idc)
    p_Vid->last_ref_pic_poc = currSlice->framepoc;

  //  dumppoc (p_Vid);

  if (currSlice->structure==FRAME ||currSlice->structure==TOP_FIELD)
  {
    gettime (&(p_Vid->start_time));             // start time
  }

  dec_picture = p_Vid->dec_picture = alloc_storable_picture (p_Vid, currSlice->structure, p_Vid->width, p_Vid->height, p_Vid->width_cr, p_Vid->height_cr, 1);
  dec_picture->top_poc=currSlice->toppoc;
  dec_picture->bottom_poc=currSlice->bottompoc;
  dec_picture->frame_poc=currSlice->framepoc;
  dec_picture->qp = currSlice->qp;
  dec_picture->slice_qp_delta = currSlice->slice_qp_delta;
  dec_picture->chroma_qp_offset[0] = p_Vid->active_pps->chroma_qp_index_offset;
  dec_picture->chroma_qp_offset[1] = p_Vid->active_pps->second_chroma_qp_index_offset;
  dec_picture->iCodingType = currSlice->structure==FRAME? (currSlice->mb_aff_frame_flag? FRAME_MB_PAIR_CODING:FRAME_CODING): FIELD_CODING; //currSlice->slice_type;
  dec_picture->layer_id = currSlice->layer_id;
#if (MVC_EXTENSION_ENABLE)
  dec_picture->view_id         = currSlice->view_id;
  dec_picture->inter_view_flag = currSlice->inter_view_flag;
  dec_picture->anchor_pic_flag = currSlice->anchor_pic_flag;
  if (dec_picture->view_id == 1)
  {
    if((p_Vid->profile_idc == MVC_HIGH) || (p_Vid->profile_idc == STEREO_HIGH))
      init_mvc_picture(currSlice);
  }
#endif

  // reset all variables of the error concealment instance before decoding of every frame.
  // here the third parameter should, if perfectly, be equal to the number of slices per frame.
  // using little value is ok, the code will allocate more memory if the slice number is larger
#if (DISABLE_ERC == 0)
  ercReset(p_Vid->erc_errorVar, p_Vid->PicSizeInMbs, p_Vid->PicSizeInMbs, dec_picture->size_x);
#endif
  p_Vid->erc_mvperMB = 0;

  switch (currSlice->structure )
  {
  case TOP_FIELD:
    {
      dec_picture->poc = currSlice->toppoc;
      p_Vid->number *= 2;
      break;
    }
  case BOTTOM_FIELD:
    {
      dec_picture->poc = currSlice->bottompoc;
      p_Vid->number = p_Vid->number * 2 + 1;
      break;
    }
  case FRAME:
    {
      dec_picture->poc = currSlice->framepoc;
      break;
    }
  default:
    error("p_Vid->structure not initialized", 235);
  }

  //p_Vid->current_slice_nr=0;

  if (p_Vid->type > SI_SLICE)
  {
    set_ec_flag(p_Vid, SE_PTYPE);
    p_Vid->type = P_SLICE;  // concealed element
  }

  // CAVLC init
  if (p_Vid->active_pps->entropy_coding_mode_flag == (Boolean) CAVLC)
  {
    memset(p_Vid->nz_coeff[0][0][0], -1, p_Vid->PicSizeInMbs * 48 *sizeof(byte)); // 3 * 4 * 4
  }

  // Set the slice_nr member of each MB to -1, to ensure correct when packet loss occurs
  // TO set Macroblock Map (mark all MBs as 'have to be concealed')
  if( (p_Vid->separate_colour_plane_flag != 0) )
  {
    for( nplane=0; nplane<MAX_PLANE; ++nplane )
    {      
      Macroblock *currMB = p_Vid->mb_data_JV[nplane];
      char *intra_block = p_Vid->intra_block_JV[nplane];
      for(i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
      {
        reset_mbs(currMB++);
      }
      fast_memset(p_Vid->ipredmode_JV[nplane][0], DC_PRED, 16 * p_Vid->FrameHeightInMbs * p_Vid->PicWidthInMbs * sizeof(char));
      if(p_Vid->active_pps->constrained_intra_pred_flag)
      {
        for (i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
        {
          intra_block[i] = 1;
        }
      }
    }
  }
  else
  {
#if 0 //defined(OPENMP)
#pragma omp parallel for
    for(i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
      reset_mbs(&p_Vid->mb_data[i]);
#else
    Macroblock *currMB = p_Vid->mb_data;
    for(i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
      reset_mbs(currMB++);
#endif
    if(p_Vid->active_pps->constrained_intra_pred_flag)
    {
      for (i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
      {
        p_Vid->intra_block[i] = 1;
      }
    }
    fast_memset(p_Vid->ipredmode[0], DC_PRED, 16 * p_Vid->FrameHeightInMbs * p_Vid->PicWidthInMbs * sizeof(char));
  }  

  dec_picture->slice_type = p_Vid->type;
  dec_picture->used_for_reference = (currSlice->nal_reference_idc != 0);
  dec_picture->idr_flag = currSlice->idr_flag;
  dec_picture->no_output_of_prior_pics_flag = currSlice->no_output_of_prior_pics_flag;
  dec_picture->long_term_reference_flag     = currSlice->long_term_reference_flag;
  dec_picture->adaptive_ref_pic_buffering_flag = currSlice->adaptive_ref_pic_buffering_flag;

  dec_picture->dec_ref_pic_marking_buffer = currSlice->dec_ref_pic_marking_buffer;
  currSlice->dec_ref_pic_marking_buffer   = NULL;

  dec_picture->mb_aff_frame_flag = currSlice->mb_aff_frame_flag;
  dec_picture->PicWidthInMbs     = p_Vid->PicWidthInMbs;

  p_Vid->get_mb_block_pos = dec_picture->mb_aff_frame_flag ? get_mb_block_pos_mbaff : get_mb_block_pos_normal;
  p_Vid->getNeighbour     = dec_picture->mb_aff_frame_flag ? getAffNeighbour : getNonAffNeighbour;

  dec_picture->pic_num   = currSlice->frame_num;
  dec_picture->frame_num = currSlice->frame_num;

  dec_picture->recovery_frame = (unsigned int) ((int) currSlice->frame_num == p_Vid->recovery_frame_num);

  dec_picture->coded_frame = (currSlice->structure==FRAME);

  dec_picture->chroma_format_idc = active_sps->chroma_format_idc;

  dec_picture->frame_mbs_only_flag = active_sps->frame_mbs_only_flag;
  dec_picture->frame_cropping_flag = active_sps->frame_cropping_flag;

  if (dec_picture->frame_cropping_flag)
  {
    dec_picture->frame_crop_left_offset   = active_sps->frame_crop_left_offset;
    dec_picture->frame_crop_right_offset  = active_sps->frame_crop_right_offset;
    dec_picture->frame_crop_top_offset    = active_sps->frame_crop_top_offset;
    dec_picture->frame_crop_bottom_offset = active_sps->frame_crop_bottom_offset;
  }

#if (ENABLE_OUTPUT_TONEMAPPING)
  // store the necessary tone mapping sei into StorablePicture structure
  if (p_Vid->seiToneMapping->seiHasTone_mapping)
  {
    int coded_data_bit_max = (1 << p_Vid->seiToneMapping->coded_data_bit_depth);
    dec_picture->seiHasTone_mapping    = 1;
    dec_picture->tone_mapping_model_id = p_Vid->seiToneMapping->model_id;
    dec_picture->tonemapped_bit_depth  = p_Vid->seiToneMapping->sei_bit_depth;
    dec_picture->tone_mapping_lut      = malloc(coded_data_bit_max * sizeof(int));
    if (NULL == dec_picture->tone_mapping_lut)
    {
      no_mem_exit("init_picture: tone_mapping_lut");
    }
    memcpy(dec_picture->tone_mapping_lut, p_Vid->seiToneMapping->lut, sizeof(imgpel) * coded_data_bit_max);
    update_tone_mapping_sei(p_Vid->seiToneMapping);
  }
  else
    dec_picture->seiHasTone_mapping = 0;
#endif

  if( (p_Vid->separate_colour_plane_flag != 0) )
  {
    p_Vid->dec_picture_JV[0] = p_Vid->dec_picture;
    p_Vid->dec_picture_JV[1] = alloc_storable_picture (p_Vid, (PictureStructure) currSlice->structure, p_Vid->width, p_Vid->height, p_Vid->width_cr, p_Vid->height_cr, 1);
    copy_dec_picture_JV( p_Vid, p_Vid->dec_picture_JV[1], p_Vid->dec_picture_JV[0] );
    p_Vid->dec_picture_JV[2] = alloc_storable_picture (p_Vid, (PictureStructure) currSlice->structure, p_Vid->width, p_Vid->height, p_Vid->width_cr, p_Vid->height_cr, 1);
    copy_dec_picture_JV( p_Vid, p_Vid->dec_picture_JV[2], p_Vid->dec_picture_JV[0] );
  }
}
           

Slice header processing

  1. When the NAL unit is an IDR or an ordinary slice, this is a refresh (recovery) point. The decoder then starts reading the slice header; because some slice-header fields depend on the picture parameter set, the header is read in two parts.

FirstPartOfSliceHeader(currSlice) reads the slice-header fields that do not depend on the picture parameter set.
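
In read_new_slice() the parsing order is therefore (condensed from the code quoted above):

  BitsUsedByHeader  = FirstPartOfSliceHeader(currSlice); // first_mb_in_slice, slice_type, pic_parameter_set_id
  UseParameterSet (currSlice);                           // activate the PPS/SPS referenced by that id
  BitsUsedByHeader += RestOfSliceHeader (currSlice);     // fields that need the active SPS/PPS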

Slice-header field parsing:

/*!
 ************************************************************************
 * \brief
 *    read the first part of the header (only the pic_parameter_set_id)
 * \return
 *    Length of the first part of the slice header (in bits)
 ************************************************************************
 */
int FirstPartOfSliceHeader(Slice *currSlice)
{
  VideoParameters *p_Vid = currSlice->p_Vid;
  byte dP_nr = assignSE2partition[currSlice->dp_mode][SE_HEADER];
  DataPartition *partition = &(currSlice->partArr[dP_nr]);
  Bitstream *currStream = partition->bitstream;
  int tmp;

  p_Dec->UsedBits= partition->bitstream->frame_bitoffset; // was hardcoded to 31 for previous start-code. This is better.

  // Get first_mb_in_slice
  currSlice->start_mb_nr = read_ue_v ("SH: first_mb_in_slice", currStream, &p_Dec->UsedBits);
  
  tmp = read_ue_v ("SH: slice_type", currStream, &p_Dec->UsedBits);

  if (tmp > 4) tmp -= 5;

  p_Vid->type = currSlice->slice_type = (SliceType) tmp;

  currSlice->pic_parameter_set_id = read_ue_v ("SH: pic_parameter_set_id", currStream, &p_Dec->UsedBits);

  if( p_Vid->separate_colour_plane_flag )
    currSlice->colour_plane_id = read_u_v (2, "SH: colour_plane_id", currStream, &p_Dec->UsedBits);
  else
    currSlice->colour_plane_id = PLANE_Y;

  return p_Dec->UsedBits;
}
           

In the bitstream slice_type ranges from 0 to 9; values 5 to 9 are mapped back onto 0 to 4 by the code above (tmp -= 5):

typedef enum
{
  P_SLICE = 0,
  B_SLICE = 1,
  I_SLICE = 2,
  SP_SLICE = 3,
  SI_SLICE = 4,
  NUM_SLICE_TYPES = 5
} SliceType;
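
A minimal illustration of that mapping (not JM code): raw slice_type values 5 to 9 mean that every slice of the picture has the same type, and fold back onto 0 to 4.

/* hypothetical helper, for illustration only */
static SliceType map_raw_slice_type(int raw)        /* raw ue(v) value, 0..9 */
{
  return (SliceType) ((raw > 4) ? raw - 5 : raw);   /* 5->P, 6->B, 7->I, 8->SP, 9->SI */
}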
           

2. Before any other frames can be decoded, the SPS and PPS must have been read. Below is the activation of the SPS and PPS.

void UseParameterSet (Slice *currSlice)
{
  VideoParameters *p_Vid = currSlice->p_Vid;
  int PicParsetId = currSlice->pic_parameter_set_id;  
  pic_parameter_set_rbsp_t *pps = &p_Vid->PicParSet[PicParsetId];
  seq_parameter_set_rbsp_t *sps = &p_Vid->SeqParSet[pps->seq_parameter_set_id];
  int i;

  if (pps->Valid != TRUE)
    printf ("Trying to use an invalid (uninitialized) Picture Parameter Set with ID %d, expect the unexpected...\n", PicParsetId);
#if (MVC_EXTENSION_ENABLE)
  if (currSlice->svc_extension_flag == -1)
  {
    if (sps->Valid != TRUE)
      printf ("PicParset %d references an invalid (uninitialized) Sequence Parameter Set with ID %d, expect the unexpected...\n", 
      PicParsetId, (int) pps->seq_parameter_set_id);
  }
  else
  {
    // Set SPS to the subset SPS parameters
    p_Vid->active_subset_sps = p_Vid->SubsetSeqParSet + pps->seq_parameter_set_id;
    sps = &(p_Vid->active_subset_sps->sps);
    if (p_Vid->SubsetSeqParSet[pps->seq_parameter_set_id].Valid != TRUE)
      printf ("PicParset %d references an invalid (uninitialized) Subset Sequence Parameter Set with ID %d, expect the unexpected...\n", 
      PicParsetId, (int) pps->seq_parameter_set_id);
  }
#else
  if (sps->Valid != TRUE)
    printf ("PicParset %d references an invalid (uninitialized) Sequence Parameter Set with ID %d, expect the unexpected...\n", 
    PicParsetId, (int) pps->seq_parameter_set_id);
#endif

  // In theory, and with a well-designed software, the lines above
  // are everything necessary.  In practice, we need to patch many values
  // in p_Vid-> (but no more in p_Inp-> -- these have been taken care of)

  // Set Sequence Parameter Stuff first
  //  printf ("Using Picture Parameter set %d and associated Sequence Parameter Set %d\n", PicParsetId, pps->seq_parameter_set_id);
  if ((int) sps->pic_order_cnt_type < 0 || sps->pic_order_cnt_type > 2)  // != 1
  {
    printf ("invalid sps->pic_order_cnt_type = %d\n", (int) sps->pic_order_cnt_type);
    error ("pic_order_cnt_type != 1", -1000);
  }

  if (sps->pic_order_cnt_type == 1)
  {
    if(sps->num_ref_frames_in_pic_order_cnt_cycle >= MAXnum_ref_frames_in_pic_order_cnt_cycle)
    {
      error("num_ref_frames_in_pic_order_cnt_cycle too large",-1011);
    }
  }
  p_Vid->dpb_layer_id = currSlice->layer_id;
  activate_sps(p_Vid, sps);
  activate_pps(p_Vid, pps);

  // currSlice->dp_mode is set by read_new_slice (NALU first byte available there)
  if (pps->entropy_coding_mode_flag == (Boolean) CAVLC)
  {
    currSlice->nal_startcode_follows = uvlc_startcode_follows;
    for (i=0; i<3; i++)
    {
      currSlice->partArr[i].readSyntaxElement = readSyntaxElement_UVLC;      
    }
  }
  else
  {
    currSlice->nal_startcode_follows = cabac_startcode_follows;
    for (i=0; i<3; i++)
    {
      currSlice->partArr[i].readSyntaxElement = readSyntaxElement_CABAC;
    }
  }
  p_Vid->type = currSlice->slice_type;

           
/*!
 ************************************************************************
 * \brief
 *    Activate Sequence Parameter Sets
 *
 ************************************************************************
 */
void activate_sps (VideoParameters *p_Vid, seq_parameter_set_rbsp_t *sps)
{
  InputParameters *p_Inp = p_Vid->p_Inp;  
  // on the first activation, p_Vid->active_sps is still NULL
  if (p_Vid->active_sps != sps)
  {
    if (p_Vid->dec_picture)
    {
      // this may only happen on slice loss
      exit_picture(p_Vid, &p_Vid->dec_picture);
    }
    p_Vid->active_sps = sps;

    if(p_Vid->dpb_layer_id==0 && is_BL_profile(sps->profile_idc) && !p_Vid->p_Dpb_layer[0]->init_done)
    {
      // copy the relevant SPS parameters into the decoder's coding-parameter structure
      set_coding_par(sps, p_Vid->p_EncodePar[0]);
      setup_layer_info( p_Vid, sps, p_Vid->p_LayerPar[0]);
    }
    else if(p_Vid->dpb_layer_id==1 && is_EL_profile(sps->profile_idc) && !p_Vid->p_Dpb_layer[1]->init_done)
    {
      set_coding_par(sps, p_Vid->p_EncodePar[1]);
      setup_layer_info(p_Vid, sps, p_Vid->p_LayerPar[1]);
    }

//to be removed in future;
    set_global_coding_par(p_Vid, p_Vid->p_EncodePar[p_Vid->dpb_layer_id]);
//end;

#if (MVC_EXTENSION_ENABLE)
    //init_frext(p_Vid);
    if (/*p_Vid->last_pic_width_in_mbs_minus1 != p_Vid->active_sps->pic_width_in_mbs_minus1
        || p_Vid->last_pic_height_in_map_units_minus1 != p_Vid->active_sps->pic_height_in_map_units_minus1
        || p_Vid->last_max_dec_frame_buffering != GetMaxDecFrameBuffering(p_Vid)
        || */(p_Vid->last_profile_idc != p_Vid->active_sps->profile_idc && is_BL_profile(p_Vid->active_sps->profile_idc) && !p_Vid->p_Dpb_layer[0]->init_done /*&& is_BL_profile(p_Vid->last_profile_idc)*/))
    {
      //init_frext(p_Vid);
      init_global_buffers(p_Vid, 0);

      if (!p_Vid->no_output_of_prior_pics_flag)
      {
        flush_dpb(p_Vid->p_Dpb_layer[0]);
        flush_dpb(p_Vid->p_Dpb_layer[1]);
      }
      init_dpb(p_Vid, p_Vid->p_Dpb_layer[0], 1);
    }
    else if(p_Vid->last_profile_idc != p_Vid->active_sps->profile_idc && (
            is_MVC_profile(p_Vid->last_profile_idc) || is_MVC_profile(p_Vid->active_sps->profile_idc)
            )&& (!p_Vid->p_Dpb_layer[1]->init_done))
    {
      assert(p_Vid->p_Dpb_layer[0]->init_done);
      //init_frext(p_Vid);
      if(p_Vid->p_Dpb_layer[0]->init_done)
      {
        free_dpb(p_Vid->p_Dpb_layer[0]);
        init_dpb(p_Vid, p_Vid->p_Dpb_layer[0], 1);
      }
      init_global_buffers(p_Vid, 1);
      // for now lets re_init both buffers. Later, we should only re_init appropriate one
      // Note that we seem to be doing this for every frame which seems not good.
      //re_init_dpb(p_Vid, p_Vid->p_Dpb_layer[1], 2);
#if MVC_EXTENSION_ENABLE
      init_dpb(p_Vid, p_Vid->p_Dpb_layer[1], 2);
#endif
      //p_Vid->last_profile_idc = p_Vid->active_sps->profile_idc;
    }
    //p_Vid->p_Dpb_layer[0]->num_ref_frames = p_Vid->active_sps->num_ref_frames;
    //p_Vid->p_Dpb_layer[1]->num_ref_frames = p_Vid->active_sps->num_ref_frames;
    p_Vid->last_pic_width_in_mbs_minus1 = p_Vid->active_sps->pic_width_in_mbs_minus1;  
    p_Vid->last_pic_height_in_map_units_minus1 = p_Vid->active_sps->pic_height_in_map_units_minus1;
    p_Vid->last_max_dec_frame_buffering = GetMaxDecFrameBuffering(p_Vid);
    p_Vid->last_profile_idc = p_Vid->active_sps->profile_idc;

#else
    //init_frext(p_Vid);
    init_global_buffers(p_Vid, 0);

    if (!p_Vid->no_output_of_prior_pics_flag)
    {
      flush_dpb(p_Vid->p_Dpb_layer[0]);
    }
    init_dpb(p_Vid, p_Vid->p_Dpb_layer[0], 0);
    // for now lets init both buffers. Later, we should only re_init appropriate one
    //init_dpb(p_Vid, p_Vid->p_Dpb_layer[0], 1);
    // obviously this is not needed her but just adding it for completeness
    //init_dpb(p_Vid, p_Vid->p_Dpb_layer[1], 2);
#endif

#if (DISABLE_ERC == 0)
    ercInit(p_Vid, p_Vid->width, p_Vid->height, 1);
    if(p_Vid->dec_picture)
    {
      ercReset(p_Vid->erc_errorVar, p_Vid->PicSizeInMbs, p_Vid->PicSizeInMbs, p_Vid->dec_picture->size_x);
      p_Vid->erc_mvperMB = 0;
    }
#endif
  }
  
  reset_format_info(sps, p_Vid, &p_Inp->source, &p_Inp->output);
}
           

SPS activation also calls init_global_buffers(). The SPS-derived coding parameters are kept in the CodingParameters structure below and filled in by set_coding_par().

typedef struct coding_par
{
  int layer_id;
  ColorFormat yuv_format;
  int P444_joined;

  short bitdepth_luma;
  short bitdepth_chroma;
  int bitdepth_scale[2];
  int bitdepth_luma_qp_scale;
  int bitdepth_chroma_qp_scale;
  int bitdepth_lambda_scale;

  uint32 dc_pred_value_comp[MAX_PLANE]; //!< component value for DC prediction (depends on component pel bit depth)
  int max_pel_value_comp      [MAX_PLANE];       //!< max value that one picture element (pixel) can take (depends on pic_unit_bitdepth)
  int max_imgpel_value_comp_sq   [MAX_PLANE];       //!< max value that one picture element (pixel) can take (depends on pic_unit_bitdepth)

  int num_blk8x8_uv;
  int num_cdc_coeff;
  
  short mb_cr_size_x;
  short mb_cr_size_y;
  int mb_size[MAX_PLANE][2];

  int max_bitCount;
  int width;                   //!< Number of pels
  int width_padded;            //!< Width in pels of padded picture
  int width_blk;               //!< Number of columns in blocks
  int width_cr;                //!< Number of pels chroma
  int height;                  //!< Number of lines
  int height_padded;           //!< Number in lines of padded picture
  int height_blk;              //!< Number of lines in blocks
  int height_cr;               //!< Number of lines  chroma
  int height_cr_frame;         //!< Number of lines  chroma frame
  int size;                    //!< Luma Picture size in pels
  int size_cr;                 //!< Chroma Picture size in pels

  int padded_size_x;  
  int padded_size_x_m8x8;
  int padded_size_x_m4x4;
  int cr_padded_size_x;
  int cr_padded_size_x_m8;
  int cr_padded_size_x2;
  int cr_padded_size_x4;

  int pad_size_uv_x;
  int pad_size_uv_y;
  unsigned char chroma_mask_mv_y;
  unsigned char chroma_mask_mv_x;
  int chroma_shift_x;
  int chroma_shift_y; 
  int shift_cr_x;
  int shift_cr_y;

  unsigned int PicWidthInMbs;
  unsigned int PicHeightInMapUnits;
  unsigned int FrameHeightInMbs;
  unsigned int FrameSizeInMbs;

  unsigned int frame_num;
  unsigned int prevFrameNum;
  unsigned int FrameNumOffset;
  unsigned int prevFrameNumOffset;
  int last_ref_idc;

  //uint32 dc_pred_value;                 //!< DC prediction value for current component
  //short max_imgpel_value;              //!< max value that one picture element (pixel) can take (depends on pic_unit_bitdepth)
  char md5String[2][33];
}CodingParameters;
           
static void set_coding_par(seq_parameter_set_rbsp_t *sps, CodingParameters *cps)
{
  // maximum vertical motion vector range in luma quarter pixel units
  cps->profile_idc = sps->profile_idc;
  cps->lossless_qpprime_flag   = sps->lossless_qpprime_flag;
  if (sps->level_idc <= 10)
  {
    cps->max_vmv_r = 64 * 4;
  }
  else if (sps->level_idc <= 20)
  {
    cps->max_vmv_r = 128 * 4;
  }
  else if (sps->level_idc <= 30)
  {
    cps->max_vmv_r = 256 * 4;
  }
  else
  {
    cps->max_vmv_r = 512 * 4; // 512 pixels in quarter pixels
  }

  // Fidelity Range Extensions stuff (part 1)
  cps->bitdepth_chroma = 0;
  cps->width_cr        = 0;
  cps->height_cr       = 0;
  // luma bit depth
  cps->bitdepth_luma       = (short) (sps->bit_depth_luma_minus8 + 8);
  cps->bitdepth_scale[0]   = 1 << sps->bit_depth_luma_minus8;
  if (sps->chroma_format_idc != YUV400)
  {
    // chroma components are present
    cps->bitdepth_chroma   = (short) (sps->bit_depth_chroma_minus8 + 8);
    cps->bitdepth_scale[1] = 1 << sps->bit_depth_chroma_minus8;
  }
  // maximum frame number
  cps->max_frame_num = 1<<(sps->log2_max_frame_num_minus4+4);
  cps->PicWidthInMbs = (sps->pic_width_in_mbs_minus1 +1);
  cps->PicHeightInMapUnits = (sps->pic_height_in_map_units_minus1 +1);
  cps->FrameHeightInMbs = ( 2 - sps->frame_mbs_only_flag ) * cps->PicHeightInMapUnits;
  cps->FrameSizeInMbs = cps->PicWidthInMbs * cps->FrameHeightInMbs;
  // source picture chroma format
  cps->yuv_format=sps->chroma_format_idc;
  // whether the colour planes are coded separately; see the source-parameter description:
  // https://blog.csdn.net/dongkun152/article/details/118912591?spm=1001.2014.3001.5501
  cps->separate_colour_plane_flag = sps->separate_colour_plane_flag;
  if( cps->separate_colour_plane_flag )
  {
    cps->ChromaArrayType = 0;
  }
  else
  {
    cps->ChromaArrayType = sps->chroma_format_idc;
  }
  // macroblock size defaults to 16x16 (MB_BLOCK_SIZE)
  cps->width = cps->PicWidthInMbs * MB_BLOCK_SIZE;
  cps->height = cps->FrameHeightInMbs * MB_BLOCK_SIZE;  

  cps->iLumaPadX = MCBUF_LUMA_PAD_X;
  cps->iLumaPadY = MCBUF_LUMA_PAD_Y;
  cps->iChromaPadX = MCBUF_CHROMA_PAD_X;
  cps->iChromaPadY = MCBUF_CHROMA_PAD_Y;
  if (sps->chroma_format_idc == YUV420)
  {
    // for YUV420, chroma is half the luma size in each dimension
    cps->width_cr  = (cps->width  >> 1);
    cps->height_cr = (cps->height >> 1);
  }
  else if (sps->chroma_format_idc == YUV422)
  {
    cps->width_cr  = (cps->width >> 1);
    cps->height_cr = cps->height;
    cps->iChromaPadY = MCBUF_CHROMA_PAD_Y*2;
  }
  else if (sps->chroma_format_idc == YUV444)
  {
    //YUV444
    cps->width_cr = cps->width;
    cps->height_cr = cps->height;
    cps->iChromaPadX = cps->iLumaPadX;
    cps->iChromaPadY = cps->iLumaPadY;
  }
  //pel bitdepth init
  cps->bitdepth_luma_qp_scale   = 6 * (cps->bitdepth_luma - 8);

  if(cps->bitdepth_luma > cps->bitdepth_chroma || sps->chroma_format_idc == YUV400)
    cps->pic_unit_bitsize_on_disk = (cps->bitdepth_luma > 8)? 16:8;
  else
    cps->pic_unit_bitsize_on_disk = (cps->bitdepth_chroma > 8)? 16:8;
  cps->dc_pred_value_comp[0]    = 1<<(cps->bitdepth_luma - 1);
  cps->max_pel_value_comp[0] = (1<<cps->bitdepth_luma) - 1;
  // luma macroblock dimensions: 16x16
  cps->mb_size[0][0] = cps->mb_size[0][1] = MB_BLOCK_SIZE;

  if (sps->chroma_format_idc != YUV400)
  {
    //for chrominance part
    cps->bitdepth_chroma_qp_scale = 6 * (cps->bitdepth_chroma - 8);
    cps->dc_pred_value_comp[1]    = (1 << (cps->bitdepth_chroma - 1));
    cps->dc_pred_value_comp[2]    = cps->dc_pred_value_comp[1];
    cps->max_pel_value_comp[1]    = (1 << cps->bitdepth_chroma) - 1;
    cps->max_pel_value_comp[2]    = (1 << cps->bitdepth_chroma) - 1;
    cps->num_blk8x8_uv = (1 << sps->chroma_format_idc) & (~(0x1));
    cps->num_uv_blocks = (cps->num_blk8x8_uv >> 1);
    cps->num_cdc_coeff = (cps->num_blk8x8_uv << 1);
    cps->mb_size[1][0] = cps->mb_size[2][0] = cps->mb_cr_size_x  = (sps->chroma_format_idc==YUV420 || sps->chroma_format_idc==YUV422)?  8 : 16;
    cps->mb_size[1][1] = cps->mb_size[2][1] = cps->mb_cr_size_y  = (sps->chroma_format_idc==YUV444 || sps->chroma_format_idc==YUV422)? 16 :  8;

    cps->subpel_x    = cps->mb_cr_size_x == 8 ? 7 : 3;
    cps->subpel_y    = cps->mb_cr_size_y == 8 ? 7 : 3;
    cps->shiftpel_x  = cps->mb_cr_size_x == 8 ? 3 : 2;
    cps->shiftpel_y  = cps->mb_cr_size_y == 8 ? 3 : 2;
    cps->total_scale = cps->shiftpel_x + cps->shiftpel_y;
  }
  else
  {
    cps->bitdepth_chroma_qp_scale = 0;
    cps->max_pel_value_comp[1] = 0;
    cps->max_pel_value_comp[2] = 0;
    cps->num_blk8x8_uv = 0;
    cps->num_uv_blocks = 0;
    cps->num_cdc_coeff = 0;
    cps->mb_size[1][0] = cps->mb_size[2][0] = cps->mb_cr_size_x  = 0;
    cps->mb_size[1][1] = cps->mb_size[2][1] = cps->mb_cr_size_y  = 0;
    cps->subpel_x      = 0;
    cps->subpel_y      = 0;
    cps->shiftpel_x    = 0;
    cps->shiftpel_y    = 0;
    cps->total_scale   = 0;
  }

  cps->mb_cr_size = cps->mb_cr_size_x * cps->mb_cr_size_y;
  cps->mb_size_blk[0][0] = cps->mb_size_blk[0][1] = cps->mb_size[0][0] >> 2;
  cps->mb_size_blk[1][0] = cps->mb_size_blk[2][0] = cps->mb_size[1][0] >> 2;
  cps->mb_size_blk[1][1] = cps->mb_size_blk[2][1] = cps->mb_size[1][1] >> 2;

  cps->mb_size_shift[0][0] = cps->mb_size_shift[0][1] = CeilLog2_sf (cps->mb_size[0][0]);
  cps->mb_size_shift[1][0] = cps->mb_size_shift[2][0] = CeilLog2_sf (cps->mb_size[1][0]);
  cps->mb_size_shift[1][1] = cps->mb_size_shift[2][1] = CeilLog2_sf (cps->mb_size[1][1]);

  cps->rgb_output =  (sps->vui_seq_parameters.matrix_coefficients==0);
}
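
As a worked example (parameter values assumed for illustration): a 1080p, 8-bit YUV420 stream typically has pic_width_in_mbs_minus1 = 119, pic_height_in_map_units_minus1 = 67 and frame_mbs_only_flag = 1, so PicWidthInMbs = 120, FrameHeightInMbs = 68, width = 120 * 16 = 1920 and height = 68 * 16 = 1088; the YUV420 branch then gives width_cr = 960 and height_cr = 544. With bit_depth_luma_minus8 = 0, bitdepth_luma = 8 and bitdepth_scale[0] = 1; with log2_max_frame_num_minus4 = 4, max_frame_num = 1 << 8 = 256. The expression (1 << chroma_format_idc) & ~1 yields num_blk8x8_uv = 2, 4 and 8 for YUV420, YUV422 and YUV444 respectively.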
           

Assigning the quantisation parameters

/*!
 ************************************************************************
 * \brief
 *    For mapping the q-matrix to the active id and calculate quantisation values
 *
 * \param currSlice
 *    Slice pointer
 * \param pps
 *    Picture parameter set
 * \param sps
 *    Sequence parameter set
 *
 ************************************************************************
 */
void assign_quant_params(Slice *currSlice)
{
  seq_parameter_set_rbsp_t* sps = currSlice->active_sps;
  pic_parameter_set_rbsp_t* pps = currSlice->active_pps;
  int i;
  int n_ScalingList;
  // when neither the SPS nor the PPS carries scaling matrices, use the default (flat) ones
  if(!pps->pic_scaling_matrix_present_flag && !sps->seq_scaling_matrix_present_flag)
  {
    for(i=0; i<12; i++)
      currSlice->qmatrix[i] = (i < 6) ? quant_org : quant8_org;
  }
  else
  {
    n_ScalingList = (sps->chroma_format_idc != YUV444) ? 8 : 12;
    if(sps->seq_scaling_matrix_present_flag) // check sps first
    {
      for(i=0; i<n_ScalingList; i++)
      {
        if(i<6)
        {
          if(!sps->seq_scaling_list_present_flag[i]) // fall-back rule A
          {
            if(i==0)
              currSlice->qmatrix[i] = quant_intra_default;
            else if(i==3)
              currSlice->qmatrix[i] = quant_inter_default;
            else
              currSlice->qmatrix[i] = currSlice->qmatrix[i-1];
          }
          else
          {
            if(sps->UseDefaultScalingMatrix4x4Flag[i])
              currSlice->qmatrix[i] = (i<3) ? quant_intra_default : quant_inter_default;
            else
              currSlice->qmatrix[i] = sps->ScalingList4x4[i];
          }
        }
        else
        {
          if(!sps->seq_scaling_list_present_flag[i]) // fall-back rule A
          {
            if(i==6)
              currSlice->qmatrix[i] = quant8_intra_default;
            else if(i==7)
              currSlice->qmatrix[i] = quant8_inter_default;
            else
              currSlice->qmatrix[i] = currSlice->qmatrix[i-2];
          }
          else
          {
            if(sps->UseDefaultScalingMatrix8x8Flag[i-6])
              currSlice->qmatrix[i] = (i==6 || i==8 || i==10) ? quant8_intra_default:quant8_inter_default;
            else
              currSlice->qmatrix[i] = sps->ScalingList8x8[i-6];
          }
        }
      }
    }

    if(pps->pic_scaling_matrix_present_flag) // then check pps
    {
      for(i=0; i<n_ScalingList; i++)
      {
        if(i<6)
        {
          if(!pps->pic_scaling_list_present_flag[i]) // fall-back rule B
          {
            if (i==0)
            {
              if(!sps->seq_scaling_matrix_present_flag)
                currSlice->qmatrix[i] = quant_intra_default;
            }
            else if (i==3)
            {
              if(!sps->seq_scaling_matrix_present_flag)
                currSlice->qmatrix[i] = quant_inter_default;
            }
            else
              currSlice->qmatrix[i] = currSlice->qmatrix[i-1];
          }
          else
          {
            if(pps->UseDefaultScalingMatrix4x4Flag[i])
              currSlice->qmatrix[i] = (i<3) ? quant_intra_default:quant_inter_default;
            else
              currSlice->qmatrix[i] = pps->ScalingList4x4[i];
          }
        }
        else
        {
          if(!pps->pic_scaling_list_present_flag[i]) // fall-back rule B
          {
            if (i==6)
            {
              if(!sps->seq_scaling_matrix_present_flag)
                currSlice->qmatrix[i] = quant8_intra_default;
            }
            else if(i==7)
            {
              if(!sps->seq_scaling_matrix_present_flag)
                currSlice->qmatrix[i] = quant8_inter_default;
            }
            else  
              currSlice->qmatrix[i] = currSlice->qmatrix[i-2];
          }
          else
          {
            if(pps->UseDefaultScalingMatrix8x8Flag[i-6])
              currSlice->qmatrix[i] = (i==6 || i==8 || i==10) ? quant8_intra_default:quant8_inter_default;
            else
              currSlice->qmatrix[i] = pps->ScalingList8x8[i-6];
          }
        }
      }
    }
  }
  // compute the 4x4 dequantisation tables actually used
  CalculateQuant4x4Param(currSlice);
  if(pps->transform_8x8_mode_flag)
    CalculateQuant8x8Param(currSlice);
}
           

Computing the quantisation tables amounts to multiplying each element of the dequant_coef dequantisation matrix by the corresponding element of the selected (by default, flat) scaling matrix.

/*!
 ************************************************************************
 * \brief
 *    For calculating the quantisation values at frame level
 *
 ************************************************************************
 */
void CalculateQuant4x4Param(Slice *currSlice)
{
  int k;
  const int (*p_dequant_coef)[4][4] = dequant_coef;
  int  (*InvLevelScale4x4_Intra_0)[4][4] = currSlice->InvLevelScale4x4_Intra[0];
  int  (*InvLevelScale4x4_Intra_1)[4][4] = currSlice->InvLevelScale4x4_Intra[1];
  int  (*InvLevelScale4x4_Intra_2)[4][4] = currSlice->InvLevelScale4x4_Intra[2];
  int  (*InvLevelScale4x4_Inter_0)[4][4] = currSlice->InvLevelScale4x4_Inter[0];
  int  (*InvLevelScale4x4_Inter_1)[4][4] = currSlice->InvLevelScale4x4_Inter[1];
  int  (*InvLevelScale4x4_Inter_2)[4][4] = currSlice->InvLevelScale4x4_Inter[2];


  for(k=0; k<6; k++)
  {
    set_dequant4x4(*InvLevelScale4x4_Intra_0++, *p_dequant_coef  , currSlice->qmatrix[0]);
    set_dequant4x4(*InvLevelScale4x4_Intra_1++, *p_dequant_coef  , currSlice->qmatrix[1]);
    set_dequant4x4(*InvLevelScale4x4_Intra_2++, *p_dequant_coef  , currSlice->qmatrix[2]);
    set_dequant4x4(*InvLevelScale4x4_Inter_0++, *p_dequant_coef  , currSlice->qmatrix[3]);
    set_dequant4x4(*InvLevelScale4x4_Inter_1++, *p_dequant_coef  , currSlice->qmatrix[4]);
    set_dequant4x4(*InvLevelScale4x4_Inter_2++, *p_dequant_coef++, currSlice->qmatrix[5]);
  }
}

static void set_dequant4x4(int (*InvLevelScale4x4)[4],  const int (*dequant)[4], int *qmatrix)
{
  int j;
  for(j=0; j<4; j++)
  {
    *(*InvLevelScale4x4      ) = *(*dequant      ) * *qmatrix++;
    *(*InvLevelScale4x4   + 1) = *(*dequant   + 1) * *qmatrix++;
    *(*InvLevelScale4x4   + 2) = *(*dequant   + 2) * *qmatrix++;
    *(*InvLevelScale4x4++ + 3) = *(*dequant++ + 3) * *qmatrix++;
  }
}
           

The dequantisation matrices, indexed by QP % 6, are:

//! Dequantization coefficients
static const int dequant_coef[6][4][4] = {
  {
    { 10, 13, 10, 13},
    { 13, 16, 13, 16},
    { 10, 13, 10, 13},
    { 13, 16, 13, 16}},
  {
    { 11, 14, 11, 14},
    { 14, 18, 14, 18},
    { 11, 14, 11, 14},
    { 14, 18, 14, 18}},
  {
    { 13, 16, 13, 16},
    { 16, 20, 16, 20},
    { 13, 16, 13, 16},
    { 16, 20, 16, 20}},
  {
    { 14, 18, 14, 18},
    { 18, 23, 18, 23},
    { 14, 18, 14, 18},
    { 18, 23, 18, 23}},
  {
    { 16, 20, 16, 20},
    { 20, 25, 20, 25},
    { 16, 20, 16, 20},
    { 20, 25, 20, 25}},
  {
    { 18, 23, 18, 23},
    { 23, 29, 23, 29},
    { 18, 23, 18, 23},
    { 23, 29, 23, 29}}
};
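
As a standalone sanity check (a minimal sketch, not JM code): with the flat default scaling matrix whose sixteen entries are all 16 (quant_org in JM), set_dequant4x4() simply scales dequant_coef by 16, so for QP % 6 == 0 the top-left entry becomes 10 * 16 = 160:

#include <stdio.h>

/* Minimal sketch: reproduce the QP%6 == 0 table of set_dequant4x4(),
 * assuming the flat default scaling matrix (all entries equal to 16). */
static const int dequant_coef0[4][4] = {
  { 10, 13, 10, 13},
  { 13, 16, 13, 16},
  { 10, 13, 10, 13},
  { 13, 16, 13, 16}};

int main(void)
{
  int InvLevelScale[4][4];
  int i, j;
  for (i = 0; i < 4; i++)
    for (j = 0; j < 4; j++)
      InvLevelScale[i][j] = dequant_coef0[i][j] * 16;        /* e.g. 10 * 16 = 160 */
  printf("InvLevelScale[0][0] = %d\n", InvLevelScale[0][0]); /* prints 160 */
  return 0;
}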
           

slice_data processing

Before slice_data is read, setup_slice_methods() installs the callback functions according to the slice type.

/*!
 ************************************************************************
 * \brief
 *    Set mode interpretation based on slice type
 ************************************************************************
 */
void setup_slice_methods(Slice *currSlice)
{
  setup_read_macroblock (currSlice);
  switch (currSlice->slice_type)
  {
  case P_SLICE: 
    currSlice->interpret_mb_mode         = interpret_mb_mode_P;
    currSlice->read_motion_info_from_NAL = read_motion_info_from_NAL_p_slice;
    currSlice->decode_one_component      = decode_one_component_p_slice;
    currSlice->update_direct_mv_info     = NULL;
#if (MVC_EXTENSION_ENABLE)
    currSlice->init_lists                = currSlice->view_id ? init_lists_p_slice_mvc : init_lists_p_slice;
#else
    currSlice->init_lists                = init_lists_p_slice;
#endif
    break;
  case SP_SLICE:
    currSlice->interpret_mb_mode         = interpret_mb_mode_P;
    currSlice->read_motion_info_from_NAL = read_motion_info_from_NAL_p_slice;
    currSlice->decode_one_component      = decode_one_component_sp_slice;
    currSlice->update_direct_mv_info     = NULL;
#if (MVC_EXTENSION_ENABLE)
    currSlice->init_lists                = currSlice->view_id ? init_lists_p_slice_mvc : init_lists_p_slice;
#else
    currSlice->init_lists                = init_lists_p_slice;
#endif
    break;
  case B_SLICE:
    currSlice->interpret_mb_mode         = interpret_mb_mode_B;
    currSlice->read_motion_info_from_NAL = read_motion_info_from_NAL_b_slice;
    currSlice->decode_one_component      = decode_one_component_b_slice;
    update_direct_types(currSlice);
#if (MVC_EXTENSION_ENABLE)
    currSlice->init_lists                = currSlice->view_id ? init_lists_b_slice_mvc : init_lists_b_slice;
#else
    currSlice->init_lists                = init_lists_b_slice;
#endif
    break;
  case I_SLICE: 
    currSlice->interpret_mb_mode         = interpret_mb_mode_I;
    currSlice->read_motion_info_from_NAL = NULL;
    currSlice->decode_one_component      = decode_one_component_i_slice;
    currSlice->update_direct_mv_info     = NULL;
#if (MVC_EXTENSION_ENABLE)
    currSlice->init_lists                = currSlice->view_id ? init_lists_i_slice_mvc : init_lists_i_slice;
#else
    currSlice->init_lists                = init_lists_i_slice;
#endif
    break;
  case SI_SLICE: 
    currSlice->interpret_mb_mode         = interpret_mb_mode_SI;
    currSlice->read_motion_info_from_NAL = NULL;
    currSlice->decode_one_component      = decode_one_component_i_slice;
    currSlice->update_direct_mv_info     = NULL;
#if (MVC_EXTENSION_ENABLE)
    currSlice->init_lists                = currSlice->view_id ? init_lists_i_slice_mvc : init_lists_i_slice;
#else
    currSlice->init_lists                = init_lists_i_slice;
#endif
    break;
  default:
    printf("Unsupported slice type\n");
    break;
  }

  set_intra_prediction_modes(currSlice);

  if ( currSlice->p_Vid->active_sps->chroma_format_idc==YUV444 && (currSlice->p_Vid->separate_colour_plane_flag == 0) )
    currSlice->read_coeff_4x4_CAVLC = read_coeff_4x4_CAVLC_444;
  else
    currSlice->read_coeff_4x4_CAVLC = read_coeff_4x4_CAVLC;

  switch(currSlice->p_Vid->active_pps->entropy_coding_mode_flag)
  {
  case CABAC:
    set_read_CBP_and_coeffs_cabac(currSlice);
    break;
  case CAVLC:
    set_read_CBP_and_coeffs_cavlc(currSlice);
    break;
  default:
    printf("Unsupported entropy coding mode\n");
    break;
  }
}
           
void set_intra_prediction_modes(Slice *currSlice)
{ 
  if (currSlice->mb_aff_frame_flag)
  {
    currSlice->intra_pred_4x4    = intra_pred_4x4_mbaff;
    currSlice->intra_pred_8x8    = intra_pred_8x8_mbaff;
    currSlice->intra_pred_16x16  = intra_pred_16x16_mbaff;    
    currSlice->intra_pred_chroma = intra_pred_chroma_mbaff;
  }
  else
  {
    currSlice->intra_pred_4x4    = intra_pred_4x4_normal;  
    currSlice->intra_pred_8x8    = intra_pred_8x8_normal;
    currSlice->intra_pred_16x16  = intra_pred_16x16_normal;
    currSlice->intra_pred_chroma = intra_pred_chroma;   
  }
}
           
void set_read_CBP_and_coeffs_cabac(Slice *currSlice)
{
  switch (currSlice->p_Vid->active_sps->chroma_format_idc)
  {
  case YUV444:
    if (currSlice->p_Vid->separate_colour_plane_flag == 0)
    {
      currSlice->read_CBP_and_coeffs_from_NAL = read_CBP_and_coeffs_from_NAL_CABAC_444;
    }
    else
    {
      currSlice->read_CBP_and_coeffs_from_NAL = read_CBP_and_coeffs_from_NAL_CABAC_400;
    }
    break;
  case YUV422:
    currSlice->read_CBP_and_coeffs_from_NAL = read_CBP_and_coeffs_from_NAL_CABAC_422;
    break;
  case YUV420:
    currSlice->read_CBP_and_coeffs_from_NAL = read_CBP_and_coeffs_from_NAL_CABAC_420;
    break;
  case YUV400:
    currSlice->read_CBP_and_coeffs_from_NAL = read_CBP_and_coeffs_from_NAL_CABAC_400;
    break;
  default:
    assert (1);
    currSlice->read_CBP_and_coeffs_from_NAL = NULL;
    break;
  }
}
           

SPS processing

Before a slice can be decoded, the SPS and PPS must have been received, and the SPS must come first.

case NALU_TYPE_SPS:
      //printf ("Found NALU_TYPE_SPS\n");
      ProcessSPS(p_Vid, nalu);
      break;
           
void ProcessSPS (VideoParameters *p_Vid, NALU_t *nalu)
{  
  DataPartition *dp = AllocPartition(1);
  seq_parameter_set_rbsp_t *sps = AllocSPS();

  memcpy (dp->bitstream->streamBuffer, &nalu->buf[1], nalu->len-1);
  dp->bitstream->code_len = dp->bitstream->bitstream_length = RBSPtoSODB (dp->bitstream->streamBuffer, nalu->len-1);
  dp->bitstream->ei_flag = 0;
  dp->bitstream->read_len = dp->bitstream->frame_bitoffset = 0;

  InterpretSPS (p_Vid, dp, sps);
#if (MVC_EXTENSION_ENABLE)
  get_max_dec_frame_buf_size(sps);
#endif

  if (sps->Valid)
  {
    if (p_Vid->active_sps)
    {
      if (sps->seq_parameter_set_id == p_Vid->active_sps->seq_parameter_set_id)
      {
        if (!sps_is_equal(sps, p_Vid->active_sps))
        {
          if (p_Vid->dec_picture) // && p_Vid->num_dec_mb == p_Vid->PicSizeInMbs) //?
          {
            // this may only happen on slice loss
            exit_picture(p_Vid, &p_Vid->dec_picture);
          }
          p_Vid->active_sps=NULL;
        }
      }
    }
    // SPSConsistencyCheck (pps);
    MakeSPSavailable (p_Vid, sps->seq_parameter_set_id, sps);
#if (MVC_EXTENSION_ENABLE)
    if (p_Vid->profile_idc < (int) sps->profile_idc)
    {
      p_Vid->profile_idc = sps->profile_idc;
    }
#else
    p_Vid->profile_idc = sps->profile_idc;
#endif
    p_Vid->separate_colour_plane_flag = sps->separate_colour_plane_flag;
    if( p_Vid->separate_colour_plane_flag )
    {
      p_Vid->ChromaArrayType = 0;
    }
    else
    {
      p_Vid->ChromaArrayType = sps->chroma_format_idc;
    }
  }

  FreePartition (dp, 1);
  FreeSPS (sps);
}
           
// fill sps with content of p

int InterpretSPS (VideoParameters *p_Vid, DataPartition *p, seq_parameter_set_rbsp_t *sps)
{
  unsigned i;
  unsigned n_ScalingList;
  int reserved_zero;
  Bitstream *s = p->bitstream;

  assert (p != NULL);
  assert (p->bitstream != NULL);
  assert (p->bitstream->streamBuffer != 0);
  assert (sps != NULL);

  p_Dec->UsedBits = 0;

  sps->profile_idc                            = read_u_v  (8, "SPS: profile_idc"                           , s, &p_Dec->UsedBits);

  if ((sps->profile_idc!=BASELINE       ) &&
      (sps->profile_idc!=MAIN           ) &&
      (sps->profile_idc!=EXTENDED       ) &&
      (sps->profile_idc!=FREXT_HP       ) &&
      (sps->profile_idc!=FREXT_Hi10P    ) &&
      (sps->profile_idc!=FREXT_Hi422    ) &&
      (sps->profile_idc!=FREXT_Hi444    ) &&
      (sps->profile_idc!=FREXT_CAVLC444 )
#if (MVC_EXTENSION_ENABLE)
      && (sps->profile_idc!=MVC_HIGH)
      && (sps->profile_idc!=STEREO_HIGH)
#endif
      )
  {
    printf("Invalid Profile IDC (%d) encountered. \n", sps->profile_idc);
    return p_Dec->UsedBits;
  }

  sps->constrained_set0_flag                  = read_u_1  (   "SPS: constrained_set0_flag"                 , s, &p_Dec->UsedBits);
  sps->constrained_set1_flag                  = read_u_1  (   "SPS: constrained_set1_flag"                 , s, &p_Dec->UsedBits);
  sps->constrained_set2_flag                  = read_u_1  (   "SPS: constrained_set2_flag"                 , s, &p_Dec->UsedBits);
  sps->constrained_set3_flag                  = read_u_1  (   "SPS: constrained_set3_flag"                 , s, &p_Dec->UsedBits);
#if (MVC_EXTENSION_ENABLE)
  sps->constrained_set4_flag                  = read_u_1  (   "SPS: constrained_set4_flag"                 , s, &p_Dec->UsedBits);
  sps->constrained_set5_flag                  = read_u_1  (   "SPS: constrained_set5_flag"                 , s, &p_Dec->UsedBits);
  reserved_zero                               = read_u_v  (2, "SPS: reserved_zero_2bits"                   , s, &p_Dec->UsedBits);
#else
  reserved_zero                               = read_u_v  (4, "SPS: reserved_zero_4bits"                   , s, &p_Dec->UsedBits);
#endif
  //assert (reserved_zero==0);
  if (reserved_zero != 0)
  {
    printf("Warning, reserved_zero flag not equal to 0. Possibly new constrained_setX flag introduced.\n");
  }

  sps->level_idc                              = read_u_v  (8, "SPS: level_idc"                             , s, &p_Dec->UsedBits);

  sps->seq_parameter_set_id                   = read_ue_v ("SPS: seq_parameter_set_id"                     , s, &p_Dec->UsedBits);

  // Fidelity Range Extensions stuff
  sps->chroma_format_idc = 1;
  sps->bit_depth_luma_minus8   = 0;
  sps->bit_depth_chroma_minus8 = 0;
  sps->lossless_qpprime_flag   = 0;
  sps->separate_colour_plane_flag = 0;

  if((sps->profile_idc==FREXT_HP   ) ||
     (sps->profile_idc==FREXT_Hi10P) ||
     (sps->profile_idc==FREXT_Hi422) ||
     (sps->profile_idc==FREXT_Hi444) ||
     (sps->profile_idc==FREXT_CAVLC444)
#if (MVC_EXTENSION_ENABLE)
     || (sps->profile_idc==MVC_HIGH)
     || (sps->profile_idc==STEREO_HIGH)
#endif
     )
  {
    sps->chroma_format_idc                      = read_ue_v ("SPS: chroma_format_idc"                       , s, &p_Dec->UsedBits);

    if(sps->chroma_format_idc == YUV444)
    {
      sps->separate_colour_plane_flag           = read_u_1  ("SPS: separate_colour_plane_flag"              , s, &p_Dec->UsedBits);
    }

    sps->bit_depth_luma_minus8                  = read_ue_v ("SPS: bit_depth_luma_minus8"                   , s, &p_Dec->UsedBits);
    sps->bit_depth_chroma_minus8                = read_ue_v ("SPS: bit_depth_chroma_minus8"                 , s, &p_Dec->UsedBits);
    //checking;
    if((sps->bit_depth_luma_minus8+8 > sizeof(imgpel)*8) || (sps->bit_depth_chroma_minus8+8> sizeof(imgpel)*8))
      error ("Source picture has higher bit depth than imgpel data type. \nPlease recompile with larger data type for imgpel.", 500);

    sps->lossless_qpprime_flag                  = read_u_1  ("SPS: lossless_qpprime_y_zero_flag"            , s, &p_Dec->UsedBits);

    sps->seq_scaling_matrix_present_flag        = read_u_1  (   "SPS: seq_scaling_matrix_present_flag"       , s, &p_Dec->UsedBits);
    
    if(sps->seq_scaling_matrix_present_flag)
    {
      n_ScalingList = (sps->chroma_format_idc != YUV444) ? 8 : 12;
      for(i=0; i<n_ScalingList; i++)
      {
        sps->seq_scaling_list_present_flag[i]   = read_u_1  (   "SPS: seq_scaling_list_present_flag"         , s, &p_Dec->UsedBits);
        if(sps->seq_scaling_list_present_flag[i])
        {
          if(i<6)
            Scaling_List(sps->ScalingList4x4[i], 16, &sps->UseDefaultScalingMatrix4x4Flag[i], s);
          else
            Scaling_List(sps->ScalingList8x8[i-6], 64, &sps->UseDefaultScalingMatrix8x8Flag[i-6], s);
        }
      }
    }
  }

  sps->log2_max_frame_num_minus4              = read_ue_v ("SPS: log2_max_frame_num_minus4"                , s, &p_Dec->UsedBits);
  sps->pic_order_cnt_type                     = read_ue_v ("SPS: pic_order_cnt_type"                       , s, &p_Dec->UsedBits);

  if (sps->pic_order_cnt_type == 0)
    sps->log2_max_pic_order_cnt_lsb_minus4 = read_ue_v ("SPS: log2_max_pic_order_cnt_lsb_minus4"           , s, &p_Dec->UsedBits);
  else if (sps->pic_order_cnt_type == 1)
  {
    sps->delta_pic_order_always_zero_flag      = read_u_1  ("SPS: delta_pic_order_always_zero_flag"       , s, &p_Dec->UsedBits);
    sps->offset_for_non_ref_pic                = read_se_v ("SPS: offset_for_non_ref_pic"                 , s, &p_Dec->UsedBits);
    sps->offset_for_top_to_bottom_field        = read_se_v ("SPS: offset_for_top_to_bottom_field"         , s, &p_Dec->UsedBits);
    sps->num_ref_frames_in_pic_order_cnt_cycle = read_ue_v ("SPS: num_ref_frames_in_pic_order_cnt_cycle"  , s, &p_Dec->UsedBits);
    for(i=0; i<sps->num_ref_frames_in_pic_order_cnt_cycle; i++)
      sps->offset_for_ref_frame[i]               = read_se_v ("SPS: offset_for_ref_frame[i]"              , s, &p_Dec->UsedBits);
  }
  sps->num_ref_frames                        = read_ue_v ("SPS: num_ref_frames"                         , s, &p_Dec->UsedBits);
  sps->gaps_in_frame_num_value_allowed_flag  = read_u_1  ("SPS: gaps_in_frame_num_value_allowed_flag"   , s, &p_Dec->UsedBits);
  sps->pic_width_in_mbs_minus1               = read_ue_v ("SPS: pic_width_in_mbs_minus1"                , s, &p_Dec->UsedBits);
  sps->pic_height_in_map_units_minus1        = read_ue_v ("SPS: pic_height_in_map_units_minus1"         , s, &p_Dec->UsedBits);
  sps->frame_mbs_only_flag                   = read_u_1  ("SPS: frame_mbs_only_flag"                    , s, &p_Dec->UsedBits);
  if (!sps->frame_mbs_only_flag)
  {
    sps->mb_adaptive_frame_field_flag        = read_u_1  ("SPS: mb_adaptive_frame_field_flag"           , s, &p_Dec->UsedBits);
  }
  //printf("interlace flags %d %d\n", sps->frame_mbs_only_flag, sps->mb_adaptive_frame_field_flag);
  sps->direct_8x8_inference_flag             = read_u_1  ("SPS: direct_8x8_inference_flag"              , s, &p_Dec->UsedBits);
  sps->frame_cropping_flag                   = read_u_1  ("SPS: frame_cropping_flag"                    , s, &p_Dec->UsedBits);

  if (sps->frame_cropping_flag)
  {
    sps->frame_crop_left_offset      = read_ue_v ("SPS: frame_crop_left_offset"           , s, &p_Dec->UsedBits);
    sps->frame_crop_right_offset     = read_ue_v ("SPS: frame_crop_right_offset"          , s, &p_Dec->UsedBits);
    sps->frame_crop_top_offset       = read_ue_v ("SPS: frame_crop_top_offset"            , s, &p_Dec->UsedBits);
    sps->frame_crop_bottom_offset    = read_ue_v ("SPS: frame_crop_bottom_offset"         , s, &p_Dec->UsedBits);
  }
  sps->vui_parameters_present_flag           = (Boolean) read_u_1  ("SPS: vui_parameters_present_flag"      , s, &p_Dec->UsedBits);

  InitVUI(sps);
  ReadVUI(p, sps);

  sps->Valid = TRUE;
  return p_Dec->UsedBits;
}
           

When seq_scaling_list_present_flag is 1, the scaling coefficients are parsed from the SPS by the function below. In most streams the flag is 0 and the default scaling matrices are used.

// syntax for scaling list matrix values
void Scaling_List(int *scalingList, int sizeOfScalingList, Boolean *UseDefaultScalingMatrix, Bitstream *s)
{
  int j, scanj;
  int delta_scale, lastScale, nextScale;

  lastScale      = 8;
  nextScale      = 8;

  for(j=0; j<sizeOfScalingList; j++)
  {
    scanj = (sizeOfScalingList==16) ? ZZ_SCAN[j]:ZZ_SCAN8[j];

    if(nextScale!=0)
    {
      delta_scale = read_se_v (   "   : delta_sl   "                           , s, &p_Dec->UsedBits);
      nextScale = (lastScale + delta_scale + 256) % 256;
      *UseDefaultScalingMatrix = (Boolean) (scanj==0 && nextScale==0);
    }

    scalingList[scanj] = (nextScale==0) ? lastScale:nextScale;
    lastScale = scalingList[scanj];
  }
}
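
For example (delta values assumed for illustration): starting from lastScale = nextScale = 8, delta_scale values 8, 2, -3 give nextScale = (8 + 8 + 256) % 256 = 16, then (16 + 2 + 256) % 256 = 18, then (18 - 3 + 256) % 256 = 15, so the first three entries in zig-zag order become 16, 18, 15. A delta that drives nextScale to 0 at the very first coefficient selects the default matrix (UseDefaultScalingMatrix), while a 0 reached later simply repeats lastScale for all remaining positions.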