
[caffe] The convolution layers in Caffe: BaseConvolutionLayer and ConvolutionLayer

BaseConvolutionLayer

LayerSetUp

When layer->LayerSetUp is called, what actually runs is BaseConvolutionLayer::LayerSetUp: although it is a virtual function, the subclass ConvolutionLayer does not override it. The convolution layer's LayerSetUp mainly initializes the kernel size, pad, stride, and so on, along with some parameters of the input and output blobs.

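These settings come from the convolution_param block of the layer definition in the network prototxt. For orientation, here is a typical layer definition of the kind this code parses; the field names are real ConvolutionParameter fields, while the layer and blob names are made up:

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 64     # number of filters
    kernel_size: 3     # or kernel_h / kernel_w for 2D
    stride: 1
    pad: 1
    dilation: 1
    bias_term: true
  }
}
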
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Configure the kernel size, padding, stride, and inputs.
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();  // defaults to false
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());  // defaults to 1
  const int first_spatial_axis = channel_axis_ + 1;  // 2 for N*C*H*W input
  const int num_axes = bottom[0]->num_axes();  // 4
  num_spatial_axes_ = num_axes - first_spatial_axis;  // 2
  CHECK_GE(num_spatial_axes_, 0);
  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);  // {3}
  vector<int> spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));  // {2}
  // Setup filter kernel dimensions (kernel_shape_).
  kernel_shape_.Reshape(spatial_dim_blob_shape);  // 2 elements: kh, kw
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();  // kernel size from the layer parameters
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
      for (int i = 0; i < num_spatial_axes_; ++i) {
        kernel_shape_data[i] =
            conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
      }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Setup stride dimensions (stride_).
  stride_.Reshape(spatial_dim_blob_shape);  // 2 elements
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();  // stride from the layer parameters
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Setup pad dimensions (pad_).
  pad_.Reshape(spatial_dim_blob_shape);  // 2 elements
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();  // pad from the layer parameters
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
  // Setup dilation dimensions (dilation_).
  dilation_.Reshape(spatial_dim_blob_shape);  // 2 elements
  int* dilation_data = dilation_.mutable_cpu_data();
  const int num_dilation_dims = conv_param.dilation_size();
  CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||
        num_dilation_dims == num_spatial_axes_)
      << "dilation must be specified once, or once per spatial dimension "
      << "(dilation specified " << num_dilation_dims << " times; "
      << num_spatial_axes_ << " spatial dims).";
  const int kDefaultDilation = 1;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :
                       conv_param.dilation((num_dilation_dims == 1) ? 0 : i);
  }
  // Special case: im2col is the identity for 1x1 convolution with stride 1
  // and no padding, so flag for skipping the buffer and transformation.
  is_1x1_ = true;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    is_1x1_ &=
        kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0;
    if (!is_1x1_) { break; }
  }
  // Configure output channels and groups.
  channels_ = bottom[0]->shape(channel_axis_);  // number of input channels
  num_output_ = this->layer_param_.convolution_param().num_output();  // number of filters
  CHECK_GT(num_output_, 0);
  group_ = this->layer_param_.convolution_param().group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  if (reverse_dimensions()) {
    conv_out_channels_ = channels_;
    conv_in_channels_ = num_output_;
  } else {
    conv_out_channels_ = num_output_;  // top blob is m*n*h*w: m = batch size, n = number of outputs
    conv_in_channels_ = channels_;
  }
  // Handle the parameters: weights and biases.
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  vector<int> weight_shape(2);
  weight_shape[0] = conv_out_channels_;  // first weight dim
  weight_shape[1] = conv_in_channels_ / group_;  // second dim
  for (int i = 0; i < num_spatial_axes_; ++i) {
    weight_shape.push_back(kernel_shape_data[i]);  // third and fourth dims
  }
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  vector<int> bias_shape(bias_term_, num_output_);
  if (this->blobs_.size() > 0) {
    CHECK_EQ(1 + bias_term_, this->blobs_.size())
        << "Incorrect number of weight blobs.";
    if (weight_shape != this->blobs_[0]->shape()) {
      Blob<Dtype> weight_shaped_blob(weight_shape);
      LOG(FATAL) << "Incorrect weight shape: expected shape "
          << weight_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[0]->shape_string();
    }
    if (bias_term_ && bias_shape != this->blobs_[1]->shape()) {
      Blob<Dtype> bias_shaped_blob(bias_shape);
      LOG(FATAL) << "Incorrect bias shape: expected shape "
          << bias_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[1]->shape_string();
    }
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {  // two blobs if there is a bias term, otherwise one
      this->blobs_.resize(2);
    } else {
      this->blobs_.resize(1);
    }
    // Initialize and fill the weights:
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));  // blobs_[0] gets weight_shape and holds the weights
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    weight_filler->Fill(this->blobs_[0].get());
    // If necessary, initialize and fill the biases.
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  kernel_dim_ = this->blobs_[0]->count(1);  // dim of a single kernel: (channels/group)*kh*kw
  weight_offset_ = conv_out_channels_ * kernel_dim_ / group_;  // weights per group
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
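
To make the shapes concrete, take a hypothetical layer with num_output = 64, a 32-channel input, group = 2 and a 3x3 kernel: weight_shape becomes [64, 16, 3, 3] (16 = 32/2 input channels per group), bias_shape is [64], kernel_dim_ = 16*3*3 = 144, and weight_offset_ = 64*144/2 = 4608.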

Reshape

Likewise, Reshape is a virtual function that ConvolutionLayer does not override.

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int first_spatial_axis = channel_axis_ + 1;  // 2
  CHECK_EQ(bottom[0]->num_axes(), first_spatial_axis + num_spatial_axes_)
      << "bottom num_axes may not change.";
  num_ = bottom[0]->count(0, channel_axis_);  // batch size
  CHECK_EQ(bottom[0]->shape(channel_axis_), channels_)
      << "Input size incompatible with convolution kernel.";
  // TODO: generalize to handle inputs of different shapes.
  for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) {
    CHECK(bottom[0]->shape() == bottom[bottom_id]->shape())
        << "All inputs must have the same shape.";
  }
  // Shape the tops.
  bottom_shape_ = &bottom[0]->shape();  // shape of the bottom blob
  compute_output_shape();  // fills output_shape_ with the two spatial dims
  vector<int> top_shape(bottom[0]->shape().begin(),  // copy the leading (batch) axes
      bottom[0]->shape().begin() + channel_axis_);
  top_shape.push_back(num_output_);  // number of outputs
  for (int i = 0; i < num_spatial_axes_; ++i) {
    top_shape.push_back(output_shape_[i]);  // spatial output shape
  }
  for (int top_id = 0; top_id < top.size(); ++top_id) {
    top[top_id]->Reshape(top_shape);
  }
  if (reverse_dimensions()) {
    conv_out_spatial_dim_ = bottom[0]->count(first_spatial_axis);
  } else {
    conv_out_spatial_dim_ = top[0]->count(first_spatial_axis);  // spatial size of one top[0] channel
  }
  col_offset_ = kernel_dim_ * conv_out_spatial_dim_;  // per-group offset into the col buffer: kernel dim * output spatial size
  output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_;
  // Setup input dimensions (conv_input_shape_).
  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);
  conv_input_shape_.Reshape(bottom_dim_blob_shape);
  int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data();
  for (int i = 0; i < num_spatial_axes_ + 1; ++i) {
    if (reverse_dimensions()) {
      conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i);
    } else {
      conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i);  // input shape is channels*h*w
    }
  }
  // The im2col result buffer will only hold one image at a time to avoid
  // overly large memory usage. In the special case of 1x1 convolution
  // it goes lazily unused to save memory.
  col_buffer_shape_.clear();
  col_buffer_shape_.push_back(kernel_dim_ * group_);
  for (int i = 0; i < num_spatial_axes_; ++i) {
    if (reverse_dimensions()) {
      col_buffer_shape_.push_back(input_shape(i + 1));
    } else {
      col_buffer_shape_.push_back(output_shape_[i]);
    }
  }
  col_buffer_.Reshape(col_buffer_shape_);
  bottom_dim_ = bottom[0]->count(channel_axis_);
  top_dim_ = top[0]->count(channel_axis_);
  num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_;
  num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_;
  // Set up the all ones "bias multiplier" for adding biases by BLAS
  out_spatial_dim_ = top[0]->count(first_spatial_axis);
  if (bias_term_) {
    vector<int> bias_multiplier_shape(1, out_spatial_dim_);
    bias_multiplier_.Reshape(bias_multiplier_shape);
    caffe_set(bias_multiplier_.count(), Dtype(1),
        bias_multiplier_.mutable_cpu_data());
  }
}
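
Continuing the hypothetical example from LayerSetUp, with a bottom blob of shape [1, 32, 224, 224], pad = 1 and stride = 1: compute_output_shape() yields output_shape_ = [224, 224], each top blob is reshaped to [1, 64, 224, 224], conv_out_spatial_dim_ = 224*224 = 50176, and col_buffer_ gets shape [288, 224, 224], since kernel_dim_ * group_ = 144*2 = 288.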

ConvolutionLayer

Next comes the ConvolutionLayer class.

compute_output_shape

template <typename Dtype>
void ConvolutionLayer<Dtype>::compute_output_shape() {
  const int* kernel_shape_data = this->kernel_shape_.cpu_data();
  const int* stride_data = this->stride_.cpu_data();
  const int* pad_data = this->pad_.cpu_data();
  const int* dilation_data = this->dilation_.cpu_data();
  this->output_shape_.clear();
  for (int i = 0; i < this->num_spatial_axes_; ++i) {  // num_spatial_axes_ == 2
    // i + 1 to skip channel axis
    const int input_dim = this->input_shape(i + 1);  // h, then w
    const int kernel_extent = dilation_data[i] * (kernel_shape_data[i] - 1) + 1;
    const int output_dim = (input_dim + 2 * pad_data[i] - kernel_extent)
        / stride_data[i] + 1;
    this->output_shape_.push_back(output_dim);  // output_shape_ holds the two spatial dims of the output
  }
}
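
Unfolding kernel_extent, this is the familiar formula output_dim = (input_dim + 2*pad - dilation*(kernel - 1) - 1) / stride + 1. With hypothetical numbers input_dim = 224, kernel = 3, pad = 1, stride = 1 and dilation = 1, we get kernel_extent = 3 and output_dim = (224 + 2 - 3) / 1 + 1 = 224, i.e. a "same"-size output.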

Forward_cpu

template <typename Dtype>
void ConvolutionLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const Dtype* weight = this->blobs_[0]->cpu_data();  // filter weights
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->cpu_data();  // input
    Dtype* top_data = top[i]->mutable_cpu_data();  // output
    for (int n = 0; n < this->num_; ++n) {
      this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight,  // convolution as a matrix product via im2col
          top_data + n * this->top_dim_);
      if (this->bias_term_) {
        const Dtype* bias = this->blobs_[1]->cpu_data();
        this->forward_cpu_bias(top_data + n * this->top_dim_, bias);  // add the bias
      }
    }
  }
}
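
The heavy lifting is done by forward_cpu_gemm in base_conv_layer.cpp: unless the layer is 1x1, the input is unrolled with im2col and then multiplied with the weights, one GEMM per group. A sketch of that helper (minor details may differ between Caffe versions):

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_cpu_gemm(const Dtype* input,
    const Dtype* weights, Dtype* output, bool skip_im2col) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    if (!skip_im2col) {
      // Unroll one image into the column buffer.
      conv_im2col_cpu(input, col_buffer_.mutable_cpu_data());
    }
    col_buff = col_buffer_.cpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    // output = weights * col_buff: an (M x K) * (K x N) product per group, with
    // M = conv_out_channels_/group_, K = kernel_dim_, N = conv_out_spatial_dim_.
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
        group_, conv_out_spatial_dim_, kernel_dim_,
        (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)0., output + output_offset_ * g);
  }
}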

Backward_cpu

template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = this->blobs_[0]->cpu_data();
  Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    const Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
    // Bias gradient, if necessary.
    if (this->bias_term_ && this->param_propagate_down_[1]) {
      Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
      for (int n = 0; n < this->num_; ++n) {
        this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_);  // step 1: bias gradient
      }
    }
    if (this->param_propagate_down_[0] || propagate_down[i]) {
      for (int n = 0; n < this->num_; ++n) {
        // gradient w.r.t. weight. Note that we will accumulate diffs.
        if (this->param_propagate_down_[0]) {
          this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_,  // step 2: weight gradient
              top_diff + n * this->top_dim_, weight_diff);
        }
        // gradient w.r.t. bottom data, if necessary.
        if (propagate_down[i]) {
          this->backward_cpu_gemm(top_diff + n * this->top_dim_, weight,  // step 3: gradient w.r.t. the input
              bottom_diff + n * this->bottom_dim_);
        }
      }
    }
  }
}
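
In matrix terms, with col = im2col(bottom), the three steps compute bias_diff += top_diff * ones (via the bias multiplier set up in Reshape), weight_diff += top_diff * col^T, and bottom_diff = col2im(weight^T * top_diff). A sketch of the last helper, backward_cpu_gemm from base_conv_layer.cpp (again, details may differ between Caffe versions):

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_cpu_gemm(const Dtype* output,
    const Dtype* weights, Dtype* input) {
  Dtype* col_buff = col_buffer_.mutable_cpu_data();
  if (is_1x1_) {
    col_buff = input;  // 1x1: the column buffer is the image itself
  }
  for (int g = 0; g < group_; ++g) {
    // col_buff = weights^T * top_diff, per group.
    caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
        conv_out_spatial_dim_, conv_out_channels_ / group_,
        (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g,
        (Dtype)0., col_buff + col_offset_ * g);
  }
  if (!is_1x1_) {
    // Fold the columns back into image layout, accumulating overlapping patches.
    conv_col2im_cpu(col_buff, input);
  }
}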

The most important functions in the convolution layer are Forward_cpu and Backward_cpu, and the same is true for every other layer: only once the forward and backward passes of each layer are clear can you really understand how it works.
