Caffe source code analysis: pooling_layer.cpp


For the pooling layer, Caffe implements two algorithms: max pooling and average pooling.

Max pooling takes the maximum value inside a sliding window; average pooling takes the mean of all values inside the window.
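As a quick illustration of the two methods (a minimal standalone sketch, not Caffe code; the input values, kernel size and stride are made up for the example), the following computes both poolings over a 4x4 single-channel input. The nested ph/pw/h/w loops mirror the structure of PoolingLayer<Dtype>::Forward_cpu shown below.

#include <algorithm>
#include <cstdio>

int main() {
  const int H = 4, W = 4, K = 2, S = 2;  // 4x4 input, 2x2 window, stride 2
  const float in[H][W] = {{1, 2, 5, 6},
                          {3, 4, 7, 8},
                          {4, 3, 2, 1},
                          {8, 7, 6, 5}};
  const int PH = (H - K) / S + 1;  // pooled height (dims divide evenly, so no ceil needed)
  const int PW = (W - K) / S + 1;  // pooled width
  for (int ph = 0; ph < PH; ++ph) {
    for (int pw = 0; pw < PW; ++pw) {
      float maxval = -1e30f, sum = 0.f;
      for (int h = ph * S; h < ph * S + K; ++h) {
        for (int w = pw * S; w < pw * S + K; ++w) {
          maxval = std::max(maxval, in[h][w]);  // max pooling keeps the largest value
          sum += in[h][w];                      // average pooling accumulates, then divides
        }
      }
      std::printf("window (%d,%d): max = %g, ave = %g\n",
                  ph, pw, maxval, sum / (K * K));
    }
  }
  return 0;
}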


Actually, I have long had a question about Caffe's implementation: each layer does not seem to have an activation function bound to it. From the UFLDL tutorial, an activation function seems essential, so how is this explained?

Looking through the source, there are activation functions such as sigmoid_layer.cpp and sigmoid_layer.cu; in other words, activation functions are implemented at the layer level. There are also tanh_layer and relu_layer.

Does that mean we get more freedom when building a network, mixing and matching activation functions as we like? Yet in the examples that ship with Caffe, activation-function layers rarely appear; at most a relu_layer shows up, and I have not seen the others used. Does that mean activation functions are unimportant? It is puzzling.


// Copyright 2013 Yangqing Jia

#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"

using std::max;
using std::min;

namespace caffe {

template <typename Dtype>
void PoolingLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 1) << "PoolingLayer takes a single blob as input.";
  CHECK_EQ(top->size(), 1) << "PoolingLayer takes a single blob as output.";
  KSIZE_ = this->layer_param_.kernelsize();  // kernel size
  STRIDE_ = this->layer_param_.stride();  // stride
  CHANNELS_ = bottom[0]->channels();  // number of channels
  HEIGHT_ = bottom[0]->height();  // input height
  WIDTH_ = bottom[0]->width();  // input width
  POOLED_HEIGHT_ = static_cast<int>(
      ceil(static_cast<float>(HEIGHT_ - KSIZE_) / STRIDE_)) + 1;  // output height after pooling
  POOLED_WIDTH_ = static_cast<int>(
      ceil(static_cast<float>(WIDTH_ - KSIZE_) / STRIDE_)) + 1;  // output width after pooling
  (*top)[0]->Reshape(bottom[0]->num(), CHANNELS_, POOLED_HEIGHT_,  // reshape top to the pooled size
      POOLED_WIDTH_);
  // If stochastic pooling, we will initialize the random index part.
  if (this->layer_param_.pool() == LayerParameter_PoolMethod_STOCHASTIC) {
    rand_idx_.Reshape(bottom[0]->num(), CHANNELS_, POOLED_HEIGHT_,
      POOLED_WIDTH_);
  }
}
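// Worked example for the output-size formula above (the numbers are illustrative,
// not from the original post): with HEIGHT_ = 6, KSIZE_ = 3 and STRIDE_ = 2,
// POOLED_HEIGHT_ = ceil((6 - 3) / 2.0) + 1 = 2 + 1 = 3, i.e. windows start at
// rows 0, 2 and 4; the last window is clipped to rows 4..5 by the min() against
// HEIGHT_ in the loops below.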

// TODO(Yangqing): Is there a faster way to do pooling in the channel-first
// case?
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();  // input of the pooling layer
  Dtype* top_data = (*top)[0]->mutable_cpu_data();  // output of the pooling layer
  // Different pooling methods. We explicitly do the switch outside the for
  // loop to save time, although this results in more codes.
  int top_count = (*top)[0]->count();
  switch (this->layer_param_.pool()) {
  case LayerParameter_PoolMethod_MAX:  // max pooling
    // Initialize
    for (int i = 0; i < top_count; ++i) {
      top_data[i] = -FLT_MAX;
    }
    // The main loop
    for (int n = 0; n < bottom[0]->num(); ++n) {
      for (int c = 0; c < CHANNELS_; ++c) {
        for (int ph = 0; ph < POOLED_HEIGHT_; ++ph) {
          for (int pw = 0; pw < POOLED_WIDTH_; ++pw) {
            int hstart = ph * STRIDE_;
            int wstart = pw * STRIDE_;
            int hend = min(hstart + KSIZE_, HEIGHT_);
            int wend = min(wstart + KSIZE_, WIDTH_);
            for (int h = hstart; h < hend; ++h) {  // take the maximum inside the kernel window
              for (int w = wstart; w < wend; ++w) {
                top_data[ph * POOLED_WIDTH_ + pw] =
                  max(top_data[ph * POOLED_WIDTH_ + pw],
                      bottom_data[h * WIDTH_ + w]);
              }
            }
          }
        }
        // compute offset: advance the pointers to the next channel. Note where this
        // sits in the loop nest; pooling is done independently for each channel,
        // which is why the indexing above needs no n or c term.
        bottom_data += bottom[0]->offset(0, 1);
        top_data += (*top)[0]->offset(0, 1);
      }
    }
    break;
  case LayerParameter_PoolMethod_AVE:
    for (int i = 0; i < top_count; ++i) {
      top_data[i] = 0;
    }
    // The main loop
    for (int n = 0; n < bottom[0]->num(); ++n) {
      for (int c = 0; c < CHANNELS_; ++c) {
        for (int ph = 0; ph < POOLED_HEIGHT_; ++ph) {
          for (int pw = 0; pw < POOLED_WIDTH_; ++pw) {
            int hstart = ph * STRIDE_;
            int wstart = pw * STRIDE_;
            int hend = min(hstart + KSIZE_, HEIGHT_);
            int wend = min(wstart + KSIZE_, WIDTH_);
            for (int h = hstart; h < hend; ++h) {  // accumulate the sum inside the kernel window
              for (int w = wstart; w < wend; ++w) {
                top_data[ph * POOLED_WIDTH_ + pw] +=
                    bottom_data[h * WIDTH_ + w];
              }
            }
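            // Divide by the actual window area so that windows clipped at the
            // right/bottom border are averaged over the correct number of elements.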
            top_data[ph * POOLED_WIDTH_ + pw] /=
                (hend - hstart) * (wend - wstart);
          }
        }
        // compute offset
        bottom_data += bottom[0]->offset(0, 1);
        top_data += (*top)[0]->offset(0, 1);
      }
    }
    break;
  case LayerParameter_PoolMethod_STOCHASTIC:  // stochastic pooling is not implemented in the CPU version
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
}

template <typename Dtype>
Dtype PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (!propagate_down) {
    return Dtype(0.);
  }
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  const Dtype* bottom_data = (*bottom)[0]->cpu_data();
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  // Different pooling methods. We explicitly do the switch outside the for
  // loop to save time, although this results in more codes.
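  // Clear all input gradients first; both branches below accumulate into bottom_diff.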
  memset(bottom_diff, 0, (*bottom)[0]->count() * sizeof(Dtype));
  switch (this->layer_param_.pool()) {
  case LayerParameter_PoolMethod_MAX:
    // The main loop
    for (int n = 0; n < top[0]->num(); ++n) {
      for (int c = 0; c < CHANNELS_; ++c) {
        for (int ph = 0; ph < POOLED_HEIGHT_; ++ph) {
          for (int pw = 0; pw < POOLED_WIDTH_; ++pw) {
            int hstart = ph * STRIDE_;
            int wstart = pw * STRIDE_;
            int hend = min(hstart + KSIZE_, HEIGHT_);
            int wend = min(wstart + KSIZE_, WIDTH_);
            for (int h = hstart; h < hend; ++h) {
              for (int w = wstart; w < wend; ++w) {
                // Propagate the gradient from the pooling output back to the input.
                // With max pooling the output stores the maximum of its window, so
                // only the input element(s) equal to that maximum receive gradient;
                // if several elements tie for the maximum, each gets the full share.
                bottom_diff[h * WIDTH_ + w] +=
                    top_diff[ph * POOLED_WIDTH_ + pw] *
                    (bottom_data[h * WIDTH_ + w] ==
                        top_data[ph * POOLED_WIDTH_ + pw]);
              }
            }
          }
        }
        // offset: advance all pointers to the next channel
        bottom_data += (*bottom)[0]->offset(0, 1);
        top_data += top[0]->offset(0, 1);
        bottom_diff += (*bottom)[0]->offset(0, 1);
        top_diff += top[0]->offset(0, 1);
      }
    }
    break;
  case LayerParameter_PoolMethod_AVE:
    // The main loop
    for (int n = 0; n < top[0]->num(); ++n) {
      for (int c = 0; c < CHANNELS_; ++c) {
        for (int ph = 0; ph < POOLED_HEIGHT_; ++ph) {
          for (int pw = 0; pw < POOLED_WIDTH_; ++pw) {
            int hstart = ph * STRIDE_;
            int wstart = pw * STRIDE_;
            int hend = min(hstart + KSIZE_, HEIGHT_);
            int wend = min(wstart + KSIZE_, WIDTH_);
            int poolsize = (hend - hstart) * (wend - wstart);
            for (int h = hstart; h < hend; ++h) {
              for (int w = wstart; w < wend; ++w) {
                // Propagate the gradient to the input. With average pooling every
                // input in the window receives an equal share, i.e. a weight of
                // 1 / poolsize.
                bottom_diff[h * WIDTH_ + w] +=
                  top_diff[ph * POOLED_WIDTH_ + pw] / poolsize;
              }
            }
          }
        }
        // offset
        bottom_data += (*bottom)[0]->offset(0, 1);
        top_data += top[0]->offset(0, 1);
        bottom_diff += (*bottom)[0]->offset(0, 1);
        top_diff += top[0]->offset(0, 1);
      }
    }
    break;
  case LayerParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  return Dtype(0.);
}


INSTANTIATE_CLASS(PoolingLayer);


}  // namespace caffe
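To make the backward pass for max pooling concrete, here is a minimal standalone sketch (not Caffe code; the numbers are made up) that applies the same equality-based routing as the MAX branch of Backward_cpu to a single 2x2 window:

#include <cstdio>

int main() {
  // One 2x2 max-pooling window: the forward pass produced top = 4 (the maximum)
  // and the loss gradient arriving at that output is top_diff = 0.5.
  const float bottom[4] = {1.f, 4.f, 3.f, 4.f};  // note the tie: two elements equal 4
  const float top = 4.f, top_diff = 0.5f;
  float bottom_diff[4] = {0.f, 0.f, 0.f, 0.f};
  for (int i = 0; i < 4; ++i) {
    // Same rule as in Backward_cpu: an input receives gradient only if it equals
    // the pooled output; tied maxima each receive the full 0.5.
    bottom_diff[i] += top_diff * (bottom[i] == top);
  }
  for (int i = 0; i < 4; ++i)
    std::printf("bottom_diff[%d] = %g\n", i, bottom_diff[i]);
  return 0;
}

This prints 0, 0.5, 0, 0.5: both tied maxima get the full gradient. With average pooling the same loop would instead add top_diff / poolsize to every element of the window.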

Author: linger
Original post: http://blog.csdn.net/lingerlanlan/article/details/38294169
