diff --git a/compile_caution.md b/compile_caution.md
new file mode 100644
index 000000000..a68449588
--- /dev/null
+++ b/compile_caution.md
@@ -0,0 +1 @@
+Remember to run `source ./external/mlsl/l_mlsl_2018.0.003//intel64/bin/mlslvars.sh` before compiling pycaffe.
diff --git a/include/caffe/layers/continuation_indicator_layer.hpp b/include/caffe/layers/continuation_indicator_layer.hpp
new file mode 100644
index 000000000..7c901eb70
--- /dev/null
+++ b/include/caffe/layers/continuation_indicator_layer.hpp
@@ -0,0 +1,44 @@
+#ifndef CAFFE_CONTINUATION_INDICATOR_LAYER_HPP_
+#define CAFFE_CONTINUATION_INDICATOR_LAYER_HPP_
+
+#include <vector>
+
+#include "caffe/layer.hpp"
+#include "caffe/proto/caffe.pb.h"
+
+namespace caffe {
+
+// Produces a T x N blob of sequence-continuation indicators:
+// 0 at the first time step of every sequence, 1 afterwards.
+template <typename Dtype>
+class ContinuationIndicatorLayer : public Layer<Dtype> {
+ public:
+  explicit ContinuationIndicatorLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
+  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual inline const char* type() const { return "ContinuationIndicator"; }
+  virtual inline int ExactNumBottomBlobs() const { return 0; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
+ protected:
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down,
+      const vector<Blob<Dtype>*>& bottom) {}
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down,
+      const vector<Blob<Dtype>*>& bottom);
+
+  int mini_batch_;  // N: number of independent sequences per mini-batch
+  int time_step_;   // T: number of time steps per sequence
+};
+
+}  // namespace caffe
+
+#endif  // CAFFE_CONTINUATION_INDICATOR_LAYER_HPP_
diff --git a/src/caffe/layers/continuation_indicator_layer.cpp b/src/caffe/layers/continuation_indicator_layer.cpp
new file mode 100644
index 000000000..2c0c11edc
--- /dev/null
+++ b/src/caffe/layers/continuation_indicator_layer.cpp
@@ -0,0 +1,55 @@
+#include <vector>
+
+#include "caffe/layers/continuation_indicator_layer.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void ContinuationIndicatorLayer<Dtype>::LayerSetUp(
+    const vector<Blob<Dtype>*>& bottom,
+    const vector<Blob<Dtype>*>& top) {
+  ContinuationIndicatorParameter param =
+      this->layer_param_.continuation_indicator_param();
+  mini_batch_ = param.batch_size();
+  time_step_ = param.time_step();
+  CHECK_GT(mini_batch_, 0) << "The batch size should be greater than 0.";
+  CHECK_GT(time_step_, 0) << "The time step should be greater than 0.";
+}
+
+template <typename Dtype>
+void ContinuationIndicatorLayer<Dtype>::Reshape(
+    const vector<Blob<Dtype>*>& bottom,
+    const vector<Blob<Dtype>*>& top) {
+  // The top blob holds one indicator per time step and sequence: T x N.
+  vector<int> top_shape;
+  top_shape.push_back(time_step_);
+  top_shape.push_back(mini_batch_);
+  top[0]->Reshape(top_shape);
+}
+
+template <typename Dtype>
+void ContinuationIndicatorLayer<Dtype>::Forward_cpu(
+    const vector<Blob<Dtype>*>& bottom,
+    const vector<Blob<Dtype>*>& top) {
+  CHECK_EQ(top[0]->shape(0), time_step_)
+      << "The first dimension of the top blob must equal the time step.";
+  CHECK_EQ(top[0]->shape(1), mini_batch_)
+      << "The second dimension of the top blob must equal the batch size.";
+  Dtype* top_data = top[0]->mutable_cpu_data();
+  for (int t = 0; t < time_step_; ++t) {
+    for (int b = 0; b < mini_batch_; ++b) {
+      // Time step index t, batch index b: emit 0 at the start of every
+      // sequence (t == 0) and 1 for all continuing time steps.
+      *top_data++ = (t == 0) ? Dtype(0) : Dtype(1);
+    }
+  }
+}
+
+#ifdef CPU_ONLY
+STUB_GPU(ContinuationIndicatorLayer);
+#endif
+
+INSTANTIATE_CLASS(ContinuationIndicatorLayer);
+REGISTER_LAYER_CLASS(ContinuationIndicator);
+
+}  // namespace caffe
diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto
index f850864ce..771b6c38f 100755
--- a/src/caffe/proto/caffe.proto
+++ b/src/caffe/proto/caffe.proto
@@ -610,6 +610,14 @@ message LayerParameter {
   optional MnActivationParameter mn_activation_param = 151;
   optional MnParamGradCompressParameter mn_grad_compress_param = 156;
   optional QuantizationParameter quantization_param = 158;
+
+  optional ContinuationIndicatorParameter continuation_indicator_param = 200;
+}
+
+
+message ContinuationIndicatorParameter {
+  optional uint32 time_step = 1 [default = 0];
+  optional uint32 batch_size = 2 [default = 0];
 }
 
 message MultinodeLayerParameter {
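
For reference, a minimal sketch of how the new layer and parameter might be declared in a net prototxt. This is not part of the patch; the layer names, blob names, and sizes used here ("indicator", "data", "lstm1", 75, 32, 256) are illustrative assumptions. The layer takes no bottom blobs and emits a time_step x batch_size blob that is 0 at the first time step and 1 afterwards, matching the sequence-continuation input expected by Caffe's recurrent layers such as LSTM:

# Illustrative only: names and sizes below are assumptions, not part of the patch.
layer {
  name: "indicator"
  type: "ContinuationIndicator"
  top: "indicator"
  continuation_indicator_param {
    time_step: 75    # T, must match the time dimension of the recurrent input
    batch_size: 32   # N, must match the number of sequences per mini-batch
  }
}
layer {
  name: "lstm1"
  type: "LSTM"
  bottom: "data"       # T x N x ... input sequence blob
  bottom: "indicator"  # T x N continuation indicators from the layer above
  top: "lstm1"
  recurrent_param { num_output: 256 }
}

Because the layer has no bottom blobs, time_step and batch_size cannot be inferred and must be set explicitly; the CHECK_GT calls in LayerSetUp reject the zero defaults.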