
Nd conv pool #2824

Open · wants to merge 14 commits into master
Instantiate virtual forward and backward cpu (Not Implemented)
Guillaume Bono committed Jul 7, 2015
commit 491bf8c4b0414e25d03b5e80a64ff36303a35b43
8 changes: 8 additions & 0 deletions include/caffe/vision_layers.hpp
@@ -269,6 +269,10 @@ class CudnnNdConvolutionLayer : public Layer<Dtype> {
virtual inline const char* type() const { return "NdConvolution"; }

protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
@@ -519,6 +523,10 @@ class CudnnNdPoolingLayer : public Layer<Dtype> {
virtual inline int ExactNumTopBlobs() const { return 1; }

protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
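Note for context (not part of this diff): the CPU declarations above are what make the Nd layers concrete classes. In the Caffe tree this PR targets, Layer<Dtype> declares Forward_cpu and Backward_cpu as pure virtual, while the GPU variants merely fall back to the CPU ones, roughly as paraphrased below from include/caffe/layer.hpp (other members omitted).

template <typename Dtype>
class Layer {
  // ... constructors, SetUp, Reshape, blobs, etc. omitted ...
 protected:
  // Pure virtual: a layer without CPU implementations cannot be instantiated,
  // hence the NOT_IMPLEMENTED stubs added by this commit.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
  // The GPU variants default to falling back on the CPU path.
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    return Forward_cpu(bottom, top);
  }
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) = 0;
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    Backward_cpu(top, propagate_down, bottom);
  }
};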
10 changes: 10 additions & 0 deletions src/caffe/layers/cudnn_ndconv_layer.cpp
@@ -219,6 +219,16 @@ void CudnnNdConvolutionLayer<Dtype>::compute_output_shape() {
}
}

template <typename Dtype>
void CudnnNdConvolutionLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
NOT_IMPLEMENTED;
}

template <typename Dtype>
void CudnnNdConvolutionLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
NOT_IMPLEMENTED;
}

template <typename Dtype>
CudnnNdConvolutionLayer<Dtype>::~CudnnNdConvolutionLayer() {
// Check that handles have been setup before destroying.
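The NOT_IMPLEMENTED macro used in these stubs is the existing one from include/caffe/common.hpp (quoted here for context, not added by this PR): it logs a fatal error through glog, so calling the CPU path of either Nd layer aborts the process instead of silently returning nothing.

// From include/caffe/common.hpp: mark code paths that are not implemented,
// so that executing them produces a fatal log message.
#define NOT_IMPLEMENTED LOG(FATAL) << "Not Implemented Yet"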
10 changes: 10 additions & 0 deletions src/caffe/layers/cudnn_ndpooling_layer.cpp
@@ -85,6 +85,16 @@ void CudnnNdPoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
cudnn::setTensorNdDesc<Dtype>(&top_desc_, pooled_shape_);
}

template <typename Dtype>
void CudnnNdPoolingLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
NOT_IMPLEMENTED;
}

template <typename Dtype>
void CudnnNdPoolingLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
NOT_IMPLEMENTED;
}

template <typename Dtype>
CudnnNdPoolingLayer<Dtype>::~CudnnNdPoolingLayer() {
// Check that handles have been setup before destroying.
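For completeness, this is roughly how the stubs above are reached: Layer<Dtype>::Forward (and likewise Backward) dispatches on the global Caffe mode, so the Nd layers do real work only through their cuDNN implementations in GPU mode, while CPU mode lands in the stubs and aborts. A simplified paraphrase of the dispatch in include/caffe/layer.hpp, with the Reshape call and loss accumulation omitted:

template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype loss = 0;
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);  // Nd layers: NOT_IMPLEMENTED -> LOG(FATAL)
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);  // cuDNN path introduced by this PR
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  return loss;
}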