Commit a5658cd

Merge pull request #9 from jeffdonahue/fixflattenbug
fix really stupid bug in flatten layer (and add test that shows the
Yangqing committed Dec 5, 2013
2 parents 8c96ac2 + b906327 commit a5658cd
Showing 3 changed files with 12 additions and 8 deletions.
2 changes: 1 addition & 1 deletion include/caffe/vision_layers.hpp
@@ -106,7 +106,7 @@ class FlattenLayer : public Layer<Dtype> {
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-  int channels_out_;
+  int count_;
};


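The substance of the fix is already visible in this header change: the per-image size channels_out_ is replaced by the whole-blob count count_. A sketch of the arithmetic (illustration only, not code from the commit, assuming the (2, 3, 6, 5) bottom shape that the test below is consistent with):

// Hypothetical numbers for illustration.
int channels_out = 3 * 6 * 5;   // 90: elements in ONE flattened image
int count = 2 * 3 * 6 * 5;      // 180: elements in the whole batch
// The old layer passed the 90 to caffe_copy, so only the first image in
// the batch was copied; the rest of the top blob kept stale data.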
16 changes: 9 additions & 7 deletions src/caffe/layers/flatten_layer.cpp
@@ -13,34 +13,36 @@ void FlattenLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 1) << "Flatten Layer takes a single blob as input.";
CHECK_EQ(top->size(), 1) << "Flatten Layer takes a single blob as output.";
-  channels_out_ = bottom[0]->channels() * bottom[0]->height()
+  int channels_out = bottom[0]->channels() * bottom[0]->height()
      * bottom[0]->width();
-  (*top)[0]->Reshape(bottom[0]->num(), channels_out_, 1, 1);
+  (*top)[0]->Reshape(bottom[0]->num(), channels_out, 1, 1);
+  count_ = bottom[0]->num() * channels_out;
+  CHECK_EQ(count_, bottom[0]->count());
+  CHECK_EQ(count_, (*top)[0]->count());
};

template <typename Dtype>
void FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {

const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
-  caffe_copy(channels_out_, bottom_data, top_data);
+  caffe_copy(count_, bottom_data, top_data);
}

template <typename Dtype>
void FlattenLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
-  caffe_gpu_copy(channels_out_, bottom_data, top_data);
+  caffe_gpu_copy(count_, bottom_data, top_data);
}

template <typename Dtype>
Dtype FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
-  caffe_copy(channels_out_, top_diff, bottom_diff);
+  caffe_copy(count_, top_diff, bottom_diff);
}


@@ -49,7 +51,7 @@ Dtype FlattenLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
-  caffe_gpu_copy(channels_out_, top_diff, bottom_diff);
+  caffe_gpu_copy(count_, top_diff, bottom_diff);
}

INSTANTIATE_CLASS(FlattenLayer);
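Why a single flat copy of count_ elements is the correct Forward/Backward pass: Caffe blobs store data in row-major (num, channels, height, width) order, so reshaping each image to (channels * height * width, 1, 1) moves no element in memory. Below is a minimal standalone sketch of this invariant, with std::vector standing in for Blob and an assumed (2, 3, 6, 5) shape; it is not code from the commit.

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  const int num = 2, channels = 3, height = 6, width = 5;
  const int count = num * channels * height * width;  // what count_ holds
  std::vector<float> bottom(count), top(count);
  for (int i = 0; i < count; ++i) bottom[i] = static_cast<float>(i);
  // The fixed forward pass: one copy covering the whole batch.
  std::copy(bottom.begin(), bottom.end(), top.begin());
  // Verify the index mapping the new test asserts, for EVERY image n,
  // not just n == 0 (the only image the buggy channels_out_-sized copy
  // handled correctly).
  for (int n = 0; n < num; ++n) {
    for (int c = 0; c < channels * height * width; ++c) {
      // bottom offset of (n, c / (H*W), (c / W) % H, c % W)
      const int src = ((n * channels + c / (height * width)) * height
                       + (c / width) % height) * width + c % width;
      assert(top[n * channels * height * width + c] == bottom[src]);
    }
  }
  return 0;
}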
2 changes: 2 additions & 0 deletions src/caffe/test/test_flatten_layer.cpp
@@ -58,6 +58,8 @@ TYPED_TEST(FlattenLayerTest, TestCPU) {
for (int c = 0; c < 3 * 6 * 5; ++c) {
EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0),
this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5));
+    EXPECT_EQ(this->blob_top_->data_at(1, c, 0, 0),
+        this->blob_bottom_->data_at(1, c / (6 * 5), (c / 5) % 6, c % 5));
}
}

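The added assertions exercise the second image in the batch (n = 1), which is exactly the data the buggy copy never touched. The index arithmetic recovers the original (channel, height, width) coordinates from a flattened index c; for example, with H = 6, W = 5 and c = 47:

  c / (6 * 5) = 47 / 30 = 1   (channel)
  (c / 5) % 6 =  9 % 6  = 3   (height)
  c % 5       = 47 % 5  = 2   (width)

and 1 * 30 + 3 * 5 + 2 = 47, so the mapping round-trips.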
