Commit 7071aef

Minor changes: delete TODO.txt; ran/fixed lint; better formatting, etc.
Chuck Cho committed Mar 29, 2016
1 parent 8482a39 commit 7071aef
Showing 19 changed files with 133 additions and 203 deletions.
18 changes: 0 additions & 18 deletions TODO(chuck).txt

This file was deleted.

11 changes: 6 additions & 5 deletions include/caffe/blob.hpp
@@ -138,13 +138,14 @@ class Blob {
   inline int channels() const { return LegacyShape(1); }
   /// @brief Deprecated legacy shape accessor length: use shape(2) instead.
   inline int length() const { return (num_axes() == 5) ? LegacyShape(2) : 1; }
-  //inline int length() const { return LegacyShape(2); }
   /// @brief Deprecated legacy shape accessor height: use shape(3) instead.
-  inline int height() const { return (num_axes() == 5) ? LegacyShape(3) : LegacyShape(2); }
-  //inline int height() const { return LegacyShape(3); }
+  inline int height() const {
+    return (num_axes() == 5) ? LegacyShape(3) : LegacyShape(2);
+  }
   /// @brief Deprecated legacy shape accessor width: use shape(4) instead.
-  inline int width() const { return (num_axes() == 5) ? LegacyShape(4) : LegacyShape(3); }
-  //inline int width() const { return LegacyShape(4); }
+  inline int width() const {
+    return (num_axes() == 5) ? LegacyShape(4) : LegacyShape(3);
+  }
   inline int LegacyShape(int index) const {
     CHECK_LE(num_axes(), 5)
         << "Cannot use legacy accessors on Blobs with > 5 axes.";
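
Note: the accessors above dispatch on num_axes(), so the same call works on
4-D image blobs and 5-D video blobs. A minimal sketch of the mapping, with
assumed shapes (the demo function and the concrete values are illustrative,
not part of this commit):

    #include <vector>
    #include "caffe/blob.hpp"

    void legacy_accessor_demo() {
      // 5-D video blob: (num, channels, length, height, width).
      std::vector<int> vshape(5);
      vshape[0] = 1; vshape[1] = 3; vshape[2] = 16;
      vshape[3] = 112; vshape[4] = 112;
      caffe::Blob<float> video(vshape);
      int l = video.length();  // 16,  i.e. LegacyShape(2)
      int h = video.height();  // 112, i.e. LegacyShape(3)
      int w = video.width();   // 112, i.e. LegacyShape(4)

      // 4-D image blob: (num, channels, height, width) falls back to the
      // classic layout.
      std::vector<int> ishape(4);
      ishape[0] = 1; ishape[1] = 3; ishape[2] = 224; ishape[3] = 224;
      caffe::Blob<float> image(ishape);
      int l2 = image.length();  // 1 (no length axis)
      int h2 = image.height();  // 224, i.e. LegacyShape(2)
      int w2 = image.width();   // 224, i.e. LegacyShape(3)
    }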
3 changes: 1 addition & 2 deletions include/caffe/layers/video_data_layer.hpp
@@ -15,8 +15,7 @@
 // an extension the std::pair which used to store image filename and
 // its label (int). now, a frame number associated with the video filename
 // is needed (second param) to fully represent a video segment
-struct triplet
-{
+struct triplet {
   std::string first;
   int second, third;
 };
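
A short sketch of how one of these entries might be filled in when the layer
parses its source list; the helper and the "<video_path> <start_frame> <label>"
line format are assumptions for illustration, only the struct fields come from
the header above:

    #include <string>

    // Hypothetical helper: assemble one video segment entry from fields
    // already parsed out of a list-file line.
    triplet make_segment(const std::string& filename, int frame, int label) {
      triplet seg;
      seg.first = filename;  // video filename
      seg.second = frame;    // frame number locating the segment
      seg.third = label;     // integer class label
      return seg;
    }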
4 changes: 2 additions & 2 deletions include/caffe/util/im2col.hpp
@@ -7,7 +7,7 @@ template <typename Dtype>
 void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes,
     const int* im_shape, const int* col_shape,
     const int* kernel_shape, const int* pad, const int* stride,
-    const int* dilation, Dtype* data_col, const bool forced_3d=false);
+    const int* dilation, Dtype* data_col, const bool forced_3d = false);
 
 template <typename Dtype>
 void im2col_cpu(const Dtype* data_im, const int channels,
@@ -20,7 +20,7 @@ template <typename Dtype>
 void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes,
     const int* im_shape, const int* col_shape,
     const int* kernel_shape, const int* pad, const int* stride,
-    const int* dilation, Dtype* data_im, const bool forced_3d=false);
+    const int* dilation, Dtype* data_im, const bool forced_3d = false);
 
 template <typename Dtype>
 void col2im_cpu(const Dtype* data_col, const int channels,
1 change: 1 addition & 0 deletions include/caffe/util/io.hpp
@@ -5,6 +5,7 @@
 #include <iomanip>
 #include <iostream>  // NOLINT(readability/streams)
 #include <string>
+#include <vector>
 
 #include "google/protobuf/message.h"
 
6 changes: 4 additions & 2 deletions scripts/cpp_lint.py
@@ -1595,10 +1595,10 @@ def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
"""Except the base classes, Caffe DataLayer should define DataLayerSetUp
instead of LayerSetUp.
The base DataLayers define common SetUp steps, the subclasses should
not override them.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
@@ -1610,6 +1610,7 @@ def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
   if ix >= 0 and (
       line.find('void DataLayer<Dtype>::LayerSetUp') != -1 or
       line.find('void ImageDataLayer<Dtype>::LayerSetUp') != -1 or
+      line.find('void VideoDataLayer<Dtype>::LayerSetUp') != -1 or
       line.find('void MemoryDataLayer<Dtype>::LayerSetUp') != -1 or
       line.find('void WindowDataLayer<Dtype>::LayerSetUp') != -1):
     error(filename, linenum, 'caffe/data_layer_setup', 2,
@@ -1622,6 +1623,7 @@ def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
       line.find('void Base') == -1 and
       line.find('void DataLayer<Dtype>::DataLayerSetUp') == -1 and
       line.find('void ImageDataLayer<Dtype>::DataLayerSetUp') == -1 and
+      line.find('void VideoDataLayer<Dtype>::DataLayerSetUp') == -1 and
       line.find('void MemoryDataLayer<Dtype>::DataLayerSetUp') == -1 and
       line.find('void WindowDataLayer<Dtype>::DataLayerSetUp') == -1):
     error(filename, linenum, 'caffe/data_layer_setup', 2,
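
For reference, the convention this check enforces, now for VideoDataLayer as
well: data layers implement DataLayerSetUp and leave LayerSetUp to the base
class, which owns the common SetUp steps. A sketch of what the linter accepts
and rejects (the method body is illustrative, not from this commit):

    // Accepted: the subclass defines DataLayerSetUp.
    template <typename Dtype>
    void VideoDataLayer<Dtype>::DataLayerSetUp(
        const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
      // layer-specific setup: open the list file, shape prefetch blobs, ...
    }

    // Rejected with 'caffe/data_layer_setup': overriding LayerSetUp directly.
    // template <typename Dtype>
    // void VideoDataLayer<Dtype>::LayerSetUp(
    //     const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);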
69 changes: 40 additions & 29 deletions src/caffe/blob.cpp
@@ -34,15 +34,8 @@ void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
 
 template <typename Dtype>
 void Blob<Dtype>::Reshape(const vector<int>& shape) {
-  //std::cout << "-----------------------------"<<std::endl;
-  //for (int i=0; i<shape.size(); ++i)
-  //  std::cout<<"shape["<<i<<"]="<<shape[i]<<std::endl;
   vector<int> new_shape = shape;
   const bool force_3d_ = (shape.size() == 4) && false;
-  //if (force_3d_)
-  //  std::cout<<"!!!!! force_3d_="<<force_3d_<<" !!!!!"<<std::endl;
-  //else
-  //  std::cout<<"force_3d_="<<force_3d_<<std::endl;
   if (force_3d_)
     new_shape.insert(new_shape.begin() + 2, 1);
   CHECK_LE(new_shape.size(), kMaxBlobAxes);
@@ -64,9 +57,6 @@ void Blob<Dtype>::Reshape(const vector<int>& shape) {
     data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
     diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
   }
-  //std::cout << "----------"<<std::endl;
-  //for (int i=0; i<shape_.size(); ++i)
-  //  std::cout<<"shape_["<<i<<"]="<<shape_[i]<<std::endl;
 }
 
 template <typename Dtype>
@@ -441,8 +431,7 @@ bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
            LegacyShape(-3) == other.length() &&
            LegacyShape(-2) == other.height() &&
            LegacyShape(-1) == other.width();
-  }
-  else if ((other.has_num() || other.has_channels() || other.has_height() ||
+  } else if ((other.has_num() || other.has_channels() || other.has_height() ||
       other.has_width()) && !other.has_length()) {
     // Using deprecated 4D Blob dimensions --
     // shape is (num, channels, height, width).
@@ -498,10 +487,11 @@ void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
 
 template <typename Dtype>
 void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
+  bool is_legacy_C3D_proto = false;
   if (reshape) {
     vector<int> shape;
-    if ((proto.has_num() || proto.has_channels() ||
-        proto.has_height() || proto.has_width()) && proto.has_length()) {
+    if (proto.has_num() && proto.has_channels() && proto.has_height() &&
+        proto.has_width() && proto.has_length()) {
       // Using deprecated 5D Blob dimensions --
       // shape is (num, channels, length, height, width).
       shape.resize(5);
@@ -510,18 +500,31 @@ void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
       shape[2] = proto.length();
       shape[3] = proto.height();
       shape[4] = proto.width();
-    }
-    // backward compatibility
-    else if ((proto.has_num() || proto.has_channels() ||
-        proto.has_height() || proto.has_width()) && !proto.has_length()) {
-      // Using deprecated 4D Blob dimensions --
-      // shape is (num, channels, height, width).
-      shape.resize(5);
-      shape[0] = proto.num();
-      shape[1] = proto.channels();
-      shape[2] = 1;
-      shape[3] = proto.height();
-      shape[4] = proto.width();
+    } else if (proto.has_num() && proto.has_channels() && proto.has_height() &&
+        proto.has_width() && !proto.has_length()) {
+      // a hacky way to support old proto generated by facebook/C3D
+      const int forgotten_dim_size = proto.diff_size() / (proto.num() *
+          proto.channels() * proto.height() * proto.width());
+      is_legacy_C3D_proto = (forgotten_dim_size > 1);
+      LOG(INFO) << "legacy C3D proto detected";
+      if (is_legacy_C3D_proto) {
+        shape.resize(5);
+        shape[0] = proto.num();
+        shape[1] = proto.channels();
+        shape[2] = proto.height();
+        shape[3] = proto.width();
+        shape[4] = forgotten_dim_size;
+      } else {
+        // Using deprecated 4D Blob dimensions --
+        // shape is (num, channels, height, width).
+        shape.resize(5);
+        shape[0] = proto.num();
+        shape[1] = proto.channels();
+        shape[2] = 1;
+        shape[3] = proto.height();
+        shape[4] = proto.width();
+      }
     } else {
       shape.resize(proto.shape().dim_size());
       for (int i = 0; i < proto.shape().dim_size(); ++i) {
@@ -540,9 +543,17 @@ void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
       data_vec[i] = proto.double_data(i);
     }
   } else {
-    CHECK_EQ(count_, proto.data_size());
-    for (int i = 0; i < count_; ++i) {
-      data_vec[i] = proto.data(i);
+    // normal case
+    if (!is_legacy_C3D_proto) {
+      CHECK_EQ(count_, proto.data_size());
+      for (int i = 0; i < count_; ++i) {
+        data_vec[i] = proto.data(i);
+      }
+    // binary proto file created by legacy C3D code
+    } else {
+      for (int i = 0; i < count_; ++i) {
+        data_vec[i] = proto.diff(i);
+      }
     }
   }
   if (proto.double_diff_size() > 0) {
@@ -551,7 +562,7 @@ void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
     for (int i = 0; i < count_; ++i) {
       diff_vec[i] = proto.double_diff(i);
     }
-  } else if (proto.diff_size() > 0) {
+  } else if (proto.diff_size() > 0 && !is_legacy_C3D_proto) {
     CHECK_EQ(count_, proto.diff_size());
     Dtype* diff_vec = mutable_cpu_diff();
     for (int i = 0; i < count_; ++i) {
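
To make the recovery arithmetic concrete: a legacy facebook/C3D proto stores a
5-D kernel under 4-D dimension fields and serializes its weights in the diff
field, so the missing dimension falls out by division. A worked example with
assumed sizes (the 64x3x3x3x3 kernel is hypothetical, not read from this
commit):

    // Assumed legacy blob: num=64, channels=3, height=3, width=3, no length
    // field, and 64*3*3*3*3 = 5184 weights serialized in proto.diff.
    const int num = 64, channels = 3, height = 3, width = 3;
    const int diff_size = 5184;
    const int forgotten_dim_size =
        diff_size / (num * channels * height * width);  // 5184 / 1728 = 3
    // forgotten_dim_size > 1, so is_legacy_C3D_proto is set: the blob is
    // reshaped to (64, 3, 3, 3, 3) with the recovered axis last, and the
    // weights are read from proto.diff() rather than proto.data().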
36 changes: 17 additions & 19 deletions src/caffe/layers/base_conv_layer.cpp
@@ -17,13 +17,11 @@ void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
   channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
   const int num_axes = bottom[0]->num_axes();
   int first_spatial_axis;
-  if (num_axes == 5 && channel_axis_ == 1 &&
-      bottom[0]->shape(2) == 1) {
+  if (num_axes == 5 && channel_axis_ == 1 && bottom[0]->shape(2) == 1) {
     forced_3d_ = true;
-    first_spatial_axis = 3; // not 2
-    num_spatial_axes_ = 2; // not 3
-  }
-  else {
+    first_spatial_axis = 3;  // not 2
+    num_spatial_axes_ = 2;  // not 3
+  } else {
     forced_3d_ = false;
     first_spatial_axis = channel_axis_ + 1;
     num_spatial_axes_ = num_axes - first_spatial_axis;
@@ -47,22 +45,21 @@ void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
     CHECK(num_kernel_dims == 1 ||
           num_kernel_dims == num_spatial_axes_ ||
           num_kernel_dims == num_spatial_axes_ + 1)
-        << "kernel_size must be specified once, or once per spatial dimension "
-        << "(kernel_size specified " << num_kernel_dims << " times; "
-        << num_spatial_axes_ << " spatial dims).";
+        << "kernel_size must be specified once, or once per spatial "
+        << "dimension (kernel_size specified " << num_kernel_dims
+        << " times; " << num_spatial_axes_ << " spatial dims).";
   } else {
     CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
-        << "kernel_size must be specified once, or once per spatial dimension "
-        << "(kernel_size specified " << num_kernel_dims << " times; "
-        << num_spatial_axes_ << " spatial dims).";
+        << "kernel_size must be specified once, or once per spatial "
+        << "dimension (kernel_size specified " << num_kernel_dims
+        << " times; " << num_spatial_axes_ << " spatial dims).";
   }
   if (num_kernel_dims == 1) {
     for (int i = 0; i < num_spatial_axes_; ++i) {
       kernel_shape_data[i] =
           conv_param.kernel_size(0);
     }
-  }
-  else if (num_kernel_dims == num_spatial_axes_) {
+  } else if (num_kernel_dims == num_spatial_axes_) {
     for (int i = 0; i < num_spatial_axes_; ++i) {
       kernel_shape_data[i] =
           conv_param.kernel_size(i);
@@ -71,7 +68,7 @@ void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
     if (num_kernel_dims == num_spatial_axes_ + 1) {
       for (int i = 0; i < num_spatial_axes_; ++i) {
         kernel_shape_data[i] =
-            conv_param.kernel_size(i + 1); // ignore the first kernel_size
+            conv_param.kernel_size(i + 1);  // ignore the first kernel_size
       }
     }
   }
@@ -113,7 +110,7 @@ void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       else if (num_stride_dims == num_spatial_axes_ )
         stride_data[i] = conv_param.stride(i);
       else if (num_stride_dims == num_spatial_axes_ + 1)
-        stride_data[i] = conv_param.stride(i + 1); // ignore the first one
+        stride_data[i] = conv_param.stride(i + 1);  // ignore the first one
       CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
     }
   }
@@ -152,7 +149,7 @@ void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       else if (num_pad_dims == num_spatial_axes_ )
         pad_data[i] = conv_param.pad(i);
       else if (num_pad_dims == num_spatial_axes_ + 1)
-        pad_data[i] = conv_param.pad(i + 1); // ignore the first one
+        pad_data[i] = conv_param.pad(i + 1);  // ignore the first one
     }
   }
   // Setup dilation dimensions (dilation_).
@@ -182,7 +179,7 @@ void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
     else if (num_dilation_dims == num_spatial_axes_ )
       dilation_data[i] = conv_param.dilation(i);
     else if (num_dilation_dims == num_spatial_axes_ + 1)
-      dilation_data[i] = conv_param.dilation(i + 1); // ignore the first one
+      dilation_data[i] = conv_param.dilation(i + 1);  // ignore the first one
   }
   // Special case: im2col is the identity for 1x1 convolution with stride 1
   // and no padding, so flag for skipping the buffer and transformation.
@@ -305,7 +302,8 @@ void BaseConvolutionLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
     if (reverse_dimensions()) {
       conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i + forced_3d_);
     } else {
-      conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i + forced_3d_);
+      conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i +
+          forced_3d_);
     }
   }
   // The im2col result buffer will only hold one image at a time to avoid
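
A small self-check of the forced-3D rule above: a 5-D bottom whose length axis
is a singleton is routed through the ordinary 2-D convolution path, while a
genuine clip keeps the generic N-D path. A sketch under assumed shapes (the
helper merely mirrors the LayerSetUp condition; it is not part of the commit):

    #include <cassert>
    #include <vector>

    // Mirrors: num_axes == 5 && channel_axis_ == 1 && bottom[0]->shape(2) == 1
    bool is_forced_3d(const std::vector<int>& shape, int channel_axis) {
      return shape.size() == 5 && channel_axis == 1 && shape[2] == 1;
    }

    void forced_3d_demo() {
      int a[] = {1, 3, 1, 112, 112};   // singleton length axis
      int b[] = {1, 3, 16, 112, 112};  // genuine 16-frame clip
      std::vector<int> single(a, a + 5), clip(b, b + 5);
      assert(is_forced_3d(single, 1));  // first_spatial_axis = 3, 2 spatial axes
      assert(!is_forced_3d(clip, 1));   // first_spatial_axis = 2, 3 spatial axes
    }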
4 changes: 0 additions & 4 deletions src/caffe/layers/cudnn_conv_layer.cpp
@@ -98,8 +98,6 @@ void CuDNNConvolutionLayer<Dtype>::Reshape(
   bottom_offset_ = this->bottom_dim_ / this->group_;
   top_offset_ = this->top_dim_ / this->group_;
   const bool forced_3d = this->forced_3d_;
-  //std::cout << "cudnn_conv_layer.cpp: num_spatial_axes="<<this->num_spatial_axes_<<std::endl;
-  //std::cout << "cudnn_conv_layer.cpp: forced_3d="<<forced_3d<<std::endl;
   const int height = bottom[0]->shape(this->channel_axis_ + 1 + forced_3d);
   const int width = bottom[0]->shape(this->channel_axis_ + 2 + forced_3d);
   const int height_out = top[0]->shape(this->channel_axis_ + 1 + forced_3d);
@@ -110,8 +108,6 @@ void CuDNNConvolutionLayer<Dtype>::Reshape(
   const int* stride_data = this->stride_.cpu_data();
   const int stride_h = stride_data[0];
   const int stride_w = stride_data[1];
-  //std::cout << "cudnn_conv_layer.cpp: h="<<height<<", w="<<width<<", h_o="<<height_out<<
-  //  ", w_o="<<width_out<<", pad_h="<<pad_h <<std::endl;
 
   // Specify workspace limit for kernels directly until we have a
   // planning strategy and a rewrite of Caffe's GPU memory mangagement
7 changes: 0 additions & 7 deletions src/caffe/layers/im2col_layer.cpp
@@ -18,10 +18,8 @@ void Im2colLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
     forced_3d_ = true;
   else
     forced_3d_ = false;
-  //std::cout << "forced_3d_=" << forced_3d_ << std::endl;
   const int first_spatial_dim = channel_axis_ + 1 + forced_3d_;
   num_spatial_axes_ = input_num_dims - first_spatial_dim;
-  //std::cout << "num_spatial_axes_=" << num_spatial_axes_ << std::endl;
   CHECK_GE(num_spatial_axes_, 1);
   vector<int> dim_blob_shape(1, num_spatial_axes_);
   // Setup filter kernel dimensions (kernel_shape_).
@@ -128,11 +126,6 @@ void Im2colLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
         / stride_data[i] + 1;
     top_shape[channel_axis_ + i + 1 + forced_3d_] = output_dim;
   }
-  //{
-  //std::cout << "----------------------" << std::endl;
-  //for (int i=0; i<top_shape.size(); ++i)
-  //  std::cout << "top_shape["<<i<<"]="<<top_shape[i]<<std::endl;
-  //}
   top[0]->Reshape(top_shape);
   num_ = bottom[0]->count(0, channel_axis_);
   bottom_dim_ = bottom[0]->count(channel_axis_);
1 change: 0 additions & 1 deletion src/caffe/layers/pooling_layer.cpp
@@ -86,7 +86,6 @@ void PoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
   channels_ = bottom[0]->channels();
   height_ = bottom[0]->height();
   width_ = bottom[0]->width();
-  //std::cout<<"c="<<channels_<<",h="<<height_<<",w="<<width_<<std::endl;
   if (global_pooling_) {
     kernel_h_ = bottom[0]->height();
     kernel_w_ = bottom[0]->width();
