Fixup AccuracyLayer like SoftmaxLossLayer in BVLC#1970 -- fixes BVLC#…
jeffdonahue authored and cbfinn committed Aug 12, 2015
1 parent b63b9fe commit d37d928
Showing 3 changed files with 38 additions and 40 deletions.
include/caffe/loss_layers.hpp (1 addition, 5 deletions)
@@ -78,11 +78,7 @@ class AccuracyLayer : public Layer<Dtype> {
     }
   }
 
-  /// Whether to ignore instances with a certain label
-  bool has_ignore_label_;
-  /// The label indicating that an instance should be ignored.
-  int ignore_label_;
 
+  int label_axis_, outer_num_, inner_num_;
   int top_k_;
-  Dtype denominator_;
 };
src/caffe/layers/accuracy_layer.cpp (31 additions, 30 deletions)
@@ -30,11 +30,15 @@ void AccuracyLayer<Dtype>::Reshape(
   const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
   CHECK_LE(top_k_, bottom[0]->count() / bottom[1]->count())
       << "top_k must be less than or equal to the number of classes.";
-  CHECK_GE(bottom[0]->num_axes(), bottom[1]->num_axes());
-  for (int i = 0; i < bottom[1]->num_axes(); ++i) {
-    CHECK_LE(bottom[0]->shape(i), bottom[1]->shape(i))
-        << "Dimension mismatch between predictions and label.";
-  }
+  label_axis_ =
+      bottom[0]->CanonicalAxisIndex(this->layer_param_.accuracy_param().axis());
+  outer_num_ = bottom[0]->count(0, label_axis_);
+  inner_num_ = bottom[0]->count(label_axis_ + 1);
+  CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
+      << "Number of labels must match number of predictions; "
+      << "e.g., if label axis == 1 and prediction shape is (N, C, H, W), "
+      << "label count (number of labels) must be N*H*W, "
+      << "with integer values in {0, 1, ..., C-1}.";
   vector<int> top_shape(0);  // Accuracy is a scalar; 0 axes.
   top[0]->Reshape(top_shape);
 }
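
The new Reshape logic factors the prediction blob around the label axis: outer_num_ is the product of the dimensions before the axis and inner_num_ the product of the dimensions after it, so outer_num_ * inner_num_ is the number of prediction sites, each of which needs one integer label. A minimal sketch of that arithmetic (not part of the commit), assuming a hypothetical (N, C, H, W) = (2, 3, 4, 5) prediction blob with axis == 1:

    #include <cassert>
    #include <functional>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
      // Hypothetical prediction blob shape (N, C, H, W) = (2, 3, 4, 5), axis == 1.
      const std::vector<int> shape = {2, 3, 4, 5};
      const int axis = 1;
      // Mirrors Blob::count(0, axis): product of the dimensions before the axis.
      const int outer_num = std::accumulate(
          shape.begin(), shape.begin() + axis, 1, std::multiplies<int>());
      // Mirrors Blob::count(axis + 1): product of the dimensions after the axis.
      const int inner_num = std::accumulate(
          shape.begin() + axis + 1, shape.end(), 1, std::multiplies<int>());
      std::cout << "outer_num = " << outer_num     // 2 (N)
                << ", inner_num = " << inner_num   // 20 (H * W)
                << std::endl;
      // The CHECK_EQ in Reshape demands one label per prediction site,
      // i.e. a label blob with outer_num * inner_num = 40 entries here.
      assert(outer_num * inner_num == 40);
      return 0;
    }

With axis == 1 and no spatial axes (H = W = 1), this reduces to the old one-label-per-batch-item behavior.
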
@@ -45,38 +49,35 @@ void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
   Dtype accuracy = 0;
   const Dtype* bottom_data = bottom[0]->cpu_data();
   const Dtype* bottom_label = bottom[1]->cpu_data();
-  int num = bottom[1]->count();
-  int dim = bottom[0]->count() / num;
+  const int dim = bottom[0]->count() / outer_num_;
+  const int num_labels = bottom[0]->shape(label_axis_);
   vector<Dtype> maxval(top_k_+1);
   vector<int> max_id(top_k_+1);
-  int count = 0;
-  for (int i = 0; i < num; ++i) {
-    // Top-k accuracy
-    std::vector<std::pair<Dtype, int> > bottom_data_vector;
-    for (int j = 0; j < dim; ++j) {
-      bottom_data_vector.push_back(
-          std::make_pair(bottom_data[i * dim + j], j));
-    }
-    std::partial_sort(
-        bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
-        bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
-    // check if true label is in top k predictions
-    const int label_value = static_cast<int>(bottom_label[i]);
-    if (has_ignore_label_ && label_value == ignore_label_) {
-      continue;
-    }
-    ++count;
-    for (int k = 0; k < top_k_; k++) {
-      if (bottom_data_vector[k].second == label_value) {
-        ++accuracy;
-        break;
+  for (int i = 0; i < outer_num_; ++i) {
+    for (int j = 0; j < inner_num_; ++j) {
+      // Top-k accuracy
+      std::vector<std::pair<Dtype, int> > bottom_data_vector;
+      for (int k = 0; k < num_labels; ++k) {
+        bottom_data_vector.push_back(std::make_pair(
+            bottom_data[i * dim + k * inner_num_ + j], k));
+      }
+      std::partial_sort(
+          bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
+          bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
+      // check if true label is in top k predictions
+      const int label_value =
+          static_cast<int>(bottom_label[i * inner_num_ + j]);
+      for (int k = 0; k < top_k_; k++) {
+        if (bottom_data_vector[k].second == label_value) {
+          ++accuracy;
+          break;
+        }
       }
     }
   }
 
   // LOG(INFO) << "Accuracy: " << accuracy;
-  const Dtype denominator = (denominator_ == 0) ? count : denominator_;
-  top[0]->mutable_cpu_data()[0] = accuracy / denominator;
+  top[0]->mutable_cpu_data()[0] = accuracy / outer_num_ / inner_num_;
   // Accuracy layer should not be used as a loss function.
 }
 
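In the rewritten Forward_cpu, accuracy is scored once per prediction site rather than once per batch item: for each of the outer_num_ * inner_num_ sites, the num_labels class scores are gathered with stride inner_num_, the top k are pulled to the front with std::partial_sort, and a hit is counted if the true label is among them; the sum is then divided by outer_num_ * inner_num_ in place of the removed count/denominator machinery. A self-contained sketch of that indexing and top-k test (a hypothetical helper, not the layer itself):

    #include <algorithm>
    #include <functional>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Sketch of the updated top-k accuracy over a flat score array laid out as
    // (outer, classes, inner), i.e. scores[(i * num_labels + k) * inner + j].
    float TopKAccuracy(const std::vector<float>& scores,
                       const std::vector<int>& labels,
                       int outer, int num_labels, int inner, int top_k) {
      const int dim = num_labels * inner;  // count() / outer_num_ in the layer
      float hits = 0;
      for (int i = 0; i < outer; ++i) {
        for (int j = 0; j < inner; ++j) {
          // Gather the class scores for this prediction site.
          std::vector<std::pair<float, int> > v;
          for (int k = 0; k < num_labels; ++k) {
            v.push_back(std::make_pair(scores[i * dim + k * inner + j], k));
          }
          // Put the top_k largest scores first, as the layer does.
          std::partial_sort(v.begin(), v.begin() + top_k, v.end(),
                            std::greater<std::pair<float, int> >());
          const int label = labels[i * inner + j];
          for (int k = 0; k < top_k; ++k) {
            if (v[k].second == label) { ++hits; break; }
          }
        }
      }
      // New denominator: one term per prediction site, not per batch item.
      return hits / (outer * inner);
    }

    int main() {
      // One image (outer = 1), 3 classes, 2 spatial sites, top-1.
      const std::vector<float> scores = {0.1f, 0.7f,   // class 0 at sites 0, 1
                                         0.8f, 0.2f,   // class 1
                                         0.1f, 0.1f};  // class 2
      const std::vector<int> labels = {1, 0};  // true class per site
      std::cout << TopKAccuracy(scores, labels, 1, 3, 2, 1) << std::endl;  // 1
      return 0;
    }

The stride inner_num_ appears because the class axis sits in the middle of the layout: consecutive scores for one spatial position are inner_num_ elements apart in the flat array.
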
src/caffe/proto/caffe.proto (6 additions, 5 deletions)
@@ -389,11 +389,12 @@ message AccuracyParameter {
   // class (i.e. argmax).
   optional uint32 top_k = 1 [default = 1];
 
-  // Controls the denominator in the computed accuracy = #correct / denominator.
-  // Must be a positive number, or the default of 0, for the total input weight.
-  // If no input weights are used, the denominator is the batch size, as the
-  // weights each default to 1.
-  optional float denominator = 2 [default = 0];
+  // The "label" axis of the prediction blob, whose argmax corresponds to the
+  // predicted label -- may be negative to index from the end (e.g., -1 for the
+  // last axis). For example, if axis == 1 and the predictions are
+  // (N x C x H x W), the label blob is expected to contain N*H*W ground truth
+  // labels with integer values in {0, 1, ..., C-1}.
+  optional int32 axis = 2 [default = 1];
 }
 
 // Message that stores parameters used by ArgMaxLayer
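With the denominator field gone, the parameters this commit touches are top_k and the new axis field. For illustration, a hypothetical prototxt usage (the blob names fc8 and label are placeholders, and both fields keep their defaults here):

    layer {
      name: "accuracy"
      type: "Accuracy"
      bottom: "fc8"     # predictions, e.g. (N, C, H, W)
      bottom: "label"   # N*H*W integer labels in {0, ..., C-1}
      top: "accuracy"
      accuracy_param {
        axis: 1   # the label axis; 1 is the default
        top_k: 1  # count a hit if the true class is among the top 1 scores
      }
    }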
