Return Python float instead of numpy.float64 in sklearn metrics (hugg…
lewtun authored Jul 9, 2021
1 parent 3cbc28f commit 060dc85
Showing 8 changed files with 18 additions and 16 deletions.
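
Every file below gets the same fix: the NumPy scalar (or array) returned by the sklearn/scipy call is passed through `.tolist()`, which yields the equivalent built-in Python object, so the metrics return plain `float` values instead of `numpy.float64`. A minimal sketch of the pattern, not taken from the repository (`accuracy_score` stands in for any call that returns a NumPy scalar):

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

# sklearn typically hands back a NumPy scalar, not the built-in float.
score = accuracy_score([0, 1, 1, 0], [0, 1, 0, 0])
print(type(score))   # usually <class 'numpy.float64'>

# NumPy scalars and 0-d arrays expose .tolist(), which returns the
# equivalent native Python object.
plain = score.tolist()
print(type(plain))   # <class 'float'>

# The same call works on arrays too (e.g. per-class F1 with average=None),
# producing a list of Python floats.
per_class = f1_score([0, 1, 1, 0], [0, 1, 0, 0], average=None).tolist()
print(per_class)     # [0.8, 0.6666666666666666]
```

For a scalar, `.item()` would do the same thing; `.tolist()` has the advantage of also handling the array-valued cases (per-class F1, precision, recall) with one spelling.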
4 changes: 3 additions & 1 deletion metrics/accuracy/accuracy.py
@@ -83,5 +83,7 @@ def _info(self):

    def _compute(self, predictions, references, normalize=True, sample_weight=None):
        return {
-            "accuracy": accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight),
+            "accuracy": accuracy_score(
+                references, predictions, normalize=normalize, sample_weight=sample_weight
+            ).tolist(),
        }
2 changes: 1 addition & 1 deletion metrics/f1/f1.py
@@ -106,5 +106,5 @@ def _compute(self, predictions, references, labels=None, pos_label=1, average="b
                pos_label=pos_label,
                average=average,
                sample_weight=sample_weight,
-            ),
+            ).tolist(),
        }
8 changes: 4 additions & 4 deletions metrics/glue/glue.py
@@ -81,21 +81,21 @@


def simple_accuracy(preds, labels):
-    return (preds == labels).mean()
+    return (preds == labels).mean().tolist()


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
-    f1 = f1_score(y_true=labels, y_pred=preds)
+    f1 = f1_score(y_true=labels, y_pred=preds).tolist()
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
-    pearson_corr = pearsonr(preds, labels)[0]
-    spearman_corr = spearmanr(preds, labels)[0]
+    pearson_corr = pearsonr(preds, labels)[0].tolist()
+    spearman_corr = spearmanr(preds, labels)[0].tolist()
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
6 changes: 3 additions & 3 deletions metrics/indic_glue/indic_glue.py
@@ -74,12 +74,12 @@


def simple_accuracy(preds, labels):
-    return (preds == labels).mean()
+    return (preds == labels).mean().tolist()


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
-    f1 = f1_score(y_true=labels, y_pred=preds)
+    f1 = f1_score(y_true=labels, y_pred=preds).tolist()
    return {
        "accuracy": acc,
        "f1": f1,

@@ -99,7 +99,7 @@ def precision_at_10(en_sentvecs, in_sentvecs):
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
-    return matches.mean()
+    return matches.mean().tolist()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
2 changes: 1 addition & 1 deletion metrics/matthews_correlation/matthews_correlation.py
@@ -82,5 +82,5 @@ def _info(self):

    def _compute(self, predictions, references, sample_weight=None):
        return {
-            "matthews_correlation": matthews_corrcoef(references, predictions, sample_weight=sample_weight),
+            "matthews_correlation": matthews_corrcoef(references, predictions, sample_weight=sample_weight).tolist(),
        }
2 changes: 1 addition & 1 deletion metrics/precision/precision.py
@@ -108,5 +108,5 @@ def _compute(self, predictions, references, labels=None, pos_label=1, average="b
                pos_label=pos_label,
                average=average,
                sample_weight=sample_weight,
-            ),
+            ).tolist(),
        }
2 changes: 1 addition & 1 deletion metrics/recall/recall.py
@@ -108,5 +108,5 @@ def _compute(self, predictions, references, labels=None, pos_label=1, average="b
                pos_label=pos_label,
                average=average,
                sample_weight=sample_weight,
-            ),
+            ).tolist(),
        }
8 changes: 4 additions & 4 deletions metrics/super_glue/super_glue.py
@@ -107,12 +107,12 @@


def simple_accuracy(preds, labels):
-    return (preds == labels).mean()
+    return (preds == labels).mean().tolist()


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
-    f1 = f1_score(y_true=labels, y_pred=preds, average=f1_avg)
+    f1 = f1_score(y_true=labels, y_pred=preds, average=f1_avg).tolist()
    return {
        "accuracy": acc,
        "f1": f1,

@@ -138,9 +138,9 @@ def evaluate_multirc(ids_preds, labels):
        f1s.append(f1)
        em = int(sum([p == l for p, l in preds_labels]) == len(preds_labels))
        ems.append(em)
-    f1_m = sum(f1s) / len(f1s)
+    f1_m = (sum(f1s) / len(f1s)).tolist()
    em = sum(ems) / len(ems)
-    f1_a = f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds])
+    f1_a = f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]).tolist()
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}

