Commit 35d9e3f1 authored by miaecle

auPR metric

parent 1d6a1459
+15 −2
@@ -11,6 +11,8 @@ from sklearn.metrics import r2_score
 from sklearn.metrics import mean_squared_error
 from sklearn.metrics import mean_absolute_error
 from sklearn.metrics import precision_score
+from sklearn.metrics import precision_recall_curve
+from sklearn.metrics import auc
 from scipy.stats import pearsonr


@@ -70,6 +72,17 @@ def pearson_r2_score(y, y_pred):
   return pearsonr(y, y_pred)[0]**2
 
 
+def auPR_score(y, y_pred):
+  """Compute area under precision-recall curve"""
+  assert y_pred.shape == y.shape
+  n_classes = y_pred.shape[1]
+  scores = []
+  for i in range(n_classes):
+    precision, recall, _ = precision_recall_curve(y[:, i], y_pred[:, i])
+    scores.append(auc(recall, precision))
+  return np.mean(scores)
+
+
 def rms_score(y_true, y_pred):
   """Computes RMS error."""
   return np.sqrt(mean_squared_error(y_true, y_pred))
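
For context, the new helper can be exercised standalone. The sketch below reproduces auPR_score with the imports it needs and runs it on hypothetical data (the 4×2 label and probability arrays are made up for illustration; the function expects one-hot labels and per-class probabilities of matching shape):

import numpy as np
from sklearn.metrics import precision_recall_curve, auc

def auPR_score(y, y_pred):
  """Compute area under precision-recall curve, averaged across classes."""
  assert y_pred.shape == y.shape
  n_classes = y_pred.shape[1]
  scores = []
  for i in range(n_classes):
    # Treat each class column as a binary problem: one-hot true labels
    # against predicted probabilities for that class.
    precision, recall, _ = precision_recall_curve(y[:, i], y_pred[:, i])
    # auc integrates precision over recall for this class.
    scores.append(auc(recall, precision))
  return np.mean(scores)

# Hypothetical data: 4 samples, 2 classes.
y = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])
y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.3, 0.7]])
print(auPR_score(y, y_pred))  # mean of the per-class auPR values

Note that sklearn's auc accepts the decreasing recall values directly, since it only requires a monotonic x-axis.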
@@ -148,7 +161,7 @@ class Metric(object):
       if self.metric.__name__ in [
           "roc_auc_score", "matthews_corrcoef", "recall_score",
           "accuracy_score", "kappa_score", "precision_score",
-          "balanced_accuracy_score"
+          "balanced_accuracy_score", "auPR_score"
       ]:
         mode = "classification"
       elif self.metric.__name__ in [
@@ -267,7 +280,7 @@ class Metric(object):
       # TODO(rbharath): This has been a major source of bugs. Is there a more
       # robust characterization of which metrics require class-probs and which
       # don't?
-      if "roc_auc_score" in self.name:
+      if "roc_auc_score" in self.name or "auPR_score" in self.name:
         y_true = to_one_hot(y_true).astype(int)
         y_pred = np.reshape(y_pred, (n_samples, n_classes))
       else:
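
The hunk above is why auPR_score joins roc_auc_score in the class-probability branch: both consume probability scores rather than hard predictions, so y_true is expanded to one-hot form and the flat y_pred is reshaped to (n_samples, n_classes) before scoring. A minimal sketch of that preprocessing, using a simplified stand-in for the project's to_one_hot helper (the real helper's signature may differ):

import numpy as np

def to_one_hot(y, n_classes=2):
  # Simplified stand-in: turn integer class labels into one-hot rows.
  one_hot = np.zeros((len(y), n_classes))
  one_hot[np.arange(len(y)), y.astype(int)] = 1
  return one_hot

n_samples, n_classes = 4, 2
y_true = np.array([0, 1, 1, 0])              # hypothetical integer labels
y_pred = np.array([0.9, 0.1, 0.2, 0.8,
                   0.4, 0.6, 0.7, 0.3])      # hypothetical flat probabilities

y_true = to_one_hot(y_true).astype(int)
y_pred = np.reshape(y_pred, (n_samples, n_classes))
# Both arrays now have the matching (n_samples, n_classes) shape
# that auPR_score's assert requires.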
+0 −4
@@ -733,7 +733,6 @@ class TestOverfit(test_util.TensorFlowTestCase):
 
     # Fit trained model
     model.fit(dataset, nb_epoch=20)
-    model.save()
 
     # Eval model on train
     scores = model.evaluate(dataset, [regression_metric])
@@ -818,7 +817,6 @@ class TestOverfit(test_util.TensorFlowTestCase):
 
     # Fit trained model
     model.fit(dataset, nb_epoch=50)
-    model.save()
     # Eval model on train
     scores = model.evaluate(dataset, [regression_metric])
 
@@ -912,7 +910,6 @@ class TestOverfit(test_util.TensorFlowTestCase):
 
     # Fit trained model
    model.fit(dataset, nb_epoch=20)
-    model.save()
 
     # Eval model on train
     scores = model.evaluate(dataset, [classification_metric])
@@ -1009,7 +1006,6 @@ class TestOverfit(test_util.TensorFlowTestCase):
 
     # Fit trained model
     model.fit(dataset, nb_epoch=40)
-    model.save()
 
     # Eval model on train
     scores = model.evaluate(dataset, [regression_metric])