Commit bb63dd70 authored by Bharath Ramsundar's avatar Bharath Ramsundar
Browse files

Cleanup and hyperparameter fix incorporation.

parent e9e7c00e
Loading
Loading
Loading
Loading
+8 −3
Original line number Diff line number Diff line
@@ -682,10 +682,15 @@ class Dataset(object):
      save_to_disk(ys, os.path.join(self.data_dir, row['y_sums']))
      save_to_disk(yss, os.path.join(self.data_dir, row['y_sum_squares']))

  # TODO(rbharath, joegomes): Have to add more comments on why this function is
  # needed.
  def get_grad_statistics(self):
    """Computes and returns statistics of this dataset"""
    """Computes and returns statistics of this dataset

    This function assumes that the first task of a dataset holds the energy for
    an input system, and that the remaining tasks hold the gradient for the system.

    TODO(rbharath, joegomes): It is unclear whether this should be a Dataset
    function. Might get refactored out.
    """
    if len(self) == 0:
      return None, None, None, None
    df = self.metadata_df
+9 −2
Original line number Diff line number Diff line
@@ -2,6 +2,7 @@
Contains basic hyperparameter optimizations.
"""
import numpy as np
import os
import itertools
import tempfile
import shutil
@@ -61,7 +62,14 @@ class HyperparamOpt(object):
          self.verbosity, "high")

      if logdir is not None:
        model_dir = logdir
        model_dir = os.path.join(logdir, str(ind))
        log("model_dir is %s" % model_dir, self.verbosity, "high")
        try: 
          os.makedirs(model_dir)
        except OSError:
          if not os.path.isdir(model_dir):
            log("Error creating model_dir, using tempfile directory", self.verbosity, "high")
            model_dir = tempfile.mkdtemp()
      else:
        model_dir = tempfile.mkdtemp()
      #TODO(JG) Fit transformers for TF models
@@ -99,7 +107,6 @@ class HyperparamOpt(object):
          self.verbosity, "low")
      log("\tbest_validation_score so far: %f" % best_validation_score,
          self.verbosity, "low")

    if best_model is None:
      log("No models trained correctly.", self.verbosity, "low")
      # arbitrarily return last model
+3 −3
Original line number Diff line number Diff line
@@ -103,7 +103,7 @@ class Metric(object):
  """Wrapper class for computing user-defined metrics."""

  def __init__(self, metric, task_averager=None, name=None, threshold=None,
               verbosity=None, mode=None, atomicnet=False):
               verbosity=None, mode=None, compute_force_metrics=False):
    """
    Args:
      metric: function that takes args y_true, y_pred (in that order) and
@@ -136,7 +136,7 @@ class Metric(object):
        raise ValueError("Must specify mode for new metric.")
    assert mode in ["classification", "regression"]
    self.mode = mode
    self.atomicnet = atomicnet
    self.compute_force_metrics = compute_force_metrics 

  def compute_metric(self, y_true, y_pred, w=None, n_classes=2, filter_nans=True):
    """Compute a performance metric for each task.
@@ -182,7 +182,7 @@ class Metric(object):
      if filter_nans:
        computed_metrics = np.array(computed_metrics)
        computed_metrics = computed_metrics[~np.isnan(computed_metrics)]
      if self.atomicnet:
      if self.compute_force_metrics:
        force_error = self.task_averager(computed_metrics[1:])*4961.47596096
        print("Force error (metric: np.mean(%s)): %f kJ/mol/A" % (self.name, force_error))
        return computed_metrics[0]
+18 −17
Original line number Diff line number Diff line
@@ -4,6 +4,9 @@ Contains an abstract base class that supports different ML models.
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar and Joseph Gomes"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "GPL"
import sys
import numpy as np
import pandas as pd
@@ -141,42 +144,28 @@ class Model(object):
    n_tasks = len(self.tasks)
    for (X_batch, y_batch, w_batch, ids_batch) in dataset.iterbatches(
        batch_size, deterministic=True):
      ############################################################## DEBUG
      #y_pred_batch = np.reshape(self.predict_on_batch(X_batch), y_batch.shape)
      n_samples = len(X_batch)
      y_pred_batch = np.reshape(self.predict_on_batch(X_batch), (n_samples, n_tasks))
      #print("predict()")
      #print("X_batch.shape, y_batch.shape")
      #print(X_batch.shape, y_batch.shape)
      #print("y_pred_batch.shape")
      #print(y_pred_batch.shape)
      ############################################################## DEBUG
      y_pred_batch = undo_transforms(y_pred_batch, transformers)
      y_preds.append(y_pred_batch)
    ############################################################## DEBUG
    print("predict()")
    print("[y_pred.shape for y_pred in y_preds]")
    print([y_pred.shape for y_pred in y_preds])
    ############################################################## DEBUG
    y_pred = np.vstack(y_preds)
  
    # The iterbatches does padding with zero-weight examples on the last batch.
    # Remove padded examples.
    n_samples, n_tasks = len(dataset), len(self.tasks)
    y_pred = np.reshape(y_pred, (n_samples, n_tasks))
    ############################################################## DEBUG
    # Special case to handle singletasks.
    if n_tasks == 1:
      y_pred = np.squeeze(y_pred)
    print("n_tasks, y_pred.shape")
    print(n_tasks, y_pred.shape)
    ############################################################## DEBUG
    return y_pred

  def predict_grad(self, dataset, transformers=[]):
    """
    Uses self to calculate gradient on provided Dataset object.

    TODO(rbharath): Should we assume each model has meaningful gradients to
    predict? Should this be a subclass for PhysicalModel or the like?

    Returns:
      y_pred: numpy ndarray of shape (n_samples,)
    """
@@ -194,6 +183,10 @@ class Model(object):
  def evaluate_error(self, dataset, transformers=[]):
    """
    Evaluate the error in energy and gradient components, forcebalance-style.

    TODO(rbharath): This looks like it should be a subclass method for a
    PhysicalModel class. forcebalance-style errors aren't meaningful for most
    chem-informatic datasets.
    """
    y_preds = []
    y_train = []
@@ -239,6 +232,10 @@ class Model(object):
  def evaluate_error_class2(self, dataset, transformers=[]):
    """
    Evaluate the error in energy and gradient components, forcebalance-style.

    TODO(rbharath): Should be a subclass PhysicalModel method. Also, need to
    find a better name for this method (class2 doesn't tell us anything about the
    semantics of this method).
    """
    y_preds = []
    y_train = []
@@ -291,6 +288,10 @@ class Model(object):
    Uses self to calculate finite difference gradient on provided Dataset object.
    Currently only useful if your task is energy and self contains predict_grad_on_batch.

    TODO(rbharath): This shouldn't be a method of the Model class. Perhaps a
    method of a PhysicalModel subclass. Leaving it in for the time being while
    refactoring continues.

    Returns:
      y_pred: numpy ndarray of shape (n_samples,)
    """
+6 −5
Original line number Diff line number Diff line
@@ -169,11 +169,15 @@ class NormalizationTransformer(Transformer):
        transformed_grad.append(grad_E)   

      transformed_grad = np.asarray(transformed_grad)
      #print("TRAINING gradient")
      #print(self.grad)
      return transformed_grad

class AtomicNormalizationTransformer(Transformer):
  """
  TODO(rbharath): Needs more discussion of what a gradient is semantically.
  It's evident that not every Dataset has meaningful gradient information, so
  this transformer can't be applied to all data. Should there be a subclass of
  Dataset named GradientDataset perhaps?
  """

  def __init__(self, transform_X=False, transform_y=False, transform_w=False,
               dataset=None):
@@ -260,12 +264,9 @@ class AtomicNormalizationTransformer(Transformer):
        transformed_grad.append(grad_E)   

      transformed_grad = np.asarray(transformed_grad)
      #print("TRAINING gradient")
      #print(self.grad)
      return transformed_grad



class ClippingTransformer(Transformer):

  def __init__(self, transform_X=False, transform_y=False,