Commit 372d514e authored by Bharath Ramsundar

Cleanup

parent 30a5d79f
+0 −2
@@ -111,12 +111,10 @@ class Model(object):
    # TODO(rbharath/enf): We need a structured way to deal with potential GPU
    #                     memory overflows.
    batch_size = self.model_params["batch_size"]
-    ####################################################### DEBUG
    if "pad_batches" in self.model_params:
      pad_batches = self.model_params["pad_batches"]
    else:
      pad_batches = False
-    ####################################################### DEBUG
    for epoch in range(self.model_params["nb_epoch"]):
      log("Starting epoch %s" % str(epoch+1), self.verbosity)
      losses = []
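
For context, the if/else retained above just defaults pad_batches to False when the key is absent from model_params. A minimal standalone sketch of the equivalent default-handling, assuming model_params is a plain dict (the values here are hypothetical):

    # Sketch only: dict.get expresses the retained if/else in one line.
    model_params = {"batch_size": 32, "nb_epoch": 10}  # hypothetical params
    pad_batches = model_params.get("pad_batches", False)
    assert pad_batches is False
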
+0 −8
@@ -142,12 +142,6 @@ class TensorflowGraph(object):
          task_str = str(task).zfill(len(str(self.num_tasks)))
          with self._shared_name_scope('cost_{}'.format(task_str)):
            with tf.name_scope('weighted'):
-              ####################################################### DEBUG
-              #print("len(self.output)")
-              #print(len(self.output))
-              #print("self.output")
-              #print(self.output)
-              ####################################################### DEBUG
              weighted_cost = self.cost(self.output[task], self.labels[task],
                                        self.weights[task])
              weighted_costs.append(weighted_cost)
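
For context, weighted_cost above scales each task's per-example loss by its example weights. A minimal NumPy sketch of one such weighted cost, assuming mean-squared error (the actual self.cost is defined elsewhere in the class and may differ):

    import numpy as np

    def weighted_mse(output, labels, weights):
      # Hypothetical cost: per-example squared error scaled by weights.
      return np.mean(weights * (output - labels) ** 2)

    out = np.array([0.5, 2.0])
    lab = np.array([1.0, 2.0])
    w = np.array([1.0, 0.0])
    assert weighted_mse(out, lab, w) == 0.125
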
@@ -218,12 +212,10 @@ class TensorflowGraph(object):
          if shuffle:
            log("About to shuffle dataset before epoch start.", self.verbosity)
            dataset.shuffle()
-          ####################################################################### DEBUG
          for ind, (X_b, y_b, w_b, ids_b) in enumerate(
              dataset.iterbatches(batch_size, pad_batches=pad_batches)):
            if ind % log_every_N_batches == 0:
              log("On batch %d" % ind, self.verbosity)
-          ####################################################################### DEBUG
            # Run training op.
            feed_dict = self.construct_feed_dict(X_b, y_b, w_b, ids_b)
            fetches = self.output + [
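
For context, the loop above draws minibatches from dataset.iterbatches and logs progress every log_every_N_batches steps. A standalone sketch of the same pattern, with a toy generator standing in for the real iterbatches (which also yields labels, weights, and ids):

    import numpy as np

    def toy_iterbatches(X, batch_size):
      # Hypothetical stand-in for dataset.iterbatches.
      for start in range(0, len(X), batch_size):
        yield X[start:start + batch_size]

    log_every_N_batches = 2
    for ind, X_b in enumerate(toy_iterbatches(np.arange(10), batch_size=3)):
      if ind % log_every_N_batches == 0:
        print("On batch %d" % ind)
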
+0 −8
@@ -67,9 +67,7 @@ from deepchem.models.tensorflow_models import TensorflowClassifier
from deepchem.models.tensorflow_models import TensorflowRegressor
from deepchem.models.tensorflow_models import model_ops
from deepchem.metrics import to_one_hot
-################################################### DEBUG
from deepchem.datasets import pad_features
-################################################### DEBUG

def softmax(x):
  """Simple numpy softmax implementation
@@ -336,13 +334,11 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
      num_tasks = self.num_tasks
      outputs = []
      with self._get_shared_session().as_default():
-        ################################################### DEBUG
        n_samples = len(X)
        # Some tensorflow models can't handle variadic batches,
        # especially models using tf.pack, tf.split. Pad batch-size
        # to handle these cases.
        X = pad_features(self.model_params["batch_size"], X)
-        ################################################### DEBUG
        feed_dict = self.construct_feed_dict(X)
        data = self._get_shared_session().run(
            self.output, feed_dict=feed_dict)
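
The pad_features call above grows X to a full batch so that graphs built with tf.pack/tf.split always see a fixed batch dimension. A minimal sketch of such a padding helper, assuming 2-D features padded by repeating rows (the real deepchem.datasets.pad_features may behave differently):

    import numpy as np

    def pad_features_sketch(batch_size, X):
      # Hypothetical: tile rows of X until there are batch_size of them.
      n_samples = len(X)
      if n_samples == batch_size:
        return X
      reps = int(np.ceil(batch_size / float(n_samples)))
      return np.concatenate([X] * reps)[:batch_size]

    assert pad_features_sketch(8, np.ones((3, 4))).shape == (8, 4)
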
@@ -352,22 +348,18 @@ class TensorflowMultiTaskRegressor(TensorflowRegressor):
          batch_outputs = batch_outputs.transpose((1, 0, 2))
        elif batch_outputs.ndim == 2:
          batch_outputs = batch_outputs.transpose((1, 0))
-        ########################################################### DEBUG
        # Handle edge case when batch-size is 1.
        elif batch_outputs.ndim == 1:
          #print("X.shape, batch_outputs.shape")
          #print(X.shape, batch_outputs.shape)
          n_samples = len(X)
          batch_outputs = batch_outputs.reshape((n_samples, num_tasks))
-        ########################################################### DEBUG
        else:
          raise ValueError(
              'Unrecognized rank combination for output: %s' %
              (batch_outputs.shape))
-        ##################################################### DEBUG
        # Prune away any padding that was added
        batch_outputs = batch_outputs[:n_samples]
-        ##################################################### DEBUG
        outputs.append(batch_outputs)

        outputs = np.squeeze(np.concatenate(outputs))
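
Taken together, the branches above put samples on the leading axis of batch_outputs, reshape the rank-1 edge case, and finally prune the rows that padding added. A standalone sketch of the rank-1 branch plus the pruning step, with hypothetical sizes:

    import numpy as np

    padded_size, num_tasks, n_samples = 8, 2, 5
    batch_outputs = np.arange(padded_size * num_tasks, dtype=float)  # rank 1
    batch_outputs = batch_outputs.reshape((padded_size, num_tasks))
    batch_outputs = batch_outputs[:n_samples]  # prune away the padding rows
    assert batch_outputs.shape == (5, 2)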